code stringlengths 3 1.05M | repo_name stringlengths 4 116 | path stringlengths 4 991 | language stringclasses 9 values | license stringclasses 15 values | size int32 3 1.05M |
|---|---|---|---|---|---|
/*
* #%L
* Alfresco Repository
* %%
* Copyright (C) 2005 - 2016 Alfresco Software Limited
* %%
* This file is part of the Alfresco software.
* If the software was purchased under a paid Alfresco license, the terms of
* the paid license agreement will prevail. Otherwise, the software is
* provided under the following open source license terms:
*
* Alfresco is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Alfresco is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
* #L%
*/
package org.alfresco.repo.management.subsystems;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.aopalliance.intercept.MethodInterceptor;
import org.aopalliance.intercept.MethodInvocation;
import org.springframework.aop.framework.ProxyFactoryBean;
import org.springframework.aop.support.DefaultPointcutAdvisor;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.NoSuchBeanDefinitionException;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
/**
* A factory bean, normally used in conjunction with {@link ChildApplicationContextFactory} allowing selected
* interfaces in a child application context to be proxied by a bean in the parent application context. This allows
* 'hot-swapping' and reconfiguration of entire subsystems.
*/
public class SubsystemProxyFactory extends ProxyFactoryBean implements ApplicationContextAware
{
    private static final long serialVersionUID = -4186421942840611218L;

    /** The source application context factory. */
    private ApplicationContextFactory sourceApplicationContextFactory;

    /** Bean name used to look up the factory lazily when no direct reference has been injected. */
    private String sourceApplicationContextFactoryName;

    /** The parent application context, injected by Spring via {@link ApplicationContextAware}. */
    private ApplicationContext applicationContext;

    /** An optional bean name to look up in the source application context **/
    private String sourceBeanName;

    /** Guards the cached state below ({@link #context}, {@link #sourceBean}, {@link #typedBeans}). */
    private ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

    /** The last child application context seen; a different instance signals a subsystem restart. */
    private ApplicationContext context;

    /** Cached bean resolved from {@link #sourceBeanName}, or null when no name is configured. */
    private Object sourceBean;

    /** Optional fallback bean used when the subsystem or target bean does not exist. */
    private Object defaultBean;

    /** Cache of target beans keyed by the declaring interface of the invoked method. */
    private Map <Class<?>, Object> typedBeans = new HashMap<Class<?>, Object>(7);

    /**
     * Instantiates a new managed subsystem proxy factory. All method calls through the proxy are
     * routed to a bean located in the current child application context via {@link #locateBean(MethodInvocation)}.
     */
    public SubsystemProxyFactory()
    {
        addAdvisor(new DefaultPointcutAdvisor(new MethodInterceptor()
        {
            public Object invoke(MethodInvocation mi) throws Throwable
            {
                Method method = mi.getMethod();
                try
                {
                    return method.invoke(locateBean(mi), mi.getArguments());
                }
                catch (InvocationTargetException e)
                {
                    // Unwrap invocation target exceptions
                    throw e.getTargetException();
                }
            }
        }));
    }

    @SuppressWarnings("unchecked")
    @Override
    public void setInterfaces(Class[] interfaces)
    {
        super.setInterfaces(interfaces);
        // Make it possible to export the object via JMX
        setTargetClass(getObjectType());
    }

    /**
     * Sets the source application context factory by name.
     *
     * @param sourceApplicationContextFactoryName
     *            the name of the sourceApplicationContextFactory to set
     */
    public void setSourceApplicationContextFactoryName(String sourceApplicationContextFactoryName)
    {
        this.sourceApplicationContextFactoryName = sourceApplicationContextFactoryName;
    }

    /**
     * Sets the source application context factory by reference
     *
     * @param sourceApplicationContextFactory
     *            the sourceApplicationContextFactory to set
     */
    public void setSourceApplicationContextFactory(ApplicationContextFactory sourceApplicationContextFactory)
    {
        this.sourceApplicationContextFactory = sourceApplicationContextFactory;
    }

    /**
     * Resolves the factory: prefers the directly injected reference, otherwise looks it up
     * by name in the parent context.
     *
     * @return the factory, or <code>null</code> when neither a reference nor a resolvable name is available
     */
    private ApplicationContextFactory getSourceApplicationContextFactory()
    {
        if (sourceApplicationContextFactory != null)
        {
            return sourceApplicationContextFactory;
        }
        else
        {
            try
            {
                return applicationContext.getBean(sourceApplicationContextFactoryName, ApplicationContextFactory.class);
            } catch (NoSuchBeanDefinitionException e)
            {
                // Subsystem does not exist; callers fall back to the default bean
                return null;
            }
        }
    }

    @Override
    public void setApplicationContext(ApplicationContext applicationContext) throws BeansException
    {
        this.applicationContext = applicationContext;
    }

    /**
     * Sets an optional bean name to target all calls to in the source application context. If not set, an appropriate
     * bean is looked up based on method class.
     *
     * @param sourceBeanName
     *            the sourceBeanName to set
     */
    public void setSourceBeanName(String sourceBeanName)
    {
        this.sourceBeanName = sourceBeanName;
    }

    /**
     * Sets an optional default bean to be used if the target bean is not found. Generally used when a subsystem does not
     * exist.
     *
     * @param defaultBean
     *            the defaultBean to set
     */
    public void setDefaultBean(Object defaultBean)
    {
        this.defaultBean = defaultBean;
    }

    // Bring our cached copies of the source beans in line with the application context factory, using a RW lock to
    // ensure consistency
    protected Object locateBean(MethodInvocation mi)
    {
        boolean haveWriteLock = false;
        this.lock.readLock().lock();
        try
        {
            ApplicationContextFactory sourceApplicationContextFactory = getSourceApplicationContextFactory();
            if (sourceApplicationContextFactory != null)
            {
                ApplicationContext newContext = sourceApplicationContextFactory.getApplicationContext();
                if (this.context != newContext)
                {
                    // Upgrade the lock
                    // NOTE: ReentrantReadWriteLock has no atomic upgrade; another thread may refresh the caches
                    // between the unlock and lock below, so the child context is deliberately re-read afterwards.
                    this.lock.readLock().unlock();
                    this.lock.writeLock().lock();
                    haveWriteLock = true;
                    newContext = sourceApplicationContextFactory.getApplicationContext();
                    this.context = newContext;
                    // The subsystem was restarted: all cached beans are stale
                    this.typedBeans.clear();
                    this.sourceBean = null;
                    if (this.sourceBeanName != null)
                    {
                        this.sourceBean = newContext.getBean(this.sourceBeanName);
                    }
                }
                if (this.sourceBean == null)
                {
                    // No fixed target bean: route by the interface that declares the invoked method
                    Method method = mi.getMethod();
                    Class<?> type = method.getDeclaringClass();
                    Object bean = this.typedBeans.get(type);
                    if (bean == null)
                    {
                        // Upgrade the lock if necessary
                        if (!haveWriteLock)
                        {
                            this.lock.readLock().unlock();
                            this.lock.writeLock().lock();
                            haveWriteLock = true;
                        }
                        // Re-check after the (non-atomic) upgrade in case another thread populated the cache
                        bean = this.typedBeans.get(type);
                        if (bean == null)
                        {
                            Map<?, ?> beans = this.context.getBeansOfType(type);
                            if (beans.size() == 0 && defaultBean != null)
                            {
                                // Note: the default bean is intentionally not cached in typedBeans
                                bean = defaultBean;
                            }
                            else
                            {
                                if (beans.size() != 1)
                                {
                                    throw new RuntimeException("Don't know where to route call to method " + method);
                                }
                                bean = beans.values().iterator().next();
                                this.typedBeans.put(type, bean);
                            }
                        }
                    }
                    return bean;
                }
                return this.sourceBean;
            }
            else
            {
                // Subsystem not available at all: fall back to the default bean (may be null)
                return defaultBean;
            }
        }
        finally
        {
            if (haveWriteLock)
            {
                this.lock.writeLock().unlock();
            }
            else
            {
                this.lock.readLock().unlock();
            }
        }
    }
}
| Alfresco/alfresco-repository | src/main/java/org/alfresco/repo/management/subsystems/SubsystemProxyFactory.java | Java | lgpl-3.0 | 9,452 |
/*
* SonarQube, open source software quality management tool.
* Copyright (C) 2008-2014 SonarSource
* mailto:contact AT sonarsource DOT com
*
* SonarQube is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3 of the License, or (at your option) any later version.
*
* SonarQube is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
package selenium;
import com.google.common.base.Predicate;
import com.google.common.base.Supplier;
import org.openqa.selenium.InvalidElementStateException;
import org.openqa.selenium.NotFoundException;
import org.openqa.selenium.StaleElementReferenceException;
import org.openqa.selenium.WebDriverException;
import java.util.NoSuchElementException;
import java.util.concurrent.TimeUnit;
import static java.util.concurrent.TimeUnit.SECONDS;
/**
 * Retries an action, predicate check or element lookup until it succeeds or a
 * fixed timeout elapses. Prints a "." per failed attempt as progress feedback.
 */
class Retry {
  public static final Retry _30_SECONDS = new Retry(30, SECONDS);

  /** Total time budget for one call, in milliseconds. */
  private final long timeoutMs;

  Retry(long duration, TimeUnit timeUnit) {
    this.timeoutMs = timeUnit.toMillis(duration);
  }

  /**
   * Repeatedly resolves the target and, once present, applies the action to it.
   * Stale-element errors are ignored; other WebDriver errors are remembered and
   * rethrown if the timeout expires without success.
   */
  <T> void execute(Supplier<Optional<T>> target, Consumer<T> action) {
    WebDriverException lastError = null;
    boolean dotsPrinted = false;
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      try {
        Optional<T> resolved = target.get();
        if (resolved.isPresent()) {
          action.accept(resolved.get());
          if (dotsPrinted) {
            System.out.println();
          }
          return;
        }
      } catch (StaleElementReferenceException e) {
        // ignore
      } catch (WebDriverException e) {
        lastError = e;
      }
      dotsPrinted = true;
      System.out.print(".");
    }
    if (dotsPrinted) {
      System.out.println();
    }
    if (lastError != null) {
      throw lastError;
    }
    throw new NoSuchElementException("Not found");
  }

  /**
   * Repeatedly runs the action until it completes without a WebDriver error.
   * Rethrows the last error (or NoSuchElementException) if the timeout expires.
   */
  <T> void execute(Runnable action) {
    WebDriverException lastError = null;
    boolean dotsPrinted = false;
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      try {
        action.run();
        if (dotsPrinted) {
          System.out.println();
        }
        return;
      } catch (StaleElementReferenceException e) {
        // ignore
      } catch (WebDriverException e) {
        lastError = e;
      }
      dotsPrinted = true;
      System.out.print(".");
    }
    if (dotsPrinted) {
      System.out.println();
    }
    if (lastError != null) {
      throw lastError;
    }
    throw new NoSuchElementException("Not found");
  }

  /**
   * Repeatedly evaluates the predicate against the supplied target until it
   * holds or the timeout expires. Returns false on timeout, unless the last
   * failure was a not-found condition, in which case NoSuchElementException
   * is thrown.
   */
  <T> boolean verify(Supplier<T> targetSupplier, Predicate<T> predicate) throws NoSuchElementException {
    Error lastFailure = Error.KO;
    boolean dotsPrinted = false;
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      try {
        if (predicate.apply(targetSupplier.get())) {
          if (dotsPrinted) {
            System.out.println();
          }
          return true;
        }
        lastFailure = Error.KO;
      } catch (InvalidElementStateException e) {
        lastFailure = Error.KO;
      } catch (NotFoundException e) {
        lastFailure = Error.NOT_FOUND;
      } catch (StaleElementReferenceException e) {
        // ignore
      }
      dotsPrinted = true;
      System.out.print(".");
    }
    if (dotsPrinted) {
      System.out.println();
    }
    if (lastFailure == Error.NOT_FOUND) {
      throw new NoSuchElementException("Not found");
    }
    return false;
  }

  enum Error {
    NOT_FOUND, KO
  }
}
| abbeyj/sonarqube | it/it-tests/src/test/java/selenium/Retry.java | Java | lgpl-3.0 | 4,076 |
/*
* #%L
* Alfresco Repository
* %%
* Copyright (C) 2005 - 2016 Alfresco Software Limited
* %%
* This file is part of the Alfresco software.
* If the software was purchased under a paid Alfresco license, the terms of
* the paid license agreement will prevail. Otherwise, the software is
* provided under the following open source license terms:
*
* Alfresco is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Alfresco is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
* #L%
*/
package org.alfresco.repo.security.authentication;
import org.alfresco.filesys.auth.ftp.FTPAuthenticatorBase;
import org.alfresco.jlan.ftp.FTPSrvSession;
import org.alfresco.jlan.server.auth.ClientInfo;
import java.util.List;
/**
 * Base chaining FTP Authenticator class. Where appropriate, methods will 'chain' across multiple
 * {@link FTPAuthenticatorBase} instances, as returned by {@link #getUsableFtpAuthenticators()}.
 *
 * @author alex.mukha
 * @since 4.2.1
 */
public abstract class AbstractChainingFtpAuthenticator extends FTPAuthenticatorBase
{
    @Override
    public boolean authenticateUser(ClientInfo info, FTPSrvSession sess)
    {
        // Try each authenticator in turn; the first success wins.
        boolean authenticated = false;
        for (FTPAuthenticatorBase authenticator : getUsableFtpAuthenticators())
        {
            if (authenticator.authenticateUser(info, sess))
            {
                authenticated = true;
                break;
            }
        }
        // false here means authentication failed in all of the authenticators
        return authenticated;
    }

    /**
     * Gets the FTP authenticators across which methods will chain.
     *
     * @return the usable FTP authenticators
     */
    protected abstract List<FTPAuthenticatorBase> getUsableFtpAuthenticators();
}
| Alfresco/alfresco-repository | src/main/java/org/alfresco/repo/security/authentication/AbstractChainingFtpAuthenticator.java | Java | lgpl-3.0 | 2,217 |
/*
* #%L
* Alfresco Repository
* %%
* Copyright (C) 2005 - 2016 Alfresco Software Limited
* %%
* This file is part of the Alfresco software.
* If the software was purchased under a paid Alfresco license, the terms of
* the paid license agreement will prevail. Otherwise, the software is
* provided under the following open source license terms:
*
* Alfresco is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Alfresco is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
* #L%
*/
package org.alfresco.repo.search.impl.solr;
/**
 * Identifies an attempt to use a disabled feature.
 *
 * @author Matt Ward
 */
public class DisabledFeatureException extends RuntimeException
{
    private static final long serialVersionUID = 1L;

    /**
     * @param message describes which feature was disabled and why the attempt was rejected
     */
    DisabledFeatureException(String message)
    {
        super(message);
    }

    /**
     * Overload that preserves the originating cause instead of dropping it.
     *
     * @param message describes which feature was disabled and why the attempt was rejected
     * @param cause the underlying exception that triggered this one
     */
    DisabledFeatureException(String message, Throwable cause)
    {
        super(message, cause);
    }
}
# Copyright 2011 Hakan Kjellerstrand hakank@bonetmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Volsay problem in Google or-tools.
From the OPL model volsay.mod
Using arrays.
This model was created by Hakan Kjellerstrand (hakank@bonetmail.com)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from __future__ import print_function
from ortools.linear_solver import pywraplp
def main(unused_argv):
  """Solve the Volsay production-planning LP with an array of variables.

  Maximizes 40*Gas + 50*Chloride subject to a capacity constraint and a
  resource constraint, then prints the objective value and the production
  plan with reduced costs.
  """
  # Create the solver.
  # using GLPK
  # solver = pywraplp.Solver('CoinsGridGLPK',
  #                          pywraplp.Solver.GLPK_LINEAR_PROGRAMMING)
  # Using CLP
  solver = pywraplp.Solver('CoinsGridCLP',
                           pywraplp.Solver.CLP_LINEAR_PROGRAMMING)

  # data
  num_products = 2
  Gas = 0
  Chloride = 1
  products = ['Gas', 'Chloride']

  # declare variables: production quantity per product, bounded [0, 100000]
  production = [solver.NumVar(0, 100000, 'production[%i]' % i)
                for i in range(num_products)]

  #
  # constraints
  #
  solver.Add(production[Gas] + production[Chloride] <= 50)
  solver.Add(3 * production[Gas] + 4 * production[Chloride] <= 180)

  # objective (the call registers the objective on the solver; the return
  # value was previously bound to an unused local, which has been removed)
  solver.Maximize(40 * production[Gas] + 50 * production[Chloride])

  print('NumConstraints:', solver.NumConstraints())

  #
  # solution and search
  #
  solver.Solve()

  print()
  print('objective = ', solver.Objective().Value())
  for i in range(num_products):
    print(products[i], '=', production[i].SolutionValue(), end=' ')
    print('ReducedCost = ', production[i].ReducedCost())


if __name__ == '__main__':
  main('Volsay')
| WendellDuncan/or-tools | examples/python/volsay2.py | Python | apache-2.0 | 2,080 |
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deviceplugin
import (
"fmt"
"net"
"sync"
"time"
"github.com/golang/glog"
"golang.org/x/net/context"
"google.golang.org/grpc"
pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha"
)
// endpoint maps to a single registered device plugin. It is responsible
// for managing gRPC communications with the device plugin and caching
// device states reported by the device plugin.
type endpoint interface {
	run()
	stop()
	allocate(devs []string) (*pluginapi.AllocateResponse, error)
	getDevices() []pluginapi.Device
	callback(resourceName string, added, updated, deleted []pluginapi.Device)
}

// endpointImpl is the gRPC-backed implementation of endpoint.
type endpointImpl struct {
	client     pluginapi.DevicePluginClient // gRPC client to the device plugin
	clientConn *grpc.ClientConn             // underlying connection, closed by stop()
	socketPath string                       // unix socket the plugin listens on
	resourceName string                     // resource this plugin provides

	devices map[string]pluginapi.Device // cached device states, keyed by device ID
	mutex   sync.Mutex                  // guards devices

	cb monitorCallback // invoked with added/updated/deleted device diffs
}
// newEndpointImpl creates a new endpoint for the given resourceName,
// establishing the gRPC connection to the plugin's unix socket up front.
func newEndpointImpl(socketPath, resourceName string, devices map[string]pluginapi.Device, callback monitorCallback) (*endpointImpl, error) {
	devicePluginClient, conn, err := dial(socketPath)
	if err != nil {
		glog.Errorf("Can't create new endpoint with path %s err %v", socketPath, err)
		return nil, err
	}

	ep := &endpointImpl{
		client:       devicePluginClient,
		clientConn:   conn,
		socketPath:   socketPath,
		resourceName: resourceName,
		devices:      devices,
		cb:           callback,
	}
	return ep, nil
}
// callback forwards a device add/update/delete diff to the registered
// monitor callback on behalf of the named resource.
func (e *endpointImpl) callback(resourceName string, added, updated, deleted []pluginapi.Device) {
	e.cb(resourceName, added, updated, deleted)
}
// getDevices returns a snapshot of the endpoint's cached devices as a slice,
// taken under the mutex so callers never see a partially updated cache.
func (e *endpointImpl) getDevices() []pluginapi.Device {
	e.mutex.Lock()
	defer e.mutex.Unlock()
	var snapshot []pluginapi.Device
	for _, device := range e.devices {
		snapshot = append(snapshot, device)
	}
	return snapshot
}
// run initializes ListAndWatch gRPC call for the device plugin and
// blocks on receiving ListAndWatch gRPC stream updates. Each ListAndWatch
// stream update contains a new list of device states. listAndWatch compares the new
// device states with its cached states to get list of new, updated, and deleted devices.
// It then issues a callback to pass this information to the device manager which
// will adjust the resource available information accordingly.
func (e *endpointImpl) run() {
	stream, err := e.client.ListAndWatch(context.Background(), &pluginapi.Empty{})
	if err != nil {
		glog.Errorf(errListAndWatch, e.resourceName, err)
		return
	}

	// Seed a local working copy of the device map so each stream update can be
	// diffed without holding the mutex for the whole iteration.
	devices := make(map[string]pluginapi.Device)
	e.mutex.Lock()
	for _, d := range e.devices {
		devices[d.ID] = d
	}
	e.mutex.Unlock()

	for {
		response, err := stream.Recv()
		if err != nil {
			glog.Errorf(errListAndWatch, e.resourceName, err)
			return
		}

		devs := response.Devices
		glog.V(2).Infof("State pushed for device plugin %s", e.resourceName)

		newDevs := make(map[string]*pluginapi.Device)
		var added, updated []pluginapi.Device

		// Classify each reported device as new, health-changed, or unchanged.
		for _, d := range devs {
			dOld, ok := devices[d.ID]
			newDevs[d.ID] = d

			if !ok {
				glog.V(2).Infof("New device for Endpoint %s: %v", e.resourceName, d)
				devices[d.ID] = *d
				added = append(added, *d)
				continue
			}

			if d.Health == dOld.Health {
				continue
			}

			if d.Health == pluginapi.Unhealthy {
				glog.Errorf("Device %s is now Unhealthy", d.ID)
			} else if d.Health == pluginapi.Healthy {
				glog.V(2).Infof("Device %s is now Healthy", d.ID)
			}

			devices[d.ID] = *d
			updated = append(updated, *d)
		}

		// Anything cached locally but absent from this update has been deleted.
		var deleted []pluginapi.Device
		for id, d := range devices {
			if _, ok := newDevs[id]; ok {
				continue
			}

			glog.Errorf("Device %s was deleted", d.ID)
			deleted = append(deleted, d)
			delete(devices, id)
		}

		e.mutex.Lock()
		// NOTE: Return a copy of 'devices' instead of returning a direct reference to local 'devices'
		e.devices = make(map[string]pluginapi.Device)
		for _, d := range devices {
			e.devices[d.ID] = d
		}
		e.mutex.Unlock()

		e.callback(e.resourceName, added, updated, deleted)
	}
}
// allocate issues Allocate gRPC call to the device plugin, requesting the
// given device IDs on behalf of a container.
func (e *endpointImpl) allocate(devs []string) (*pluginapi.AllocateResponse, error) {
	return e.client.Allocate(context.Background(), &pluginapi.AllocateRequest{
		DevicesIDs: devs,
	})
}
// stop closes the gRPC connection to the device plugin; the endpoint is
// unusable afterwards.
func (e *endpointImpl) stop() {
	e.clientConn.Close()
}
// dial establishes the gRPC communication with the registered device plugin. https://godoc.org/google.golang.org/grpc#Dial
func dial(unixSocketPath string) (pluginapi.DevicePluginClient, *grpc.ClientConn, error) {
	// Block until the connection is up, failing after 10 seconds. The custom
	// dialer is required because the plugin listens on a unix domain socket.
	c, err := grpc.Dial(unixSocketPath, grpc.WithInsecure(), grpc.WithBlock(),
		grpc.WithTimeout(10*time.Second),
		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
			return net.DialTimeout("unix", addr, timeout)
		}),
	)
	if err != nil {
		return nil, nil, fmt.Errorf(errFailedToDialDevicePlugin+" %v", err)
	}
	return pluginapi.NewDevicePluginClient(c), c, nil
}
| lichen2013/kubernetes | pkg/kubelet/cm/deviceplugin/endpoint.go | GO | apache-2.0 | 5,390 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.util.test;
import java.io.IOException;
import java.util.List;
import org.testng.Assert;
import org.testng.annotations.Test;
import org.apache.gobblin.configuration.SourceState;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.source.extractor.DataRecordException;
import org.apache.gobblin.source.extractor.Extractor;
import org.apache.gobblin.source.workunit.WorkUnit;
/**
* Unit tests for {@link StressTestingSource}
*/
public class TestStressTestingSource {
@Test
public void testSourceExtractor() throws DataRecordException, IOException {
final int MEM_ALLOC_BYTES = 100;
final int NUM_WORK_UNITS = 10;
final int COMPUTE_TIME_MICRO = 10;
final int NUM_RECORDS = 10000;
SourceState state = new SourceState();
state.setProp(StressTestingSource.NUM_WORK_UNITS_KEY, NUM_WORK_UNITS);
state.setProp(StressTestingSource.MEM_ALLOC_BYTES_KEY, MEM_ALLOC_BYTES);
state.setProp(StressTestingSource.COMPUTE_TIME_MICRO_KEY, COMPUTE_TIME_MICRO);
state.setProp(StressTestingSource.NUM_RECORDS_KEY, NUM_RECORDS);
StressTestingSource source = new StressTestingSource();
List<WorkUnit> wus = source.getWorkunits(state);
Assert.assertEquals(wus.size(), NUM_WORK_UNITS);
for (int i = 0; i < wus.size(); ++i) {
WorkUnit wu = wus.get(i);
WorkUnitState wuState = new WorkUnitState(wu, state);
Extractor<String, byte[]> extractor = source.getExtractor(wuState);
Assert.assertEquals(extractor.getExpectedRecordCount(), NUM_RECORDS);
Assert.assertEquals(extractor.readRecord(null).length, 100);
}
}
@Test (enabled=false)
public void testComputeTime() throws DataRecordException, IOException {
final int MEM_ALLOC_BYTES = 100;
final int NUM_WORK_UNITS = 1;
final int COMPUTE_TIME_MICRO = 10000;
final int NUM_RECORDS = 500;
SourceState state = new SourceState();
state.setProp(StressTestingSource.NUM_WORK_UNITS_KEY, NUM_WORK_UNITS);
state.setProp(StressTestingSource.MEM_ALLOC_BYTES_KEY, MEM_ALLOC_BYTES);
state.setProp(StressTestingSource.COMPUTE_TIME_MICRO_KEY, COMPUTE_TIME_MICRO);
state.setProp(StressTestingSource.NUM_RECORDS_KEY, NUM_RECORDS);
StressTestingSource source = new StressTestingSource();
List<WorkUnit> wus = source.getWorkunits(state);
Assert.assertEquals(wus.size(), NUM_WORK_UNITS);
WorkUnit wu = wus.get(0);
WorkUnitState wuState = new WorkUnitState(wu, state);
Extractor<String, byte[]> extractor = source.getExtractor(wuState);
byte[] record;
long startTimeNano = System.nanoTime();
while ((record = extractor.readRecord(null)) != null) {
Assert.assertEquals(record.length, 100);
}
long endTimeNano = System.nanoTime();
long timeSpentMicro = (endTimeNano - startTimeNano)/(1000);
// check that there is less than 5 second difference between expected and actual time spent
Assert.assertTrue(Math.abs(timeSpentMicro - (COMPUTE_TIME_MICRO * NUM_RECORDS)) < (5000000),
"Time spent " + timeSpentMicro);
}
@Test (enabled=false)
public void testSleepTime() throws DataRecordException, IOException {
final int MEM_ALLOC_BYTES = 100;
final int NUM_WORK_UNITS = 1;
final int SLEEP_TIME_MICRO = 10000;
final int NUM_RECORDS = 500;
SourceState state = new SourceState();
state.setProp(StressTestingSource.NUM_WORK_UNITS_KEY, NUM_WORK_UNITS);
state.setProp(StressTestingSource.MEM_ALLOC_BYTES_KEY, MEM_ALLOC_BYTES);
state.setProp(StressTestingSource.SLEEP_TIME_MICRO_KEY, SLEEP_TIME_MICRO);
state.setProp(StressTestingSource.NUM_RECORDS_KEY, NUM_RECORDS);
StressTestingSource source = new StressTestingSource();
List<WorkUnit> wus = source.getWorkunits(state);
Assert.assertEquals(wus.size(), NUM_WORK_UNITS);
WorkUnit wu = wus.get(0);
WorkUnitState wuState = new WorkUnitState(wu, state);
Extractor<String, byte[]> extractor = source.getExtractor(wuState);
byte[] record;
long startTimeNano = System.nanoTime();
while ((record = extractor.readRecord(null)) != null) {
Assert.assertEquals(record.length, 100);
}
long endTimeNano = System.nanoTime();
long timeSpentMicro = (endTimeNano - startTimeNano)/(1000);
// check that there is less than 2 second difference between expected and actual time spent
Assert.assertTrue(Math.abs(timeSpentMicro - (SLEEP_TIME_MICRO * NUM_RECORDS)) < (2000000),
"Time spent " + timeSpentMicro);
}
@Test (enabled=false)
public void testRunDuration() throws DataRecordException, IOException {
final int MEM_ALLOC_BYTES = 100;
final int NUM_WORK_UNITS = 1;
final int SLEEP_TIME_MICRO = 1000;
final int NUM_RECORDS = 30; // this config is ignored since the duration is set
final int RUN_DURATION_SECS = 5;
SourceState state = new SourceState();
state.setProp(StressTestingSource.NUM_WORK_UNITS_KEY, NUM_WORK_UNITS);
state.setProp(StressTestingSource.MEM_ALLOC_BYTES_KEY, MEM_ALLOC_BYTES);
state.setProp(StressTestingSource.SLEEP_TIME_MICRO_KEY, SLEEP_TIME_MICRO);
state.setProp(StressTestingSource.NUM_RECORDS_KEY, NUM_RECORDS);
state.setProp(StressTestingSource.RUN_DURATION_KEY, RUN_DURATION_SECS);
StressTestingSource source = new StressTestingSource();
List<WorkUnit> wus = source.getWorkunits(state);
Assert.assertEquals(wus.size(), NUM_WORK_UNITS);
WorkUnit wu = wus.get(0);
WorkUnitState wuState = new WorkUnitState(wu, state);
Extractor<String, byte[]> extractor = source.getExtractor(wuState);
byte[] record;
long startTimeNano = System.nanoTime();
while ((record = extractor.readRecord(null)) != null) {
Assert.assertEquals(record.length, 100);
}
long endTimeNano = System.nanoTime();
long timeSpentMicro = (endTimeNano - startTimeNano)/(1000);
// check that there is less than 1 second difference between expected and actual time spent
Assert.assertTrue(Math.abs(timeSpentMicro - (RUN_DURATION_SECS * 1000000)) < (1000000),
"Time spent " + timeSpentMicro);
}
}
| aditya1105/gobblin | gobblin-utility/src/test/java/org/apache/gobblin/util/test/TestStressTestingSource.java | Java | apache-2.0 | 6,917 |
"""Tests for the Verisure platform."""
from contextlib import contextmanager
from unittest.mock import call, patch
from homeassistant.components.lock import (
DOMAIN as LOCK_DOMAIN,
SERVICE_LOCK,
SERVICE_UNLOCK,
)
from homeassistant.components.verisure import DOMAIN as VERISURE_DOMAIN
from homeassistant.const import STATE_UNLOCKED
from homeassistant.setup import async_setup_component
# Verisure config with locks enabled and no default_lock_code: lock/unlock
# service calls must therefore supply an explicit code to have any effect.
NO_DEFAULT_LOCK_CODE_CONFIG = {
    "verisure": {
        "username": "test",
        "password": "test",
        "locks": True,
        "alarm": False,
        "door_window": False,
        "hygrometers": False,
        "mouse": False,
        "smartplugs": False,
        "thermometers": False,
        "smartcam": False,
    }
}

# Same config but with default_lock_code "9999", which is used whenever a
# service call does not pass a code of its own.
DEFAULT_LOCK_CODE_CONFIG = {
    "verisure": {
        "username": "test",
        "password": "test",
        "locks": True,
        "default_lock_code": "9999",
        "alarm": False,
        "door_window": False,
        "hygrometers": False,
        "mouse": False,
        "smartplugs": False,
        "thermometers": False,
        "smartcam": False,
    }
}

# Lock device IDs reported by the mocked hub.
LOCKS = ["door_lock"]
@contextmanager
def mock_hub(config, get_response=LOCKS[0]):
    """Extensively mock out a verisure hub."""
    lock_hub_target = "homeassistant.components.verisure.lock.hub"
    # Since there is no conf to disable ethernet status, mock hub for
    # binary sensor too
    binary_sensor_hub_target = "homeassistant.components.verisure.binary_sensor.hub"
    session_target = "verisure.Session"

    with patch(session_target) as session, patch(lock_hub_target) as hub:
        session.login.return_value = True

        hub.config = config["verisure"]
        hub.get.return_value = LOCKS
        hub.get_first.return_value = get_response.upper()
        hub.session.set_lock_state.return_value = {
            "doorLockStateChangeTransactionId": "test"
        }
        hub.session.get_lock_state_transaction.return_value = {"result": "OK"}

        with patch(binary_sensor_hub_target, hub):
            yield hub
async def setup_verisure_locks(hass, config):
    """Set up mock verisure locks.

    Loads the verisure integration under a mocked hub and waits for setup to
    settle, then sanity-checks the resulting entity count.
    """
    with mock_hub(config):
        await async_setup_component(hass, VERISURE_DOMAIN, config)
        await hass.async_block_till_done()
    # lock.door_lock, ethernet_status
    assert len(hass.states.async_all()) == 2
async def test_verisure_no_default_code(hass):
    """Test configs without a default lock code.

    Without a default code, service calls that omit the code must be no-ops,
    while calls with an explicit code reach the hub with that code.
    """
    await setup_verisure_locks(hass, NO_DEFAULT_LOCK_CODE_CONFIG)
    with mock_hub(NO_DEFAULT_LOCK_CODE_CONFIG, STATE_UNLOCKED) as hub:
        mock = hub.session.set_lock_state
        # Lock without a code: nothing should reach the hub.
        await hass.services.async_call(
            LOCK_DOMAIN, SERVICE_LOCK, {"entity_id": "lock.door_lock"}
        )
        await hass.async_block_till_done()
        assert mock.call_count == 0

        # Lock with an explicit code: forwarded verbatim.
        await hass.services.async_call(
            LOCK_DOMAIN, SERVICE_LOCK, {"entity_id": "lock.door_lock", "code": "12345"}
        )
        await hass.async_block_till_done()
        assert mock.call_args == call("12345", LOCKS[0], "lock")

        mock.reset_mock()
        # Unlock without a code: again a no-op.
        await hass.services.async_call(
            LOCK_DOMAIN, SERVICE_UNLOCK, {"entity_id": "lock.door_lock"}
        )
        await hass.async_block_till_done()
        assert mock.call_count == 0

        # Unlock with an explicit code: forwarded verbatim.
        await hass.services.async_call(
            LOCK_DOMAIN,
            SERVICE_UNLOCK,
            {"entity_id": "lock.door_lock", "code": "12345"},
        )
        await hass.async_block_till_done()
        assert mock.call_args == call("12345", LOCKS[0], "unlock")
async def test_verisure_default_code(hass):
    """Test configs with a default lock code.

    With default_lock_code set, calls that omit the code fall back to "9999",
    while an explicit code still overrides the default.
    """
    await setup_verisure_locks(hass, DEFAULT_LOCK_CODE_CONFIG)
    with mock_hub(DEFAULT_LOCK_CODE_CONFIG, STATE_UNLOCKED) as hub:
        mock = hub.session.set_lock_state
        # Lock without a code: the configured default "9999" is used.
        await hass.services.async_call(
            LOCK_DOMAIN, SERVICE_LOCK, {"entity_id": "lock.door_lock"}
        )
        await hass.async_block_till_done()
        assert mock.call_args == call("9999", LOCKS[0], "lock")

        # Unlock without a code: default applies here too.
        await hass.services.async_call(
            LOCK_DOMAIN, SERVICE_UNLOCK, {"entity_id": "lock.door_lock"}
        )
        await hass.async_block_till_done()
        assert mock.call_args == call("9999", LOCKS[0], "unlock")

        # Explicit codes override the default for both operations.
        await hass.services.async_call(
            LOCK_DOMAIN, SERVICE_LOCK, {"entity_id": "lock.door_lock", "code": "12345"}
        )
        await hass.async_block_till_done()
        assert mock.call_args == call("12345", LOCKS[0], "lock")

        await hass.services.async_call(
            LOCK_DOMAIN,
            SERVICE_UNLOCK,
            {"entity_id": "lock.door_lock", "code": "12345"},
        )
        await hass.async_block_till_done()
        assert mock.call_args == call("12345", LOCKS[0], "unlock")
| partofthething/home-assistant | tests/components/verisure/test_lock.py | Python | apache-2.0 | 4,819 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.sparkrest;
import org.apache.camel.CamelContext;
import org.apache.camel.support.DefaultMessage;
import spark.Request;
import spark.Response;
/**
 * Spark based {@link org.apache.camel.Message}.
 * <p/>
 * This implementation allows direct access to the Spark {@link Request} using
 * the {@link #getRequest()} method.
 */
public class SparkMessage extends DefaultMessage {

    // Underlying Spark HTTP exchange; marked transient to exclude them from
    // any serialization of the message.
    private final transient Request sparkRequest;
    private final transient Response sparkResponse;

    public SparkMessage(CamelContext camelContext, Request request, Response response) {
        super(camelContext);
        this.sparkRequest = request;
        this.sparkResponse = response;
    }

    /** @return the underlying Spark HTTP response */
    public Response getResponse() {
        return sparkResponse;
    }

    /** @return the underlying Spark HTTP request */
    public Request getRequest() {
        return sparkRequest;
    }

    @Override
    public DefaultMessage newInstance() {
        // A fresh message sharing the same Spark request/response pair.
        return new SparkMessage(getCamelContext(), sparkRequest, sparkResponse);
    }
}
| ullgren/camel | components/camel-spark-rest/src/main/java/org/apache/camel/component/sparkrest/SparkMessage.java | Java | apache-2.0 | 1,767 |
/*
* Copyright © 2015 Cask Data, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package co.cask.cdap.api.dataset.lib;
import co.cask.cdap.api.mapreduce.MapReduceTaskContext;
/**
 * Responsible for dynamically determining a {@link PartitionKey}.
 * For each K, V pair, the getPartitionKey(K, V) method is called to determine a PartitionKey.
 *
 * @param <K> Type of key
 * @param <V> Type of value
 */
public abstract class DynamicPartitioner<K, V> {

  /**
   * Initializes a DynamicPartitioner.
   * <p>
   * This method will be called only once per {@link DynamicPartitioner} instance. It is the first method call
   * on that instance.
   * </p>
   * @param mapReduceTaskContext the mapReduceTaskContext for the task that this DynamicPartitioner is running in.
   *                             Note that the hadoop context is not available on this MapReduceTaskContext.
   */
  public void initialize(MapReduceTaskContext<K, V> mapReduceTaskContext) {
    // do nothing by default; subclasses override to acquire resources
  }

  /**
   * Destroys a DynamicPartitioner.
   * <p>
   * This method will be called only once per {@link DynamicPartitioner} instance. It is the last method call
   * on that instance.
   * </p>
   */
  public void destroy() {
    // do nothing by default; subclasses override to release resources
  }

  /**
   * Determine the PartitionKey for the key-value pair to be written to.
   *
   * @param key the key to be written
   * @param value the value to be written
   * @return the {@link PartitionKey} for the key-value pair to be written to.
   */
  public abstract PartitionKey getPartitionKey(K key, V value);
}
| chtyim/cdap | cdap-api/src/main/java/co/cask/cdap/api/dataset/lib/DynamicPartitioner.java | Java | apache-2.0 | 2,078 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.aries.spifly.dynamic;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.net.URL;
import java.net.URLClassLoader;
import java.security.ProtectionDomain;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Dictionary;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import javax.xml.parsers.DocumentBuilderFactory;
import org.apache.aries.spifly.BaseActivator;
import org.apache.aries.spifly.SpiFlyConstants;
import org.apache.aries.spifly.Streams;
import org.easymock.EasyMock;
import org.easymock.IAnswer;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.osgi.framework.Bundle;
import org.osgi.framework.BundleContext;
import org.osgi.framework.BundleReference;
import org.osgi.framework.Version;
import org.osgi.framework.hooks.weaving.WeavingHook;
import org.osgi.framework.hooks.weaving.WovenClass;
import org.osgi.framework.wiring.BundleRevision;
import org.osgi.framework.wiring.BundleWiring;
public class ClientWeavingHookTest {
DynamicWeavingActivator activator;
private static final String thisJVMsDBF = DocumentBuilderFactory.newInstance().getClass().getName();
@Before
public void setUp() {
activator = new DynamicWeavingActivator();
BaseActivator.activator = activator;
}
@After
public void tearDown() {
BaseActivator.activator = null;
activator = null;
}
    @Test
    public void testBasicServiceLoaderUsage() throws Exception {
        // Consumer opts in to weaving for all ServiceLoader calls ("*").
        Dictionary<String, String> consumerHeaders = new Hashtable<String, String>();
        consumerHeaders.put(SpiFlyConstants.SPI_CONSUMER_HEADER, "*");

        // Register the bundle that provides the SPI implementation.
        Bundle providerBundle = mockProviderBundle("impl1", 1);
        activator.registerProviderBundle("org.apache.aries.mytest.MySPI", providerBundle, new HashMap<String, Object>());

        Bundle consumerBundle = mockConsumerBundle(consumerHeaders, providerBundle);
        activator.addConsumerWeavingData(consumerBundle, SpiFlyConstants.SPI_CONSUMER_HEADER);
        Bundle spiFlyBundle = mockSpiFlyBundle("spifly", Version.parseVersion("1.9.4"), consumerBundle, providerBundle);
        WeavingHook wh = new ClientWeavingHook(spiFlyBundle.getBundleContext(), activator);

        // Weave the TestClient class.
        URL clsUrl = getClass().getResource("TestClient.class");
        Assert.assertNotNull("Precondition", clsUrl);
        String clientClassName = "org.apache.aries.spifly.dynamic.TestClient";
        WovenClass wc = new MyWovenClass(clsUrl, clientClassName, consumerBundle);
        Assert.assertEquals("Precondition", 0, wc.getDynamicImports().size());
        wh.weave(wc);
        Assert.assertEquals(1, wc.getDynamicImports().size());

        // The generated dynamic import may list its directives in either order.
        String di1 = "org.apache.aries.spifly;bundle-symbolic-name=spifly;bundle-version=1.9.4";
        String di2 = "org.apache.aries.spifly;bundle-version=1.9.4;bundle-symbolic-name=spifly";
        String di = wc.getDynamicImports().get(0);
        Assert.assertTrue("Weaving should have added a dynamic import", di1.equals(di) || di2.equals(di));

        // Invoke the woven class and check that it properly sets the TCCL so that the
        // META-INF/services/org.apache.aries.mytest.MySPI file from impl1 is visible.
        // impl1's provider reverses its input ("hello" -> "olleh").
        Class<?> cls = wc.getDefinedClass();
        Method method = cls.getMethod("test", new Class [] {String.class});
        Object result = method.invoke(cls.newInstance(), "hello");
        Assert.assertEquals(Collections.singleton("olleh"), result);
    }
@Test
public void testTCCLResetting() throws Exception {
ClassLoader cl = new URLClassLoader(new URL [] {});
Thread.currentThread().setContextClassLoader(cl);
Assert.assertSame("Precondition", cl, Thread.currentThread().getContextClassLoader());
Dictionary<String, String> consumerHeaders = new Hashtable<String, String>();
consumerHeaders.put(SpiFlyConstants.SPI_CONSUMER_HEADER, "*");
// Register the bundle that provides the SPI implementation.
Bundle providerBundle = mockProviderBundle("impl1", 1);
activator.registerProviderBundle("org.apache.aries.mytest.MySPI", providerBundle, new HashMap<String, Object>());
Bundle consumerBundle = mockConsumerBundle(consumerHeaders, providerBundle);
activator.addConsumerWeavingData(consumerBundle, SpiFlyConstants.SPI_CONSUMER_HEADER);
Bundle spiFlyBundle = mockSpiFlyBundle("spifly", Version.parseVersion("1.9.4"), consumerBundle, providerBundle);
WeavingHook wh = new ClientWeavingHook(spiFlyBundle.getBundleContext(), activator);
// Weave the TestClient class.
URL clsUrl = getClass().getResource("TestClient.class");
Assert.assertNotNull("Precondition", clsUrl);
String clientClassName = "org.apache.aries.spifly.dynamic.TestClient";
WovenClass wc = new MyWovenClass(clsUrl, clientClassName, consumerBundle);
Assert.assertEquals("Precondition", 0, wc.getDynamicImports().size());
wh.weave(wc);
Assert.assertEquals(1, wc.getDynamicImports().size());
String di1 = "org.apache.aries.spifly;bundle-symbolic-name=spifly;bundle-version=1.9.4";
String di2 = "org.apache.aries.spifly;bundle-version=1.9.4;bundle-symbolic-name=spifly";
String di = wc.getDynamicImports().get(0);
Assert.assertTrue("Weaving should have added a dynamic import", di1.equals(di) || di2.equals(di));
// Invoke the woven class and check that it properly sets the TCCL so that the
// META-INF/services/org.apache.aries.mytest.MySPI file from impl1 is visible.
Class<?> cls = wc.getDefinedClass();
Method method = cls.getMethod("test", new Class [] {String.class});
method.invoke(cls.newInstance(), "hi there");
Assert.assertSame(cl, Thread.currentThread().getContextClassLoader());
}
    @Test
    public void testTCCLResettingOnException() {
        // TODO placeholder: should verify that the TCCL is restored even when
        // the woven client code throws. Not yet implemented.
    }
    @Test
    public void testAltServiceLoaderLoadUnprocessed() throws Exception {
        Bundle spiFlyBundle = mockSpiFlyBundle();

        Dictionary<String, String> headers = new Hashtable<String, String>();
        headers.put(SpiFlyConstants.SPI_CONSUMER_HEADER, "*");
        Bundle consumerBundle = mockConsumerBundle(headers, spiFlyBundle);
        WeavingHook wh = new ClientWeavingHook(spiFlyBundle.getBundleContext(), activator);

        // Weave the TestClient class.
        URL clsUrl = getClass().getResource("UnaffectedTestClient.class");
        Assert.assertNotNull("Precondition", clsUrl);
        WovenClass wc = new MyWovenClass(clsUrl, "org.apache.aries.spifly.dynamic.UnaffectedTestClient", consumerBundle);
        Assert.assertEquals("Precondition", 0, wc.getDynamicImports().size());
        wh.weave(wc);
        Assert.assertEquals("The client is not affected so no additional imports should have been added",
            0, wc.getDynamicImports().size());

        // ok the weaving is done, now prepare the registry for the call
        Bundle providerBundle = mockProviderBundle("impl1", 1);
        activator.registerProviderBundle("org.apache.aries.mytest.MySPI", providerBundle, new HashMap<String, Object>());

        // Invoke the (unwoven) client. Since no TCCL switching was woven in,
        // the registered impl1 provider is NOT used; the result "impl4"
        // presumably comes from a provider on the test classpath — verify
        // against the UnaffectedTestClient fixture.
        Class<?> cls = wc.getDefinedClass();
        Method method = cls.getMethod("test", new Class [] {String.class});
        Object result = method.invoke(cls.newInstance(), "hello");
        Assert.assertEquals("impl4", result);
    }
    @Test
    public void testMultipleProviders() throws Exception {
        Bundle spiFlyBundle = mockSpiFlyBundle();

        // Consumer opts in to weaving for all ServiceLoader calls ("*").
        Dictionary<String, String> headers = new Hashtable<String, String>();
        headers.put(SpiFlyConstants.SPI_CONSUMER_HEADER, "*");
        Bundle consumerBundle = mockConsumerBundle(headers, spiFlyBundle);
        activator.addConsumerWeavingData(consumerBundle, SpiFlyConstants.SPI_CONSUMER_HEADER);
        WeavingHook wh = new ClientWeavingHook(spiFlyBundle.getBundleContext(), activator);

        // Weave the TestClient class.
        URL clsUrl = getClass().getResource("TestClient.class");
        WovenClass wc = new MyWovenClass(clsUrl, "org.apache.aries.spifly.dynamic.TestClient", consumerBundle);
        wh.weave(wc);

        // Two provider bundles for the same SPI; both must be visible.
        Bundle providerBundle1 = mockProviderBundle("impl1", 1);
        Bundle providerBundle2 = mockProviderBundle("impl2", 2);
        activator.registerProviderBundle("org.apache.aries.mytest.MySPI", providerBundle1, new HashMap<String, Object>());
        activator.registerProviderBundle("org.apache.aries.mytest.MySPI", providerBundle2, new HashMap<String, Object>());

        // Invoke the woven class and check that it properly sets the TCCL so that the
        // META-INF/services/org.apache.aries.mytest.MySPI files from impl1 and impl2 are visible.
        Class<?> cls = wc.getDefinedClass();
        Method method = cls.getMethod("test", new Class [] {String.class});
        Object result = method.invoke(cls.newInstance(), "hello");

        // impl1 contributes "olleh"; impl2 contributes "HELLO" and "5".
        Set<String> expected = new HashSet<String>(Arrays.asList("olleh", "HELLO", "5"));
        Assert.assertEquals("All three services should be invoked", expected, result);
    }
    @Test
    public void testClientSpecifyingProvider() throws Exception {
        // Consumer restricts the ServiceLoader.load() call to bundle "impl2".
        Dictionary<String, String> headers = new Hashtable<String, String>();
        headers.put(SpiFlyConstants.SPI_CONSUMER_HEADER, "java.util.ServiceLoader#load(java.lang.Class);bundle=impl2");

        Bundle providerBundle1 = mockProviderBundle("impl1", 1);
        Bundle providerBundle2 = mockProviderBundle("impl2", 2);
        activator.registerProviderBundle("org.apache.aries.mytest.MySPI", providerBundle1, new HashMap<String, Object>());
        activator.registerProviderBundle("org.apache.aries.mytest.MySPI", providerBundle2, new HashMap<String, Object>());

        Bundle consumerBundle = mockConsumerBundle(headers, providerBundle1, providerBundle2);
        activator.addConsumerWeavingData(consumerBundle, SpiFlyConstants.SPI_CONSUMER_HEADER);
        Bundle spiFlyBundle = mockSpiFlyBundle(consumerBundle, providerBundle1, providerBundle2);
        WeavingHook wh = new ClientWeavingHook(spiFlyBundle.getBundleContext(), activator);

        // Weave the TestClient class.
        URL clsUrl = getClass().getResource("TestClient.class");
        WovenClass wc = new MyWovenClass(clsUrl, "org.apache.aries.spifly.dynamic.TestClient", consumerBundle);
        wh.weave(wc);

        // Invoke the woven class and check that it properly sets the TCCL so that the
        // META-INF/services/org.apache.aries.mytest.MySPI file from impl2 is visible.
        Class<?> cls = wc.getDefinedClass();
        Method method = cls.getMethod("test", new Class [] {String.class});
        Object result = method.invoke(cls.newInstance(), "hello");

        // impl1's "olleh" must be absent; only impl2's providers respond.
        Set<String> expected = new HashSet<String>(Arrays.asList("HELLO", "5"));
        Assert.assertEquals("Only the services from bundle impl2 should be selected", expected, result);
    }
    @Test
    public void testClientSpecifyingProviderVersion() throws Exception {
        // Consumer restricts the provider by symbolic name AND version 1.2.3.
        Dictionary<String, String> headers = new Hashtable<String, String>();
        headers.put(SpiFlyConstants.SPI_CONSUMER_HEADER, "java.util.ServiceLoader#load(java.lang.Class);bundle=impl2:version=1.2.3");

        Bundle providerBundle1 = mockProviderBundle("impl1", 1);
        Bundle providerBundle2 = mockProviderBundle("impl2", 2);
        // Same symbolic name as impl2 but at version 1.2.3 — this one must win.
        Bundle providerBundle3 = mockProviderBundle("impl2_123", 3, new Version(1, 2, 3));
        activator.registerProviderBundle("org.apache.aries.mytest.MySPI", providerBundle1, new HashMap<String, Object>());
        activator.registerProviderBundle("org.apache.aries.mytest.MySPI", providerBundle2, new HashMap<String, Object>());
        activator.registerProviderBundle("org.apache.aries.mytest.MySPI", providerBundle3, new HashMap<String, Object>());

        Bundle consumerBundle = mockConsumerBundle(headers, providerBundle1, providerBundle2, providerBundle3);
        activator.addConsumerWeavingData(consumerBundle, SpiFlyConstants.SPI_CONSUMER_HEADER);
        Bundle spiFlyBundle = mockSpiFlyBundle(consumerBundle, providerBundle1, providerBundle2, providerBundle3);
        WeavingHook wh = new ClientWeavingHook(spiFlyBundle.getBundleContext(), activator);

        // Weave the TestClient class.
        URL clsUrl = getClass().getResource("TestClient.class");
        WovenClass wc = new MyWovenClass(clsUrl, "org.apache.aries.spifly.dynamic.TestClient", consumerBundle);
        wh.weave(wc);

        // Invoke the woven class and check that it properly sets the TCCL so that
        // only the versioned provider (impl2_123) is visible.
        Class<?> cls = wc.getDefinedClass();
        Method method = cls.getMethod("test", new Class [] {String.class});
        Object result = method.invoke(cls.newInstance(), "hello");
        Assert.assertEquals("Only the services from bundle impl2 should be selected", Collections.singleton("Updated!hello!Updated"), result);
    }
    @Test
    public void testClientMultipleTargetBundles() throws Exception {
        // Consumer targets two provider bundles by symbolic name: impl1|impl4.
        Dictionary<String, String> headers = new Hashtable<String, String>();
        headers.put(SpiFlyConstants.SPI_CONSUMER_HEADER,
                "java.util.ServiceLoader#load(java.lang.Class);bundle=impl1|impl4");

        Bundle providerBundle1 = mockProviderBundle("impl1", 1);
        Bundle providerBundle2 = mockProviderBundle("impl2", 2);
        Bundle providerBundle4 = mockProviderBundle("impl4", 4);
        activator.registerProviderBundle("org.apache.aries.mytest.MySPI", providerBundle1, new HashMap<String, Object>());
        activator.registerProviderBundle("org.apache.aries.mytest.MySPI", providerBundle2, new HashMap<String, Object>());
        activator.registerProviderBundle("org.apache.aries.mytest.AltSPI", providerBundle2, new HashMap<String, Object>());
        activator.registerProviderBundle("org.apache.aries.mytest.MySPI", providerBundle4, new HashMap<String, Object>());
        activator.registerProviderBundle("org.apache.aries.mytest.AltSPI", providerBundle4, new HashMap<String, Object>());

        Bundle consumerBundle = mockConsumerBundle(headers, providerBundle1, providerBundle2, providerBundle4);
        activator.addConsumerWeavingData(consumerBundle, SpiFlyConstants.SPI_CONSUMER_HEADER);
        Bundle spiFlyBundle = mockSpiFlyBundle(consumerBundle, providerBundle1, providerBundle2, providerBundle4);
        WeavingHook wh = new ClientWeavingHook(spiFlyBundle.getBundleContext(), activator);

        // Weave the TestClient class.
        URL clsUrl = getClass().getResource("TestClient.class");
        WovenClass wc = new MyWovenClass(clsUrl, "org.apache.aries.spifly.dynamic.TestClient", consumerBundle);
        wh.weave(wc);

        // Invoke the woven class and check that it properly sets the TCCL so that the
        // MySPI providers from impl1 and impl4 are visible (impl2 must be excluded).
        Class<?> cls = wc.getDefinedClass();
        Method method = cls.getMethod("test", new Class [] {String.class});
        Object result = method.invoke(cls.newInstance(), "hello");

        Set<String> expected = new HashSet<String>(Arrays.asList("olleh", "impl4"));
        Assert.assertEquals("All providers should be selected for this one", expected, result);
    }
    @Test
    public void testClientMultipleTargetBundles2() throws Exception {
        // Same as testClientMultipleTargetBundles, but the consumer targets
        // the providers by bundle ID (1|4) instead of symbolic name.
        Dictionary<String, String> headers = new Hashtable<String, String>();
        headers.put(SpiFlyConstants.SPI_CONSUMER_HEADER,
                "java.util.ServiceLoader#load(java.lang.Class);bundleId=1|4");

        Bundle providerBundle1 = mockProviderBundle("impl1", 1);
        Bundle providerBundle2 = mockProviderBundle("impl2", 2);
        Bundle providerBundle4 = mockProviderBundle("impl4", 4);
        activator.registerProviderBundle("org.apache.aries.mytest.MySPI", providerBundle1, new HashMap<String, Object>());
        activator.registerProviderBundle("org.apache.aries.mytest.MySPI", providerBundle2, new HashMap<String, Object>());
        activator.registerProviderBundle("org.apache.aries.mytest.AltSPI", providerBundle2, new HashMap<String, Object>());
        activator.registerProviderBundle("org.apache.aries.mytest.MySPI", providerBundle4, new HashMap<String, Object>());
        activator.registerProviderBundle("org.apache.aries.mytest.AltSPI", providerBundle4, new HashMap<String, Object>());

        Bundle consumerBundle = mockConsumerBundle(headers, providerBundle1, providerBundle2, providerBundle4);
        activator.addConsumerWeavingData(consumerBundle, SpiFlyConstants.SPI_CONSUMER_HEADER);
        Bundle spiFlyBundle = mockSpiFlyBundle(consumerBundle, providerBundle1, providerBundle2, providerBundle4);
        WeavingHook wh = new ClientWeavingHook(spiFlyBundle.getBundleContext(), activator);

        // Weave the TestClient class.
        URL clsUrl = getClass().getResource("TestClient.class");
        WovenClass wc = new MyWovenClass(clsUrl, "org.apache.aries.spifly.dynamic.TestClient", consumerBundle);
        wh.weave(wc);

        // Invoke the woven class and check that it properly sets the TCCL so that the
        // MySPI providers from bundles 1 (impl1) and 4 (impl4) are visible.
        Class<?> cls = wc.getDefinedClass();
        Method method = cls.getMethod("test", new Class [] {String.class});
        Object result = method.invoke(cls.newInstance(), "hello");

        Set<String> expected = new HashSet<String>(Arrays.asList("olleh", "impl4"));
        Assert.assertEquals("All providers should be selected for this one", expected, result);
    }
    @Test
    public void testClientSpecificProviderLoadArgument() throws Exception {
        // Two instructions: load(MySPI) is unrestricted, load(AltSPI) is
        // restricted to bundle impl4.
        Dictionary<String, String> headers = new Hashtable<String, String>();
        headers.put(SpiFlyConstants.SPI_CONSUMER_HEADER,
                "java.util.ServiceLoader#load(java.lang.Class[org.apache.aries.mytest.MySPI])," +
                "java.util.ServiceLoader#load(java.lang.Class[org.apache.aries.mytest.AltSPI]);bundle=impl4");

        Bundle providerBundle1 = mockProviderBundle("impl1", 1);
        Bundle providerBundle2 = mockProviderBundle("impl2", 2);
        Bundle providerBundle4 = mockProviderBundle("impl4", 4);
        activator.registerProviderBundle("org.apache.aries.mytest.MySPI", providerBundle1, new HashMap<String, Object>());
        activator.registerProviderBundle("org.apache.aries.mytest.MySPI", providerBundle2, new HashMap<String, Object>());
        activator.registerProviderBundle("org.apache.aries.mytest.AltSPI", providerBundle2, new HashMap<String, Object>());
        activator.registerProviderBundle("org.apache.aries.mytest.MySPI", providerBundle4, new HashMap<String, Object>());
        activator.registerProviderBundle("org.apache.aries.mytest.AltSPI", providerBundle4, new HashMap<String, Object>());

        Bundle consumerBundle = mockConsumerBundle(headers, providerBundle1, providerBundle2, providerBundle4);
        activator.addConsumerWeavingData(consumerBundle, SpiFlyConstants.SPI_CONSUMER_HEADER);
        Bundle spiFlyBundle = mockSpiFlyBundle(consumerBundle, providerBundle1, providerBundle2, providerBundle4);
        WeavingHook wh = new ClientWeavingHook(spiFlyBundle.getBundleContext(), activator);

        // Weave the TestClient class (uses MySPI — unrestricted).
        URL clsUrl = getClass().getResource("TestClient.class");
        WovenClass wc = new MyWovenClass(clsUrl, "org.apache.aries.spifly.dynamic.TestClient", consumerBundle);
        wh.weave(wc);

        // Invoke the woven class; all MySPI providers should be visible.
        Class<?> cls = wc.getDefinedClass();
        Method method = cls.getMethod("test", new Class [] {String.class});
        Object result = method.invoke(cls.newInstance(), "hello");

        Set<String> expected = new HashSet<String>(Arrays.asList("olleh", "impl4", "HELLO", "5"));
        Assert.assertEquals("All providers should be selected for this one", expected, result);

        // Weave the AltTestClient class (uses AltSPI — restricted to impl4).
        URL cls2Url = getClass().getResource("AltTestClient.class");
        WovenClass wc2 = new MyWovenClass(cls2Url, "org.apache.aries.spifly.dynamic.AltTestClient", consumerBundle);
        wh.weave(wc2);

        // Invoke the AltTestClient
        Class<?> cls2 = wc2.getDefinedClass();
        Method method2 = cls2.getMethod("test", new Class [] {long.class});
        Object result2 = method2.invoke(cls2.newInstance(), 4096);
        Assert.assertEquals("Only the services from bundle impl4 should be selected", -4096L, result2);
    }
    @Test
    public void testClientSpecifyingDifferentMethodsLimitedToDifferentProviders() throws Exception {
        // Consumer 1: DBF.newInstance() limited to impl3; ServiceLoader.load(MySPI) limited to impl4.
        Dictionary<String, String> headers1 = new Hashtable<String, String>();
        headers1.put(SpiFlyConstants.SPI_CONSUMER_HEADER,
                "javax.xml.parsers.DocumentBuilderFactory#newInstance();bundle=impl3," +
                "java.util.ServiceLoader#load(java.lang.Class[org.apache.aries.mytest.MySPI]);bundle=impl4");

        // Consumer 2: DBF.newInstance() limited to the system bundle (JRE impl);
        // ServiceLoader.load limited to impl1.
        Dictionary<String, String> headers2 = new Hashtable<String, String>();
        headers2.put(SpiFlyConstants.SPI_CONSUMER_HEADER,
                "javax.xml.parsers.DocumentBuilderFactory#newInstance();bundle=system.bundle," +
                "java.util.ServiceLoader#load;bundle=impl1");

        // Consumer 3: instruction matches nothing this test weaves, so no effect.
        Dictionary<String, String> headers3 = new Hashtable<String, String>();
        headers3.put(SpiFlyConstants.SPI_CONSUMER_HEADER,
                "org.acme.blah#someMethod();bundle=mybundle");

        Bundle providerBundle1 = mockProviderBundle("impl1", 1);
        Bundle providerBundle2 = mockProviderBundle("impl2", 2);
        Bundle providerBundle3 = mockProviderBundle("impl3", 3);
        Bundle providerBundle4 = mockProviderBundle("impl4", 4);

        activator.registerProviderBundle("org.apache.aries.mytest.MySPI", providerBundle1, new HashMap<String, Object>());
        activator.registerProviderBundle("org.apache.aries.mytest.MySPI", providerBundle2, new HashMap<String, Object>());
        activator.registerProviderBundle("org.apache.aries.mytest.AltSPI", providerBundle2, new HashMap<String, Object>());
        activator.registerProviderBundle("javax.xml.parsers.DocumentBuilderFactory", providerBundle3, new HashMap<String, Object>());
        activator.registerProviderBundle("org.apache.aries.mytest.MySPI", providerBundle4, new HashMap<String, Object>());
        activator.registerProviderBundle("org.apache.aries.mytest.AltSPI", providerBundle4, new HashMap<String, Object>());

        Bundle consumerBundle1 = mockConsumerBundle(headers1, providerBundle1, providerBundle2, providerBundle3, providerBundle4);
        activator.addConsumerWeavingData(consumerBundle1, SpiFlyConstants.SPI_CONSUMER_HEADER);
        Bundle consumerBundle2 = mockConsumerBundle(headers2, providerBundle1, providerBundle2, providerBundle3, providerBundle4);
        activator.addConsumerWeavingData(consumerBundle2, SpiFlyConstants.SPI_CONSUMER_HEADER);
        Bundle consumerBundle3 = mockConsumerBundle(headers3, providerBundle1, providerBundle2, providerBundle3, providerBundle4);
        activator.addConsumerWeavingData(consumerBundle3, SpiFlyConstants.SPI_CONSUMER_HEADER);
        Bundle spiFlyBundle = mockSpiFlyBundle(consumerBundle1, consumerBundle2, consumerBundle3,
                providerBundle1, providerBundle2, providerBundle3, providerBundle4);
        WeavingHook wh = new ClientWeavingHook(spiFlyBundle.getBundleContext(), activator);

        // Each consumer sees only the providers its own header selects.
        testConsumerBundleWeaving(consumerBundle1, wh, Collections.singleton("impl4"), "org.apache.aries.spifly.dynamic.impl3.MyAltDocumentBuilderFactory");
        testConsumerBundleWeaving(consumerBundle2, wh, Collections.singleton("olleh"), thisJVMsDBF);
        testConsumerBundleWeaving(consumerBundle3, wh, Collections.<String>emptySet(), thisJVMsDBF);
    }
    /**
     * Weaves both test fixtures for the given consumer bundle and checks the
     * results each one observes.
     *
     * @param consumerBundle   the consumer whose weaving instructions apply
     * @param wh               the weaving hook under test
     * @param testClientResult expected result of TestClient.test("hello")
     * @param jaxpClientResult expected DocumentBuilderFactory class name seen by JaxpClient
     */
    private void testConsumerBundleWeaving(Bundle consumerBundle, WeavingHook wh, Set<String> testClientResult, String jaxpClientResult) throws Exception {
        // Weave the TestClient class.
        URL clsUrl = getClass().getResource("TestClient.class");
        WovenClass wc = new MyWovenClass(clsUrl, TestClient.class.getName(), consumerBundle);
        wh.weave(wc);

        // Invoke the woven class and check that it properly sets the TCCL so that
        // the expected META-INF/services providers are visible.
        Class<?> cls = wc.getDefinedClass();
        Method method = cls.getMethod("test", new Class [] {String.class});
        Object result = method.invoke(cls.newInstance(), "hello");
        Assert.assertEquals(testClientResult, result);

        // Same check for the JAXP-based client.
        URL clsUrl2 = getClass().getResource("JaxpClient.class");
        WovenClass wc2 = new MyWovenClass(clsUrl2, JaxpClient.class.getName(), consumerBundle);
        wh.weave(wc2);

        Class<?> cls2 = wc2.getDefinedClass();
        Method method2 = cls2.getMethod("test", new Class [] {});
        Class<?> result2 = (Class<?>) method2.invoke(cls2.newInstance());
        Assert.assertEquals(jaxpClientResult, result2.getName());
    }
    @Test
    public void testJAXPClientWantsJREImplementation1() throws Exception {
        Bundle systembundle = mockSystemBundle();

        // No alternative DBF provider is registered, so the JRE's default
        // DocumentBuilderFactory must be used.
        Dictionary<String, String> headers = new Hashtable<String, String>();
        headers.put(SpiFlyConstants.SPI_CONSUMER_HEADER, "javax.xml.parsers.DocumentBuilderFactory#newInstance()");
        Bundle consumerBundle = mockConsumerBundle(headers, systembundle);
        activator.addConsumerWeavingData(consumerBundle, SpiFlyConstants.SPI_CONSUMER_HEADER);

        WeavingHook wh = new ClientWeavingHook(mockSpiFlyBundle(consumerBundle, systembundle).getBundleContext(), activator);

        URL clsUrl = getClass().getResource("JaxpClient.class");
        WovenClass wc = new MyWovenClass(clsUrl, "org.apache.aries.spifly.dynamic.JaxpClient", consumerBundle);
        wh.weave(wc);

        Class<?> cls = wc.getDefinedClass();
        Method method = cls.getMethod("test", new Class [] {});
        Class<?> result = (Class<?>) method.invoke(cls.newInstance());
        Assert.assertEquals("JAXP implementation from JRE", thisJVMsDBF, result.getName());
    }
// If there is an alternate implementation it should always be favoured over the JRE one
@Test
public void testJAXPClientWantsAltImplementation1() throws Exception {
Bundle systembundle = mockSystemBundle();
Bundle providerBundle = mockProviderBundle("impl3", 1);
activator.registerProviderBundle("javax.xml.parsers.DocumentBuilderFactory", providerBundle, new HashMap<String, Object>());
Dictionary<String, String> headers = new Hashtable<String, String>();
headers.put(SpiFlyConstants.SPI_CONSUMER_HEADER, "javax.xml.parsers.DocumentBuilderFactory#newInstance()");
Bundle consumerBundle = mockConsumerBundle(headers, providerBundle, systembundle);
activator.addConsumerWeavingData(consumerBundle, SpiFlyConstants.SPI_CONSUMER_HEADER);
WeavingHook wh = new ClientWeavingHook(mockSpiFlyBundle(consumerBundle, providerBundle, systembundle).getBundleContext(), activator);
URL clsUrl = getClass().getResource("JaxpClient.class");
WovenClass wc = new MyWovenClass(clsUrl, "org.apache.aries.spifly.dynamic.JaxpClient", consumerBundle);
wh.weave(wc);
Class<?> cls = wc.getDefinedClass();
Method method = cls.getMethod("test", new Class [] {});
Class<?> result = (Class<?>) method.invoke(cls.newInstance());
Assert.assertEquals("JAXP implementation from JRE", "org.apache.aries.spifly.dynamic.impl3.MyAltDocumentBuilderFactory", result.getName());
}
    @Test
    public void testJAXPClientWantsJREImplementation2() throws Exception {
        Bundle systembundle = mockSystemBundle();

        // An alternative provider exists, but the consumer explicitly pins
        // bundleId=0 (the system bundle), so the JRE default must still win.
        Bundle providerBundle = mockProviderBundle("impl3", 1);
        activator.registerProviderBundle("javax.xml.parsers.DocumentBuilderFactory", providerBundle, new HashMap<String, Object>());

        Dictionary<String, String> headers = new Hashtable<String, String>();
        headers.put(SpiFlyConstants.SPI_CONSUMER_HEADER, "javax.xml.parsers.DocumentBuilderFactory#newInstance();bundleId=0");
        Bundle consumerBundle = mockConsumerBundle(headers, providerBundle, systembundle);
        activator.addConsumerWeavingData(consumerBundle, SpiFlyConstants.SPI_CONSUMER_HEADER);

        WeavingHook wh = new ClientWeavingHook(mockSpiFlyBundle(consumerBundle, providerBundle, systembundle).getBundleContext(), activator);

        URL clsUrl = getClass().getResource("JaxpClient.class");
        WovenClass wc = new MyWovenClass(clsUrl, "org.apache.aries.spifly.dynamic.JaxpClient", consumerBundle);
        wh.weave(wc);

        Class<?> cls = wc.getDefinedClass();
        Method method = cls.getMethod("test", new Class [] {});
        Class<?> result = (Class<?>) method.invoke(cls.newInstance());
        Assert.assertEquals("JAXP implementation from JRE", thisJVMsDBF, result.getName());
    }
    @Test
    public void testJAXPClientWantsAltImplementation2() throws Exception {
        Bundle systembundle = mockSystemBundle();

        Bundle providerBundle = mockProviderBundle("impl3", 1);
        activator.registerProviderBundle("javax.xml.parsers.DocumentBuilderFactory", providerBundle, new HashMap<String, Object>());

        // Consumer explicitly pins bundle=impl3, so the alternative provider
        // must be used instead of the JRE default.
        Dictionary<String, String> headers = new Hashtable<String, String>();
        headers.put(SpiFlyConstants.SPI_CONSUMER_HEADER, "javax.xml.parsers.DocumentBuilderFactory#newInstance();bundle=impl3");
        Bundle consumerBundle = mockConsumerBundle(headers, providerBundle, systembundle);
        activator.addConsumerWeavingData(consumerBundle, SpiFlyConstants.SPI_CONSUMER_HEADER);

        WeavingHook wh = new ClientWeavingHook(mockSpiFlyBundle(consumerBundle, providerBundle, systembundle).getBundleContext(), activator);

        URL clsUrl = getClass().getResource("JaxpClient.class");
        WovenClass wc = new MyWovenClass(clsUrl, "org.apache.aries.spifly.dynamic.JaxpClient", consumerBundle);
        wh.weave(wc);

        Class<?> cls = wc.getDefinedClass();
        Method method = cls.getMethod("test", new Class [] {});
        Class<?> result = (Class<?>) method.invoke(cls.newInstance());
        Assert.assertEquals("JAXP implementation from alternative bundle", "org.apache.aries.spifly.dynamic.impl3.MyAltDocumentBuilderFactory", result.getName());
    }
private Bundle mockSpiFlyBundle(Bundle ... bundles) throws Exception {
return mockSpiFlyBundle("spifly", new Version(1, 0, 0), bundles);
}
/**
 * Creates a mock bundle representing the SpiFly bundle itself. Its BundleContext reports
 * all supplied bundles plus the SpiFly bundle. The context is also injected into the
 * activator under test via reflection, mimicking activator startup.
 * @param bsn the symbolic name the mock should report.
 * @param version the version the mock should report.
 * @param bundles the other bundles visible through the SpiFly bundle's context.
 * @return the mocked SpiFly bundle.
 */
private Bundle mockSpiFlyBundle(String bsn, Version version, Bundle ... bundles) throws Exception {
    Bundle spiFlyBundle = EasyMock.createMock(Bundle.class);
    BundleContext spiFlyBundleContext = EasyMock.createMock(BundleContext.class);
    EasyMock.expect(spiFlyBundleContext.getBundle()).andReturn(spiFlyBundle).anyTimes();
    List<Bundle> allBundles = new ArrayList<Bundle>(Arrays.asList(bundles));
    allBundles.add(spiFlyBundle);
    EasyMock.expect(spiFlyBundleContext.getBundles()).andReturn(allBundles.toArray(new Bundle [] {})).anyTimes();
    EasyMock.replay(spiFlyBundleContext);
    EasyMock.expect(spiFlyBundle.getSymbolicName()).andReturn(bsn).anyTimes();
    EasyMock.expect(spiFlyBundle.getVersion()).andReturn(version).anyTimes();
    // Long.MAX_VALUE keeps this id clear of the small ids used by provider mocks and id 0
    // used by the system bundle mock.
    EasyMock.expect(spiFlyBundle.getBundleId()).andReturn(Long.MAX_VALUE).anyTimes();
    EasyMock.expect(spiFlyBundle.getBundleContext()).andReturn(spiFlyBundleContext).anyTimes();
    EasyMock.replay(spiFlyBundle);
    // Set the bundle context for testing purposes
    Field bcField = BaseActivator.class.getDeclaredField("bundleContext");
    bcField.setAccessible(true);
    bcField.set(activator, spiFlyBundle.getBundleContext());
    return spiFlyBundle;
}
/**
 * Convenience overload: creates a mock provider bundle with the empty version.
 * @param subdir the test resource subdirectory holding the provider's classes/resources.
 * @param id the bundle id the mock should report.
 * @return the mocked provider bundle.
 */
private Bundle mockProviderBundle(String subdir, long id) throws Exception {
    return mockProviderBundle(subdir, id, Version.emptyVersion);
}
/**
 * Creates a mock provider bundle backed by the test resources under {@code subdir}.
 * The mock's {@code getEntryPaths("/")} lists the provider classes declared in the
 * subdirectory's META-INF/services files, and {@code loadClass()} serves those classes
 * through a {@link TestProviderBundleClassLoader}.
 * @param subdir the test resource subdirectory holding the provider's classes/resources.
 * @param id the bundle id the mock should report.
 * @param version the bundle version the mock should report.
 * @return the mocked provider bundle.
 */
private Bundle mockProviderBundle(String subdir, long id, Version version) throws Exception {
    URL url = getClass().getResource("/" + getClass().getName().replace('.', '/') + ".class");
    File classFile = new File(url.getFile());
    File baseDir = new File(classFile.getParentFile(), subdir);
    File directory = new File(baseDir, "/META-INF/services");
    final List<String> classNames = new ArrayList<String>();
    // Do a directory listing of the applicable META-INF/services directory
    List<String> resources = new ArrayList<String>();
    for (File f : directory.listFiles()) {
        String fileName = f.getName();
        // Skip hidden/backup artifacts (names starting or ending with a dot).
        if (fileName.startsWith(".") || fileName.endsWith("."))
            continue;
        classNames.addAll(getClassNames(f));
        // Needs to be something like: META-INF/services/org.apache.aries.mytest.MySPI
        String path = f.getAbsolutePath().substring(baseDir.getAbsolutePath().length());
        path = path.replace('\\', '/');
        if (path.startsWith("/")) {
            path = path.substring(1);
        }
        resources.add(path);
    }
    // Set up the classloader that will be used by the ASM-generated code as the TCCL.
    // It can load a META-INF/services file
    final ClassLoader cl = new TestProviderBundleClassLoader(subdir, resources.toArray(new String [] {}));
    final List<String> classResources = new ArrayList<String>();
    for (String className : classNames) {
        classResources.add("/" + className.replace('.', '/') + ".class");
    }
    BundleContext bc = EasyMock.createNiceMock(BundleContext.class);
    EasyMock.replay(bc);
    Bundle providerBundle = EasyMock.createMock(Bundle.class);
    // The symbolic name is the subdir with any "_suffix" stripped, so several mock bundles
    // (e.g. "impl2_a", "impl2_b") can share one BSN.
    String bsn = subdir;
    int idx = bsn.indexOf('_');
    if (idx > 0) {
        bsn = bsn.substring(0, idx);
    }
    EasyMock.expect(providerBundle.getSymbolicName()).andReturn(bsn).anyTimes();
    EasyMock.expect(providerBundle.getBundleId()).andReturn(id).anyTimes();
    EasyMock.expect(providerBundle.getBundleContext()).andReturn(bc).anyTimes();
    EasyMock.expect(providerBundle.getVersion()).andReturn(version).anyTimes();
    EasyMock.expect(providerBundle.getEntryPaths("/")).andAnswer(new IAnswer<Enumeration<String>>() {
        @Override
        public Enumeration<String> answer() throws Throwable {
            return Collections.enumeration(classResources);
        }
    }).anyTimes();
    EasyMock.<Class<?>>expect(providerBundle.loadClass(EasyMock.anyObject(String.class))).andAnswer(new IAnswer<Class<?>>() {
        @Override
        public Class<?> answer() throws Throwable {
            String name = (String) EasyMock.getCurrentArguments()[0];
            if (!classNames.contains(name)) {
                // Bundle.loadClass() reports an unknown class with ClassNotFoundException;
                // the previous ClassCastException violated that contract and would surface
                // as an unrelated error in tests exercising the failure path.
                throw new ClassNotFoundException(name);
            }
            return cl.loadClass(name);
        }
    }).anyTimes();
    EasyMock.replay(providerBundle);
    return providerBundle;
}
/**
 * Reads a META-INF/services provider-configuration file and returns the entries it lists,
 * one per line, each trimmed of surrounding whitespace (blank lines yield empty strings).
 * <p>
 * Per the {@code java.util.ServiceLoader} specification such files must be encoded in
 * UTF-8, so the file is decoded explicitly as UTF-8 instead of relying on the platform
 * default charset as {@code new FileReader(f)} would.
 * @param f the provider-configuration file to read.
 * @return the class names listed in the file.
 * @throws IOException if the file cannot be read.
 */
private Collection<String> getClassNames(File f) throws IOException {
    List<String> names = new ArrayList<String>();
    BufferedReader br = new BufferedReader(
            new java.io.InputStreamReader(new java.io.FileInputStream(f), "UTF-8"));
    try {
        String line = null;
        while ((line = br.readLine()) != null) {
            names.add(line.trim());
        }
    } finally {
        // Always release the file handle, even if reading fails part-way.
        br.close();
    }
    return names;
}
/**
 * Creates a mock consumer bundle, i.e. the bundle holding the client code that makes the
 * SPI call (e.g. via ServiceLoader.load()).
 * @param headers the manifest headers the mock should report, typically containing an
 *        SPI-Consumer header.
 * @param otherBundles additional bundles that should be visible through the consumer's
 *        BundleContext.
 * @return the mocked consumer bundle.
 */
private Bundle mockConsumerBundle(Dictionary<String, String> headers, Bundle ... otherBundles) {
    // Create a mock object for the client bundle which holds the code that uses ServiceLoader.load()
    // or another SPI invocation.
    BundleContext bc = EasyMock.createMock(BundleContext.class);
    Bundle consumerBundle = EasyMock.createMock(Bundle.class);
    EasyMock.expect(consumerBundle.getSymbolicName()).andReturn("testConsumer").anyTimes();
    EasyMock.expect(consumerBundle.getHeaders()).andReturn(headers).anyTimes();
    EasyMock.expect(consumerBundle.getBundleContext()).andReturn(bc).anyTimes();
    EasyMock.expect(consumerBundle.getBundleId()).andReturn(Long.MAX_VALUE).anyTimes();
    // Returning null from adapt() keeps the weaving code on its non-wiring fallback path.
    EasyMock.expect(consumerBundle.adapt(BundleRevision.class)).andReturn(null).anyTimes();
    EasyMock.replay(consumerBundle);
    // The consumer's context sees all supplied bundles plus the consumer itself.
    List<Bundle> allBundles = new ArrayList<Bundle>(Arrays.asList(otherBundles));
    allBundles.add(consumerBundle);
    EasyMock.expect(bc.getBundles()).andReturn(allBundles.toArray(new Bundle [] {})).anyTimes();
    EasyMock.replay(bc);
    return consumerBundle;
}
/**
 * Creates a mock OSGi framework (system) bundle: bundle id 0 with the conventional
 * "system.bundle" symbolic name.
 * @return the mocked system bundle.
 */
private Bundle mockSystemBundle() {
    Bundle mock = EasyMock.createMock(Bundle.class);
    EasyMock.expect(mock.getSymbolicName()).andReturn("system.bundle").anyTimes();
    EasyMock.expect(mock.getBundleId()).andReturn(0L).anyTimes();
    EasyMock.replay(mock);
    return mock;
}
// A classloader that loads anything starting with org.apache.aries.spifly.dynamic.impl1 from it
// and the rest from the parent. This is to mimic a bundle that holds a specific SPI implementation.
public static class TestProviderBundleClassLoader extends URLClassLoader {
    // Relative paths (e.g. "META-INF/services/...") this loader serves from its subdir.
    private final List<String> resources;
    // Resource-path prefix of the provider subdir, e.g. "org/apache/aries/spifly/dynamic/impl1/".
    private final String prefix;
    // Same prefix in class-name form, e.g. "org.apache.aries.spifly.dynamic.impl1.".
    private final String classPrefix;
    // Cache so each class is defined at most once (defineClass must not be called twice).
    private final Map<String, Class<?>> loadedClasses = new ConcurrentHashMap<String, Class<?>>();

    /**
     * @param subdir the test resource subdirectory this loader serves classes from.
     * @param resources the relative resource paths to expose from that subdirectory.
     */
    public TestProviderBundleClassLoader(String subdir, String ... resources) {
        super(new URL [] {}, TestProviderBundleClassLoader.class.getClassLoader());
        this.prefix = TestProviderBundleClassLoader.class.getPackage().getName().replace('.', '/') + "/" + subdir + "/";
        this.classPrefix = prefix.replace('/', '.');
        this.resources = Arrays.asList(resources);
    }

    /** Classes under the provider prefix are defined locally; everything else delegates. */
    @Override
    public Class<?> loadClass(String name) throws ClassNotFoundException {
        if (name.startsWith(classPrefix))
            return loadClassLocal(name);
        return super.loadClass(name);
    }

    @Override
    protected synchronized Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
        if (name.startsWith(classPrefix)) {
            Class<?> cls = loadClassLocal(name);
            if (resolve)
                resolveClass(cls);
            return cls;
        }
        return super.loadClass(name, resolve);
    }

    /**
     * Reads the class bytes via the parent loader's resources but defines the class in
     * THIS loader, so the provider class is owned by this (mock bundle) classloader.
     */
    protected Class<?> loadClassLocal(String name) throws ClassNotFoundException {
        Class<?> prevLoaded = loadedClasses.get(name);
        if (prevLoaded != null)
            return prevLoaded;
        URL res = TestProviderBundleClassLoader.class.getClassLoader().getResource(name.replace('.', '/') + ".class");
        try {
            byte[] bytes = Streams.suck(res.openStream());
            Class<?> cls = defineClass(name, bytes, 0, bytes.length);
            loadedClasses.put(name, cls);
            return cls;
        } catch (Exception e) {
            throw new ClassNotFoundException(name, e);
        }
    }

    /** Maps known relative resource names onto the provider subdirectory. */
    @Override
    public URL findResource(String name) {
        if (resources.contains(name)) {
            return getClass().getClassLoader().getResource(prefix + name);
        } else {
            return super.findResource(name);
        }
    }

    /** Maps known relative resource names onto the provider subdirectory. */
    @Override
    public Enumeration<URL> findResources(String name) throws IOException {
        if (resources.contains(name)) {
            return getClass().getClassLoader().getResources(prefix + name);
        } else {
            return super.findResources(name);
        }
    }
}
/**
 * Minimal {@code WovenClass} implementation backed by a class file read from a URL.
 * The weaving hook mutates {@link #bytes}; {@link #getDefinedClass()} then defines the
 * (possibly woven) bytes in a dedicated classloader so tests can execute them.
 */
private static class MyWovenClass implements WovenClass {
    byte [] bytes; // current class bytes; replaced by the weaving hook via setBytes()
    final String className;
    final Bundle bundleContainingOriginalClass;
    List<String> dynamicImports = new ArrayList<String>();
    boolean weavingComplete = false;

    private MyWovenClass(URL clazz, String name, Bundle bundle) throws Exception {
        bytes = Streams.suck(clazz.openStream());
        className = name;
        bundleContainingOriginalClass = bundle;
    }

    @Override
    public byte[] getBytes() {
        return bytes;
    }

    @Override
    public void setBytes(byte[] newBytes) {
        bytes = newBytes;
    }

    @Override
    public List<String> getDynamicImports() {
        return dynamicImports;
    }

    @Override
    public boolean isWeavingComplete() {
        return weavingComplete;
    }

    @Override
    public String getClassName() {
        return className;
    }

    @Override
    public ProtectionDomain getProtectionDomain() {
        return null;
    }

    /**
     * Defines the current bytes in a fresh {@link MyWovenClassClassLoader} and returns the
     * resulting class. Also marks weaving as complete, mirroring framework behavior where
     * a class can no longer be woven once it has been defined.
     */
    @Override
    public Class<?> getDefinedClass() {
        try {
            weavingComplete = true;
            return new MyWovenClassClassLoader(className, getBytes(), getClass().getClassLoader(), bundleContainingOriginalClass).loadClass(className);
        } catch (ClassNotFoundException e) {
            e.printStackTrace();
            return null;
        }
    }

    /** Returns a one-shot mock wiring pointing back at the original bundle. */
    @Override
    public BundleWiring getBundleWiring() {
        BundleWiring bw = EasyMock.createMock(BundleWiring.class);
        EasyMock.expect(bw.getBundle()).andReturn(bundleContainingOriginalClass);
        EasyMock.expect(bw.getClassLoader()).andReturn(getClass().getClassLoader());
        EasyMock.replay(bw);
        return bw;
    }
}
/**
 * Classloader that defines exactly one class (the woven class) from the supplied bytes and
 * delegates everything else to the parent. Implements {@code BundleReference} so code that
 * looks up the owning bundle from a class's loader finds the original bundle.
 */
private static class MyWovenClassClassLoader extends ClassLoader implements BundleReference {
    private final String className;
    private final Bundle bundle;
    private final byte [] bytes;
    private Class<?> wovenClass; // lazily defined; defineClass must only run once

    public MyWovenClassClassLoader(String className, byte[] bytes, ClassLoader parent, Bundle bundle) {
        super(parent);
        this.className = className;
        this.bundle = bundle;
        this.bytes = bytes;
    }

    @Override
    protected synchronized Class<?> loadClass(String name, boolean resolve)
            throws ClassNotFoundException {
        if (name.equals(className)) {
            if (wovenClass == null)
                wovenClass = defineClass(className, bytes, 0, bytes.length);
            return wovenClass;
        } else {
            return super.loadClass(name, resolve);
        }
    }

    @Override
    public Class<?> loadClass(String name) throws ClassNotFoundException {
        return loadClass(name, false);
    }

    @Override
    public Bundle getBundle() {
        return bundle;
    }
}
}
| WouterBanckenACA/aries | spi-fly/spi-fly-dynamic-bundle/src/test/java/org/apache/aries/spifly/dynamic/ClientWeavingHookTest.java | Java | apache-2.0 | 45,645 |
/**
* Copyright 2016 LinkedIn Corp. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
package com.github.ambry.commons;
import com.github.ambry.router.AsyncWritableChannel;
import com.github.ambry.router.Callback;
import com.github.ambry.router.FutureResult;
import com.github.ambry.utils.Utils;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedChannelException;
import java.util.Random;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import org.junit.Test;
import static org.junit.Assert.*;
/**
 * Tests functionality of {@link ByteBufferReadableStreamChannel}.
 */
public class ByteBufferReadableStreamChannelTest {

  /**
   * Tests the common case read operations i.e
   * 1. Create {@link ByteBufferReadableStreamChannel} with random bytes.
   * 2. Calls the different read operations of {@link ByteBufferReadableStreamChannel} and checks that the data read
   * matches the data used to create the {@link ByteBufferReadableStreamChannel}.
   * @throws Exception
   */
  @Test
  public void commonCaseTest() throws Exception {
    ByteBuffer content = ByteBuffer.wrap(fillRandomBytes(new byte[1024]));
    ByteBufferReadableStreamChannel readableStreamChannel = new ByteBufferReadableStreamChannel(content);
    assertTrue("ByteBufferReadableStreamChannel is not open", readableStreamChannel.isOpen());
    assertEquals("Size returned by ByteBufferReadableStreamChannel did not match source array size", content.capacity(),
        readableStreamChannel.getSize());
    ByteBufferAsyncWritableChannel writeChannel = new ByteBufferAsyncWritableChannel();
    ReadIntoCallback callback = new ReadIntoCallback();
    Future<Long> future = readableStreamChannel.readInto(writeChannel, callback);
    // Compare the chunks handed to the write channel against an independent view of the
    // source bytes (a separate wrapper so the channel's own position is not disturbed).
    ByteBuffer contentWrapper = ByteBuffer.wrap(content.array());
    while (contentWrapper.hasRemaining()) {
      ByteBuffer recvdContent = writeChannel.getNextChunk();
      assertNotNull("Written content lesser than original content", recvdContent);
      while (recvdContent.hasRemaining()) {
        assertTrue("Written content is more than original content", contentWrapper.hasRemaining());
        assertEquals("Unexpected byte", contentWrapper.get(), recvdContent.get());
      }
      // Resolving with null marks the chunk successfully consumed.
      writeChannel.resolveOldestChunk(null);
    }
    assertNull("There should have been no more data in the channel", writeChannel.getNextChunk(0));
    writeChannel.close();
    callback.awaitCallback();
    if (callback.exception != null) {
      throw callback.exception;
    }
    long futureBytesRead = future.get();
    assertEquals("Total bytes written does not match (callback)", content.limit(), callback.bytesRead);
    assertEquals("Total bytes written does not match (future)", content.limit(), futureBytesRead);
  }

  /**
   * Tests that the right exceptions are thrown when reading into {@link AsyncWritableChannel} fails.
   * Covers three failure modes: a write channel that always fails, calling readInto twice,
   * and reading after the source channel has been closed.
   * @throws Exception
   */
  @Test
  public void readIntoAWCFailureTest() throws Exception {
    String errMsg = "@@ExpectedExceptionMessage@@";
    byte[] in = fillRandomBytes(new byte[1]);
    // Bad AWC.
    ByteBufferReadableStreamChannel readableStreamChannel = new ByteBufferReadableStreamChannel(ByteBuffer.wrap(in));
    ReadIntoCallback callback = new ReadIntoCallback();
    try {
      readableStreamChannel.readInto(new BadAsyncWritableChannel(new IOException(errMsg)), callback).get();
      fail("Should have failed because BadAsyncWritableChannel would have thrown exception");
    } catch (ExecutionException e) {
      Exception exception = (Exception) Utils.getRootCause(e);
      assertEquals("Exception message does not match expected (future)", errMsg, exception.getMessage());
      callback.awaitCallback();
      assertEquals("Exception message does not match expected (callback)", errMsg, callback.exception.getMessage());
    }
    // Reading more than once.
    readableStreamChannel = new ByteBufferReadableStreamChannel(ByteBuffer.wrap(in));
    ByteBufferAsyncWritableChannel writeChannel = new ByteBufferAsyncWritableChannel();
    readableStreamChannel.readInto(writeChannel, null);
    try {
      readableStreamChannel.readInto(writeChannel, null);
      fail("Should have failed because readInto cannot be called more than once");
    } catch (IllegalStateException e) {
      // expected. Nothing to do.
    }
    // Read after close.
    readableStreamChannel = new ByteBufferReadableStreamChannel(ByteBuffer.wrap(in));
    readableStreamChannel.close();
    writeChannel = new ByteBufferAsyncWritableChannel();
    callback = new ReadIntoCallback();
    try {
      readableStreamChannel.readInto(writeChannel, callback).get();
      fail("ByteBufferReadableStreamChannel has been closed, so read should have thrown ClosedChannelException");
    } catch (ExecutionException e) {
      Exception exception = (Exception) Utils.getRootCause(e);
      assertTrue("Exception is not ClosedChannelException", exception instanceof ClosedChannelException);
      callback.awaitCallback();
      assertEquals("Exceptions of callback and future differ", exception.getMessage(), callback.exception.getMessage());
    }
  }

  /**
   * Tests behavior of read operations on some corner cases.
   * <p/>
   * Corner case list:
   * 1. Blob size is 0.
   * @throws Exception
   */
  @Test
  public void readAndWriteCornerCasesTest() throws Exception {
    // 0 sized blob.
    ByteBufferReadableStreamChannel readableStreamChannel = new ByteBufferReadableStreamChannel(ByteBuffer.allocate(0));
    assertTrue("ByteBufferReadableStreamChannel is not open", readableStreamChannel.isOpen());
    assertEquals("Size returned by ByteBufferReadableStreamChannel is not 0", 0, readableStreamChannel.getSize());
    ByteBufferAsyncWritableChannel writeChannel = new ByteBufferAsyncWritableChannel();
    ReadIntoCallback callback = new ReadIntoCallback();
    Future<Long> future = readableStreamChannel.readInto(writeChannel, callback);
    // Drain and resolve any chunks (there should be none for an empty source).
    ByteBuffer chunk = writeChannel.getNextChunk(0);
    while (chunk != null) {
      writeChannel.resolveOldestChunk(null);
      chunk = writeChannel.getNextChunk(0);
    }
    callback.awaitCallback();
    assertEquals("There should have no bytes to read (future)", 0, future.get().longValue());
    assertEquals("There should have no bytes to read (callback)", 0, callback.bytesRead);
    if (callback.exception != null) {
      throw callback.exception;
    }
    writeChannel.close();
    readableStreamChannel.close();
  }

  /**
   * Tests that no exceptions are thrown on repeating idempotent operations. Does <b><i>not</i></b> currently test that
   * state changes are idempotent.
   * @throws IOException
   */
  @Test
  public void idempotentOperationsTest() throws IOException {
    byte[] in = fillRandomBytes(new byte[1]);
    ByteBufferReadableStreamChannel byteBufferReadableStreamChannel =
        new ByteBufferReadableStreamChannel(ByteBuffer.wrap(in));
    assertTrue("ByteBufferReadableStreamChannel is not open", byteBufferReadableStreamChannel.isOpen());
    byteBufferReadableStreamChannel.close();
    assertFalse("ByteBufferReadableStreamChannel is not closed", byteBufferReadableStreamChannel.isOpen());
    // should not throw exception.
    byteBufferReadableStreamChannel.close();
    assertFalse("ByteBufferReadableStreamChannel is not closed", byteBufferReadableStreamChannel.isOpen());
  }

  // helpers
  // general

  /**
   * Fills random bytes into {@code in}.
   * @param in the byte array that needs to be filled with random bytes.
   * @return {@code in} filled with random bytes.
   */
  private byte[] fillRandomBytes(byte[] in) {
    new Random().nextBytes(in);
    return in;
  }
}
/**
 * Callback for read operations on {@link ByteBufferReadableStreamChannel}. Records the
 * number of bytes read and any exception, and allows tests to await completion.
 */
class ReadIntoCallback implements Callback<Long> {
  public volatile long bytesRead;
  public volatile Exception exception;
  private final AtomicBoolean callbackInvoked = new AtomicBoolean(false);
  private final CountDownLatch latch = new CountDownLatch(1);

  /**
   * Records the result of the read. Only the first invocation is honored; a repeat
   * invocation replaces {@link #exception} with an {@link IllegalStateException} so the
   * test fails visibly.
   * @param result the number of bytes read, or possibly {@code null} on failure.
   * @param exception the {@link Exception} that occurred, if any.
   */
  @Override
  public void onCompletion(Long result, Exception exception) {
    if (callbackInvoked.compareAndSet(false, true)) {
      // result may be null when the operation fails; unboxing it directly would throw an
      // NPE inside the callback and mask the real failure.
      bytesRead = result == null ? 0 : result;
      this.exception = exception;
      latch.countDown();
    } else {
      this.exception = new IllegalStateException("Callback invoked more than once");
    }
  }

  /**
   * Waits for the callback to arrive for a limited amount of time.
   * @throws InterruptedException
   * @throws TimeoutException
   */
  void awaitCallback() throws InterruptedException, TimeoutException {
    if (!latch.await(1, TimeUnit.SECONDS)) {
      throw new TimeoutException("Waiting too long for callback to arrive");
    }
  }
}
/**
 * An {@link AsyncWritableChannel} whose write operation always fails with the exception
 * supplied at construction time. Used to exercise error paths of readers.
 */
class BadAsyncWritableChannel implements AsyncWritableChannel {
  private final Exception exceptionToThrow;
  private final AtomicBoolean isOpen = new AtomicBoolean(true);

  /**
   * Creates an instance of BadAsyncWritableChannel that throws {@code exceptionToThrow} on write.
   * @param exceptionToThrow the {@link Exception} to throw on write.
   */
  public BadAsyncWritableChannel(Exception exceptionToThrow) {
    this.exceptionToThrow = exceptionToThrow;
  }

  /**
   * Always fails: runtime exceptions are thrown synchronously, while checked exceptions
   * are delivered asynchronously through the returned future and the callback, mirroring
   * both failure modes of a real channel.
   */
  @Override
  public Future<Long> write(ByteBuffer src, Callback<Long> callback) {
    if (exceptionToThrow instanceof RuntimeException) {
      throw (RuntimeException) exceptionToThrow;
    }
    return completeWithFailure(callback, 0, exceptionToThrow);
  }

  @Override
  public boolean isOpen() {
    return isOpen.get();
  }

  @Override
  public void close() throws IOException {
    isOpen.set(false);
  }

  /**
   * Completes a {@link FutureResult} with the given outcome and, when a callback is
   * supplied, invokes it with the same values.
   * @param callback the {@link Callback} to invoke (may be {@code null}).
   * @param totalBytesWritten the number of bytes successfully written.
   * @param exception the {@link Exception} that occurred, if any.
   * @return the completed {@link Future}.
   */
  private Future<Long> completeWithFailure(Callback<Long> callback, long totalBytesWritten, Exception exception) {
    FutureResult<Long> outcome = new FutureResult<Long>();
    outcome.done(totalBytesWritten, exception);
    if (callback != null) {
      callback.onCompletion(totalBytesWritten, exception);
    }
    return outcome;
  }
}
| vgkholla/ambry | ambry-commons/src/test/java/com.github.ambry.commons/ByteBufferReadableStreamChannelTest.java | Java | apache-2.0 | 11,153 |
// Copyright 2012 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "syzygy/experimental/pdb_dumper/pdb_module_info_stream_dumper.h"
#include "syzygy/common/align.h"
#include "syzygy/experimental/pdb_dumper/pdb_dump_util.h"
#include "syzygy/experimental/pdb_dumper/pdb_symbol_record_dumper.h"
#include "syzygy/pdb/pdb_dbi_stream.h"
#include "syzygy/pdb/pdb_stream.h"
#include "syzygy/pdb/pdb_symbol_record.h"
#include "syzygy/pe/cvinfo_ext.h"
namespace pdb {
namespace cci = Microsoft_Cci_Pdb;
namespace {
// Read the file checksum substream from a module info stream. The filenames
// used by this module will be stored in a map keyed by the offset of each
// checksum record relative to the start of the substream (line records refer
// to source files by this offset).
// @param file_names The map containing the filenames listed in the name stream
//     of the PDB, keyed by name-table offset.
// @param stream The stream containing the checksum substream, positioned at
//     the start of the substream.
// @param length The length of the checksum substream.
// @param module_files The map where the filenames should be saved.
// @returns true on success, false on error.
bool ReadFileChecksums(const OffsetStringMap& file_names,
                       pdb::PdbStream* stream,
                       size_t length,
                       OffsetStringMap* module_files) {
  DCHECK(stream != NULL);
  DCHECK(module_files != NULL);
  size_t base = stream->pos();
  size_t end = base + length;
  while (stream->pos() < end) {
    cci::CV_FileCheckSum checksum = {};
    // The record's key is its offset from the start of the substream.
    size_t pos = stream->pos() - base;
    if (!stream->Read(&checksum, 1)) {
      LOG(ERROR) << "Unable to read file checksum.";
      return false;
    }
    // checksum.name is an offset into the PDB name table.
    OffsetStringMap::const_iterator it(file_names.find(checksum.name));
    if (it == file_names.end()) {
      LOG(ERROR) << "There is a checksum reference for a file that is not in "
                 << "the list of files used by this module.";
      return false;
    }
    module_files->insert(std::make_pair(pos, it->second));
    // Skip the checksum and align.
    if (!stream->Seek(common::AlignUp(stream->pos() + checksum.len, 4))) {
      LOG(ERROR) << "Unable to seek past file checksum.";
      return false;
    }
  }
  return true;
}
// Dump the line information from a line information substream.
// @param file_names The map containing the filenames used by this module,
//     keyed by checksum-record offset (see ReadFileChecksums).
// @param out The output where the data should be dumped.
// @param stream The stream containing the line information, positioned at the
//     start of the substream.
// @param length The length of the line information substream.
// @param indent_level The indentation level to use.
// @returns true on success, false on error.
bool DumpLineInfo(const OffsetStringMap& file_names,
                  FILE* out,
                  PdbStream* stream,
                  size_t length,
                  uint8 indent_level) {
  DCHECK(stream != NULL);
  size_t base = stream->pos();
  // Read the header.
  cci::CV_LineSection line_section = {};
  if (!stream->Read(&line_section, 1)) {
    LOG(ERROR) << "Unable to read line section.";
    return false;
  }
  size_t end = base + length;
  while (stream->pos() < end) {
    cci::CV_SourceFile source_file = {};
    if (!stream->Read(&source_file, 1)) {
      LOG(ERROR) << "Unable to read source info.";
      return false;
    }
    std::vector<cci::CV_Line> lines(source_file.count);
    if (lines.size() && !stream->Read(&lines, lines.size())) {
      LOG(ERROR) << "Unable to read line records.";
      return false;
    }
    // Column records are only present in the stream when the section carries
    // the CV_LINES_HAVE_COLUMNS flag. Otherwise the vector keeps its
    // value-initialized (zeroed) elements and the no-column branch below runs.
    std::vector<cci::CV_Column> columns(source_file.count);
    if ((line_section.flags & cci::CV_LINES_HAVE_COLUMNS) != 0 &&
        !stream->Read(&columns, columns.size())) {
      LOG(ERROR) << "Unable to read column records.";
      return false;
    }
    // source_file.index is the offset of this file's checksum record.
    OffsetStringMap::const_iterator it(file_names.find(source_file.index));
    if (it == file_names.end()) {
      LOG(ERROR) << "Unable to find an index in the list of filenames used by "
                 << "this module.";
      return false;
    }
    DumpIndentedText(out,
                     indent_level,
                     "Section %d, offset 0x%04X.\n",
                     line_section.sec,
                     line_section.off);
    for (size_t i = 0; i < lines.size(); ++i) {
      // A zero column offset means "no column information" for this record.
      if (columns[i].offColumnStart != 0) {
        DumpIndentedText(out, indent_level,
                         "%s(%d, %d): line and column at %d:%04X.\n",
                         it->second.c_str(),
                         lines[i].flags & cci::linenumStart,
                         columns[i].offColumnStart,
                         line_section.sec,
                         line_section.off + lines[i].offset);
      } else {
        DumpIndentedText(out,
                         indent_level,
                         "%s(%d): line at %d:%04X.\n",
                         it->second.c_str(),
                         lines[i].flags & cci::linenumStart,
                         line_section.sec,
                         line_section.off + lines[i].offset);
      }
    }
  }
  return true;
}
// Dump the line information substream from a module info stream.
// @param name_map The map containing the filenames listed in the name stream of
//     the PDB.
// @param out The output where the data should be dumped.
// @param stream The stream containing the line information.
// @param start The position where the line information start in the stream.
// @param lines_bytes The length of the line information substream.
// @param indent_level The level of indentation to use.
void DumpLines(const OffsetStringMap& name_map,
               FILE* out,
               pdb::PdbStream* stream,
               size_t start,
               size_t lines_bytes,
               uint8 indent_level) {
  DCHECK(stream != NULL);
  // An absent substream is legitimate (e.g. a module without line info).
  if (lines_bytes == 0)
    return;
  if (!stream->Seek(start)) {
    LOG(ERROR) << "Unable to seek to line info.";
    return;
  }
  // The line information is arranged as a back-to-back run of {type, len}
  // prefixed chunks. The types are DEBUG_S_FILECHKSMS and DEBUG_S_LINES.
  // The first of these provides file names and a file content checksum, where
  // each record is identified by its index into its chunk (excluding type
  // and len).
  size_t end = start + lines_bytes;
  OffsetStringMap file_names;
  while (stream->pos() < end) {
    uint32 line_info_type = 0;
    uint32 length = 0;
    if (!stream->Read(&line_info_type, 1) || !stream->Read(&length, 1)) {
      LOG(ERROR) << "Unable to read line info signature.";
      return;
    }
    switch (line_info_type) {
      case cci::DEBUG_S_FILECHKSMS:
        // Populates file_names, which the DEBUG_S_LINES chunks below refer to.
        if (!ReadFileChecksums(name_map, stream, length, &file_names))
          return;
        break;
      case cci::DEBUG_S_LINES:
        if (!DumpLineInfo(file_names, out, stream, length, indent_level))
          return;
        break;
      default:
        LOG(ERROR) << "Unsupported line information type " << line_info_type
                   << ".";
        return;
    }
  }
}
} // namespace
// Dumps a module info stream: the module/object names, the module's symbol
// records and its line information.
void DumpModuleInfoStream(const DbiModuleInfo& module_info,
                          const OffsetStringMap& name_table,
                          FILE* out,
                          PdbStream* stream) {
  DCHECK(stream != NULL);
  uint8 indent_level = 1;
  DumpIndentedText(out,
                   indent_level,
                   "Module name: %s\n",
                   module_info.module_name().c_str());
  DumpIndentedText(out,
                   indent_level,
                   "Object name: %s\n",
                   module_info.object_name().c_str());
  // The symbol substream starts with a 4-byte signature; only the C13 format
  // is supported here.
  uint32 type = 0;
  if (!stream->Read(&type, 1) || type != cci::C13) {
    LOG(ERROR) << "Unexpected symbol stream type " << type << ".";
    return;
  }
  // symbol_bytes includes the signature just read, hence the sizeof(type)
  // adjustment.
  SymbolRecordVector symbols;
  ReadSymbolRecord(stream,
                   module_info.module_info_base().symbol_bytes - sizeof(type),
                   &symbols);
  DumpIndentedText(out, indent_level + 1, "Symbol records:\n");
  DumpSymbolRecords(out, stream, symbols, indent_level + 2);
  DumpIndentedText(out, indent_level + 1, "Lines:\n");
  // The line substream immediately follows the symbol substream.
  DumpLines(name_table,
            out,
            stream,
            module_info.module_info_base().symbol_bytes,
            module_info.module_info_base().lines_bytes,
            indent_level + 2);
}
} // namespace pdb
| wangming28/syzygy | syzygy/experimental/pdb_dumper/pdb_module_info_stream_dumper.cc | C++ | apache-2.0 | 8,662 |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.xcontent.smile;
import com.fasterxml.jackson.core.JsonEncoding;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.dataformat.smile.SmileFactory;
import com.fasterxml.jackson.dataformat.smile.SmileGenerator;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.FastStringReader;
import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentGenerator;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Reader;
/**
 * A Smile based content implementation using Jackson.
 * <p>
 * Stateless singleton ({@link #smileXContent}); the shared {@link SmileFactory} is
 * configured once in the static initializer.
 */
public class SmileXContent implements XContent {

    /** Returns a new builder that produces Smile-encoded content. */
    public static XContentBuilder contentBuilder() throws IOException {
        return XContentBuilder.builder(smileXContent);
    }

    final static SmileFactory smileFactory;
    public final static SmileXContent smileXContent;

    static {
        smileFactory = new SmileFactory();
        smileFactory.configure(SmileGenerator.Feature.ENCODE_BINARY_AS_7BIT, false); // for now, this is an overhead, might make sense for web sockets
        smileFactory.configure(SmileFactory.Feature.FAIL_ON_SYMBOL_HASH_OVERFLOW, false); // this trips on many mappings now...
        // Do not automatically close unclosed objects/arrays in com.fasterxml.jackson.dataformat.smile.SmileGenerator#close() method
        smileFactory.configure(JsonGenerator.Feature.AUTO_CLOSE_JSON_CONTENT, false);
        smileXContent = new SmileXContent();
    }

    // Singleton: use the smileXContent constant rather than instantiating.
    private SmileXContent() {
    }

    @Override
    public XContentType type() {
        return XContentType.SMILE;
    }

    @Override
    public byte streamSeparator() {
        // The byte used to delimit consecutive Smile documents in a stream.
        return (byte) 0xFF;
    }

    @Override
    public XContentGenerator createGenerator(OutputStream os, String[] filters, boolean inclusive) throws IOException {
        return new SmileXContentGenerator(smileFactory.createGenerator(os, JsonEncoding.UTF8), os, filters, inclusive);
    }

    @Override
    public XContentParser createParser(String content) throws IOException {
        return new SmileXContentParser(smileFactory.createParser(new FastStringReader(content)));
    }

    @Override
    public XContentParser createParser(InputStream is) throws IOException {
        return new SmileXContentParser(smileFactory.createParser(is));
    }

    @Override
    public XContentParser createParser(byte[] data) throws IOException {
        return new SmileXContentParser(smileFactory.createParser(data));
    }

    @Override
    public XContentParser createParser(byte[] data, int offset, int length) throws IOException {
        return new SmileXContentParser(smileFactory.createParser(data, offset, length));
    }

    @Override
    public XContentParser createParser(BytesReference bytes) throws IOException {
        // Parse directly from the backing array when possible to avoid a stream copy.
        if (bytes.hasArray()) {
            return createParser(bytes.array(), bytes.arrayOffset(), bytes.length());
        }
        return createParser(bytes.streamInput());
    }

    @Override
    public XContentParser createParser(Reader reader) throws IOException {
        return new SmileXContentParser(smileFactory.createParser(reader));
    }
}
| camilojd/elasticsearch | core/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java | Java | apache-2.0 | 4,170 |
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package api
const (
	// MirrorPodAnnotationKey represents the annotation key set by kubelets when creating mirror pods
	MirrorPodAnnotationKey string = "kubernetes.io/config.mirror"

	// TolerationsAnnotationKey represents the key of tolerations data (json serialized)
	// in the Annotations of a Pod.
	TolerationsAnnotationKey string = "scheduler.alpha.kubernetes.io/tolerations"

	// TaintsAnnotationKey represents the key of taints data (json serialized)
	// in the Annotations of a Node.
	TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints"

	// SeccompPodAnnotationKey represents the key of a seccomp profile applied
	// to all containers of a pod.
	SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod"

	// SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied
	// to one container of a pod.
	SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/"

	// CreatedByAnnotation represents the key used to store the spec(json)
	// used to create the resource.
	CreatedByAnnotation = "kubernetes.io/created-by"

	// PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized)
	// in the Annotations of a Node.
	PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods"

	// SysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure
	// container of a pod. The annotation value is a comma separated list of sysctl_name=value
	// key-value pairs. Only a limited set of whitelisted and isolated sysctls is supported by
	// the kubelet. Pods with other sysctls will fail to launch.
	SysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/sysctls"

	// UnsafeSysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure
	// container of a pod. The annotation value is a comma separated list of sysctl_name=value
	// key-value pairs. Unsafe sysctls must be explicitly enabled for a kubelet. They are properly
	// namespaced to a pod or a container, but their isolation is usually unclear or weak. Their use
	// is at-your-own-risk. Pods that attempt to set an unsafe sysctl that is not enabled for a kubelet
	// will fail to launch.
	UnsafeSysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/unsafe-sysctls"

	// ObjectTTLAnnotationKey represents a suggestion for kubelet for how long it can cache
	// an object (e.g. secret, config map) before fetching it again from apiserver.
	// This annotation can be attached to node.
	ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl"

	// AffinityAnnotationKey represents the key of affinity data (json serialized)
	// in the Annotations of a Pod.
	// TODO: remove when alpha support for affinity is removed
	AffinityAnnotationKey string = "scheduler.alpha.kubernetes.io/affinity"

	// NonConvertibleAnnotationPrefix is the annotation key prefix used to identify
	// non-convertible json paths.
	NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io"
)
| danielromlein/dashboard | vendor/k8s.io/client-go/pkg/api/annotation_key_constants.go | GO | apache-2.0 | 3,584 |
# -*- coding: utf-8 -*-
###############################################################################
#
# FilterPlacesByTopLevelCategory
# Find places by top-level category and near specified latitude, longitude coordinates.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class FilterPlacesByTopLevelCategory(Choreography):

    def __init__(self, temboo_session):
        """
        Create a new instance of the FilterPlacesByTopLevelCategory Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(FilterPlacesByTopLevelCategory, self).__init__(temboo_session, '/Library/Factual/FilterPlacesByTopLevelCategory')

    def new_input_set(self):
        # Factory for the InputSet used to supply inputs to this Choreo.
        return FilterPlacesByTopLevelCategoryInputSet()

    def _make_result_set(self, result, path):
        # Wraps a raw execution result in this Choreo's typed ResultSet.
        return FilterPlacesByTopLevelCategoryResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Builds the execution handle used to poll an asynchronous run.
        return FilterPlacesByTopLevelCategoryChoreographyExecution(session, exec_id, path)
class FilterPlacesByTopLevelCategoryInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the FilterPlacesByTopLevelCategory
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """

    # Each setter maps 1:1 to a named Choreo input; values are forwarded
    # verbatim to the base InputSet via _set_input.

    def set_APIKey(self, value):
        """
        Set the value of the APIKey input for this Choreo. ((optional, string) The API Key provided by Factual (AKA the OAuth Consumer Key).)
        """
        super(FilterPlacesByTopLevelCategoryInputSet, self)._set_input('APIKey', value)

    def set_APISecret(self, value):
        """
        Set the value of the APISecret input for this Choreo. ((optional, string) The API Secret provided by Factual (AKA the OAuth Consumer Secret).)
        """
        super(FilterPlacesByTopLevelCategoryInputSet, self)._set_input('APISecret', value)

    def set_Category(self, value):
        """
        Set the value of the Category input for this Choreo. ((required, string) Enter a Factual top-level category to narrow the search results. See Choreo doc for a list of Factual top-level categories.)
        """
        super(FilterPlacesByTopLevelCategoryInputSet, self)._set_input('Category', value)

    def set_Latitude(self, value):
        """
        Set the value of the Latitude input for this Choreo. ((required, decimal) Enter latitude coordinates of the location defining the center of the search radius.)
        """
        super(FilterPlacesByTopLevelCategoryInputSet, self)._set_input('Latitude', value)

    def set_Longitude(self, value):
        """
        Set the value of the Longitude input for this Choreo. ((required, decimal) Enter longitude coordinates of the location defining the center of the search radius.)
        """
        super(FilterPlacesByTopLevelCategoryInputSet, self)._set_input('Longitude', value)

    def set_Query(self, value):
        """
        Set the value of the Query input for this Choreo. ((optional, string) A search string (i.e. Starbucks))
        """
        super(FilterPlacesByTopLevelCategoryInputSet, self)._set_input('Query', value)

    def set_Radius(self, value):
        """
        Set the value of the Radius input for this Choreo. ((required, integer) Provide the radius (in meters, and centered on the latitude-longitude coordinates specified) for which search results will be returned.)
        """
        super(FilterPlacesByTopLevelCategoryInputSet, self)._set_input('Radius', value)
class FilterPlacesByTopLevelCategoryResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the FilterPlacesByTopLevelCategory Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        # Deserializes a JSON string returned by the Choreo into Python objects.
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Factual.)
        """
        return self._output.get('Response', None)
class FilterPlacesByTopLevelCategoryChoreographyExecution(ChoreographyExecution):
    # Execution handle for asynchronous runs of this Choreo.

    def _make_result_set(self, response, path):
        # Wraps the raw response in this Choreo's typed ResultSet.
        return FilterPlacesByTopLevelCategoryResultSet(response, path)
| jordanemedlock/psychtruths | temboo/core/Library/Factual/FilterPlacesByTopLevelCategory.py | Python | apache-2.0 | 5,132 |
/*
* Copyright 2012-2014 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.autoconfigure;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.List;
import org.springframework.beans.factory.BeanClassLoaderAware;
import org.springframework.context.ResourceLoaderAware;
import org.springframework.context.annotation.DeferredImportSelector;
import org.springframework.core.Ordered;
import org.springframework.core.annotation.AnnotationAttributes;
import org.springframework.core.annotation.Order;
import org.springframework.core.io.ResourceLoader;
import org.springframework.core.io.support.SpringFactoriesLoader;
import org.springframework.core.type.AnnotationMetadata;
import org.springframework.util.Assert;
/**
* {@link DeferredImportSelector} to handle {@link EnableAutoConfiguration
* auto-configuration}.
*
* @author Phillip Webb
* @author Andy Wilkinson
* @see EnableAutoConfiguration
*/
/**
 * {@link DeferredImportSelector} that discovers auto-configuration candidates via
 * {@code spring.factories}, removes any classes excluded on the
 * {@code @EnableAutoConfiguration} annotation, and returns them in priority order.
 */
@Order(Ordered.LOWEST_PRECEDENCE)
class EnableAutoConfigurationImportSelector implements DeferredImportSelector,
		BeanClassLoaderAware, ResourceLoaderAware {

	private ClassLoader beanClassLoader;

	private ResourceLoader resourceLoader;

	@Override
	public String[] selectImports(AnnotationMetadata metadata) {
		try {
			AnnotationAttributes annotationAttributes = AnnotationAttributes
					.fromMap(metadata.getAnnotationAttributes(
							EnableAutoConfiguration.class.getName(), true));
			Assert.notNull(annotationAttributes,
					"No auto-configuration attributes found. Is "
							+ metadata.getClassName()
							+ " annotated with @EnableAutoConfiguration?");
			// Gather every candidate from spring.factories, dropping duplicates
			// while preserving discovery order.
			LinkedHashSet<String> unique = new LinkedHashSet<String>(
					SpringFactoriesLoader.loadFactoryNames(
							EnableAutoConfiguration.class, this.beanClassLoader));
			List<String> imports = new ArrayList<String>(unique);
			// Honour the 'exclude' attribute of @EnableAutoConfiguration.
			imports.removeAll(
					Arrays.asList(annotationAttributes.getStringArray("exclude")));
			// Apply @AutoConfigureBefore/@AutoConfigureAfter ordering hints.
			imports = new AutoConfigurationSorter(this.resourceLoader)
					.getInPriorityOrder(imports);
			return imports.toArray(new String[imports.size()]);
		}
		catch (IOException ex) {
			throw new IllegalStateException(ex);
		}
	}

	@Override
	public void setBeanClassLoader(ClassLoader classLoader) {
		this.beanClassLoader = classLoader;
	}

	@Override
	public void setResourceLoader(ResourceLoader resourceLoader) {
		this.resourceLoader = resourceLoader;
	}

}
| domix/spring-boot | spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/EnableAutoConfigurationImportSelector.java | Java | apache-2.0 | 3,065 |
/*
* Copyright 2004-2009 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.compass.annotations.test.property;
import java.util.List;
import org.compass.annotations.Searchable;
import org.compass.annotations.SearchableId;
import org.compass.annotations.SearchableProperty;
/**
* @author kimchy
*/
@Searchable
public class A {

// Identifier property used as the searchable id of the entity.
@SearchableId
long id;

// Each element of the list is indexed as a separate value of this property.
@SearchableProperty
List<String> values;
}
| baboune/compass | src/main/test/org/compass/annotations/test/property/A.java | Java | apache-2.0 | 973 |
package main
import "github.com/mackerelio/mackerel-plugin-gearmand/lib"
// main delegates directly to the plugin implementation in mpgearmand.
func main() {
	mpgearmand.Do()
}
| mackerelio/mackerel-agent-plugins | mackerel-plugin-gearmand/main.go | GO | apache-2.0 | 108 |
##############################################################################
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
##############################################################################
"""Implementation of interface declarations
There are three flavors of declarations:
- Declarations are used to simply name declared interfaces.
- ImplementsDeclarations are used to express the interfaces that a
class implements (that instances of the class provides).
Implements specifications support inheriting interfaces.
- ProvidesDeclarations are used to express interfaces directly
provided by objects.
"""
from __future__ import absolute_import
__docformat__ = 'restructuredtext'
import sys
from types import FunctionType
from types import MethodType
from types import ModuleType
import weakref
from . import advice as advicemod
from .interface import InterfaceClass
from .interface import SpecificationBase
from .interface import Specification
from ._compat import CLASS_TYPES as DescriptorAwareMetaClasses
from ._compat import PYTHON3
# Registry of class-implementation specifications
BuiltinImplementationSpecifications = {}

# Message templates used when the Python-2-only class-advice APIs
# (implements/implementsOnly/classProvides) are invoked under Python 3,
# where frame-based class advice cannot work.
_ADVICE_ERROR = ('Class advice impossible in Python3. '
                 'Use the @%s class decorator instead.')

_ADVICE_WARNING = ('The %s API is deprecated, and will not work in Python3 '
                   'Use the @%s class decorator instead.')
class named(object):
    """Callable/decorator that tags an object with a component name.

    The supplied name is stored on the decorated object as
    ``__component_name__``; the object itself is returned unchanged.
    """

    def __init__(self, name):
        self.name = name

    def __call__(self, ob):
        # Attach the component name and hand the object back so this works
        # as a class or function decorator.
        setattr(ob, '__component_name__', self.name)
        return ob
class Declaration(Specification):
    """Interface declarations

    A simple named collection of interfaces; the base class for the
    implements/provides declaration objects below.
    """

    def __init__(self, *interfaces):
        # _normalizeargs flattens nested declarations into a plain
        # tuple of interfaces.
        Specification.__init__(self, _normalizeargs(interfaces))

    def changed(self, originally_changed):
        Specification.changed(self, originally_changed)
        # Drop the cached attribute lookup table (if any); it is lazily
        # rebuilt on next use.
        try:
            del self._v_attrs
        except AttributeError:
            pass

    def __contains__(self, interface):
        """Test whether an interface is in the specification
        """
        # Membership requires the interface to be both extended by and
        # directly listed in this declaration.
        return self.extends(interface) and interface in self.interfaces()

    def __iter__(self):
        """Return an iterator for the interfaces in the specification
        """
        return self.interfaces()

    def flattened(self):
        """Return an iterator of all included and extended interfaces
        """
        return iter(self.__iro__)

    def __sub__(self, other):
        """Remove interfaces from a specification
        """
        # Keep only interfaces that do not extend anything in `other`.
        return Declaration(
            *[i for i in self.interfaces()
                if not [j for j in other.interfaces()
                        if i.extends(j, 0)]
              ]
        )

    def __add__(self, other):
        """Add two specifications or a specification and an interface
        """
        # Concatenate, preserving order and dropping duplicates.
        seen = {}
        result = []
        for i in self.interfaces():
            seen[i] = 1
            result.append(i)
        for i in other.interfaces():
            if i not in seen:
                seen[i] = 1
                result.append(i)

        return Declaration(*result)

    __radd__ = __add__
##############################################################################
#
# Implementation specifications
#
# These specify interfaces implemented by instances of classes
class Implements(Declaration):
    """Declaration of the interfaces implemented by instances of a class.

    Instances are cached per class (see ``implementedBy``); equality and
    hashing are identity based, while ordering is by (name, module).
    """

    # class whose specification should be used as additional base
    inherit = None

    # interfaces actually declared for a class
    declared = ()

    __name__ = '?'

    @classmethod
    def named(cls, name, *interfaces):
        # Implementation method: Produce an Implements interface with
        # a fully fleshed out __name__ before calling the constructor, which
        # sets bases to the given interfaces and which may pass this object to
        # other objects (e.g., to adjust dependents). If they're sorting or comparing
        # by name, this needs to be set.
        inst = cls.__new__(cls)
        inst.__name__ = name
        inst.__init__(*interfaces)
        return inst

    def __repr__(self):
        return '<implementedBy %s>' % (self.__name__)

    def __reduce__(self):
        # Pickle as a call to implementedBy so unpickling returns the
        # cached spec for the class rather than a new object.
        return implementedBy, (self.inherit, )

    def __cmp(self, other):
        # Yes, I did mean to name this __cmp, rather than __cmp__.
        # It is a private method used by __lt__ and __gt__.
        # This is based on, and compatible with, InterfaceClass.
        # (The two must be mutually comparable to be able to work in e.g., BTrees.)
        # Instances of this class generally don't have a __module__ other than
        # `zope.interface.declarations`, whereas they *do* have a __name__ that is the
        # fully qualified name of the object they are representing.
        # Note, though, that equality and hashing are still identity based. This
        # accounts for things like nested objects that have the same name (typically
        # only in tests) and is consistent with pickling. As far as comparisons to InterfaceClass
        # goes, we'll never have equal name and module to those, so we're still consistent there.
        # Instances of this class are essentially intended to be unique and are
        # heavily cached (note how our __reduce__ handles this) so having identity
        # based hash and eq should also work.
        if other is None:
            return -1

        n1 = (self.__name__, self.__module__)
        n2 = (getattr(other, '__name__', ''), getattr(other, '__module__', ''))

        # This spelling works under Python3, which doesn't have cmp().
        return (n1 > n2) - (n1 < n2)

    def __hash__(self):
        return Declaration.__hash__(self)

    # We want equality to be based on identity. However, we can't actually
    # implement __eq__/__ne__ to do this because sometimes we get wrapped in a proxy.
    # We need to let the proxy types implement these methods so they can handle unwrapping
    # and then rely on: (1) the interpreter automatically changing `implements == proxy` into
    # `proxy == implements` (which will call proxy.__eq__ to do the unwrapping) and then
    # (2) the default equality semantics being identity based.

    def __lt__(self, other):
        c = self.__cmp(other)
        return c < 0

    def __le__(self, other):
        c = self.__cmp(other)
        return c <= 0

    def __gt__(self, other):
        c = self.__cmp(other)
        return c > 0

    def __ge__(self, other):
        c = self.__cmp(other)
        return c >= 0
def _implements_name(ob):
# Return the __name__ attribute to be used by its __implemented__
# property.
# This must be stable for the "same" object across processes
# because it is used for sorting. It needn't be unique, though, in cases
# like nested classes named Foo created by different functions, because
# equality and hashing is still based on identity.
# It might be nice to use __qualname__ on Python 3, but that would produce
# different values between Py2 and Py3.
return (getattr(ob, '__module__', '?') or '?') + \
'.' + (getattr(ob, '__name__', '?') or '?')
def implementedByFallback(cls):
    """Return the interfaces implemented for a class' instances

    The value returned is an IDeclaration.

    The spec is cached on the class as ``__implemented__``; builtins that
    cannot be written to are cached in BuiltinImplementationSpecifications.
    """
    try:
        spec = cls.__dict__.get('__implemented__')
    except AttributeError:
        # we can't get the class dict. This is probably due to a
        # security proxy. If this is the case, then probably no
        # descriptor was installed for the class.

        # We don't want to depend directly on zope.security in
        # zope.interface, but we'll try to make reasonable
        # accommodations in an indirect way.

        # We'll check to see if there's an implements:
        spec = getattr(cls, '__implemented__', None)
        if spec is None:
            # There's no spec stored in the class. Maybe it's a builtin:
            spec = BuiltinImplementationSpecifications.get(cls)
            if spec is not None:
                return spec
            return _empty

        if spec.__class__ == Implements:
            # we defaulted to _empty or there was a spec. Good enough.
            # Return it.
            return spec

        # TODO: need old style __implements__ compatibility?
        # Hm, there's an __implemented__, but it's not a spec. Must be
        # an old-style declaration. Just compute a spec for it
        return Declaration(*_normalizeargs((spec, )))

    if isinstance(spec, Implements):
        return spec

    if spec is None:
        spec = BuiltinImplementationSpecifications.get(cls)
        if spec is not None:
            return spec

    # TODO: need old style __implements__ compatibility?
    spec_name = _implements_name(cls)
    if spec is not None:
        # old-style __implemented__ = foo declaration
        spec = (spec, ) # tuplefy, as it might be just an int
        spec = Implements.named(spec_name, *_normalizeargs(spec))
        spec.inherit = None # old-style implies no inherit
        del cls.__implemented__ # get rid of the old-style declaration
    else:
        try:
            bases = cls.__bases__
        except AttributeError:
            if not callable(cls):
                raise TypeError("ImplementedBy called for non-factory", cls)
            bases = ()

        # Build the spec from the base classes' specs so inheritance works.
        spec = Implements.named(spec_name, *[implementedBy(c) for c in bases])
        spec.inherit = cls

    try:
        # Cache the spec on the class itself for fast subsequent lookups.
        cls.__implemented__ = spec
        if not hasattr(cls, '__providedBy__'):
            cls.__providedBy__ = objectSpecificationDescriptor

        if (isinstance(cls, DescriptorAwareMetaClasses)
            and
            '__provides__' not in cls.__dict__):
            # Make sure we get a __provides__ descriptor
            cls.__provides__ = ClassProvides(
                cls,
                getattr(cls, '__class__', type(cls)),
            )

    except TypeError:
        # Builtins (and other unwritable types) can't hold the cache;
        # fall back to the module-level registry.
        if not isinstance(cls, type):
            raise TypeError("ImplementedBy called for non-type", cls)
        BuiltinImplementationSpecifications[cls] = spec

    return spec

implementedBy = implementedByFallback
def classImplementsOnly(cls, *interfaces):
    """Declare the only interfaces implemented by instances of a class

    The arguments after the class are one or more interfaces or interface
    specifications (``IDeclaration`` objects).

    The interfaces given (including the interfaces in the specifications)
    replace any previous declarations.
    """
    spec = implementedBy(cls)
    # Wipe both the directly-declared interfaces and the inherited base
    # class, so only the new declarations remain.
    spec.declared = ()
    spec.inherit = None
    classImplements(cls, *interfaces)
def classImplements(cls, *interfaces):
    """Declare additional interfaces implemented for instances of a class

    The arguments after the class are one or more interfaces or
    interface specifications (``IDeclaration`` objects).

    The interfaces given (including the interfaces in the specifications)
    are added to any interfaces previously declared.
    """
    spec = implementedBy(cls)
    spec.declared += tuple(_normalizeargs(interfaces))

    # compute the bases: declared interfaces first, then what the base
    # classes implement, de-duplicated while preserving order
    bases = []
    seen = {}
    for b in spec.declared:
        if b not in seen:
            seen[b] = 1
            bases.append(b)

    if spec.inherit is not None:

        for c in spec.inherit.__bases__:
            b = implementedBy(c)
            if b not in seen:
                seen[b] = 1
                bases.append(b)

    # Assigning __bases__ triggers Specification's dependency updates.
    spec.__bases__ = tuple(bases)
def _implements_advice(cls):
    # Class advisor invoked after the class body runs (Python 2 only):
    # picks up the data stashed by _implements() and applies it.
    interfaces, classImplements = cls.__dict__['__implements_advice_data__']
    del cls.__implements_advice_data__
    classImplements(cls, *interfaces)
    return cls
class implementer:
    """Declare the interfaces implemented by instances of a class.

    This function is called as a class decorator.

    The arguments are one or more interfaces or interface
    specifications (IDeclaration objects).

    The interfaces given (including the interfaces in the
    specifications) are added to any interfaces previously
    declared.

    Previous declarations include declarations for base classes
    unless implementsOnly was used.

    This function is provided for convenience. It provides a more
    convenient way to call classImplements. For example::

        @implementer(I1)
        class C(object):
            pass

    is equivalent to calling::

        classImplements(C, I1)

    after the class has been created.
    """

    def __init__(self, *interfaces):
        self.interfaces = interfaces

    def __call__(self, ob):
        # Classes get the normal classImplements treatment.
        if isinstance(ob, DescriptorAwareMetaClasses):
            classImplements(ob, *self.interfaces)
            return ob

        # Non-class objects (e.g. functions used as factories) get a
        # spec attached directly as __implemented__.
        spec_name = _implements_name(ob)
        spec = Implements.named(spec_name, *self.interfaces)
        try:
            ob.__implemented__ = spec
        except AttributeError:
            raise TypeError("Can't declare implements", ob)
        return ob
class implementer_only:
    """Declare the only interfaces implemented by instances of a class

    This function is called as a class decorator.

    The arguments are one or more interfaces or interface
    specifications (IDeclaration objects).

    Previous declarations including declarations for base classes
    are overridden.

    This function is provided for convenience. It provides a more
    convenient way to call classImplementsOnly. For example::

        @implementer_only(I1)
        class C(object): pass

    is equivalent to calling::

        classImplementsOnly(I1)

    after the class has been created.
    """

    def __init__(self, *interfaces):
        self.interfaces = interfaces

    def __call__(self, ob):
        if isinstance(ob, (FunctionType, MethodType)):
            # XXX Does this decorator make sense for anything but classes?
            # I don't think so. There can be no inheritance of interfaces
            # on a method or function....
            raise ValueError('The implementer_only decorator is not '
                             'supported for methods or functions.')
        else:
            # Assume it's a class:
            classImplementsOnly(ob, *self.interfaces)
            return ob
def _implements(name, interfaces, classImplements):
    # Shared helper for implements()/implementsOnly(): stashes the declared
    # interfaces in the class body's locals and registers a class advisor
    # that applies them once the class object exists (Python 2 only).
    # This entire approach is invalid under Py3K. Don't even try to fix
    # the coverage for this block there. :(
    frame = sys._getframe(2)
    locals = frame.f_locals

    # Try to make sure we were called from a class def. In 2.2.0 we can't
    # check for __module__ since it doesn't seem to be added to the locals
    # until later on.
    if locals is frame.f_globals or '__module__' not in locals:
        raise TypeError(name+" can be used only from a class definition.")

    if '__implements_advice_data__' in locals:
        raise TypeError(name+" can be used only once in a class definition.")

    locals['__implements_advice_data__'] = interfaces, classImplements
    advicemod.addClassAdvisor(_implements_advice, depth=3)
def implements(*interfaces):
    """Declare interfaces implemented by instances of a class

    This function is called in a class definition.

    The arguments are one or more interfaces or interface
    specifications (IDeclaration objects).

    The interfaces given (including the interfaces in the
    specifications) are added to any interfaces previously
    declared.

    Previous declarations include declarations for base classes
    unless implementsOnly was used.

    This function is provided for convenience. It provides a more
    convenient way to call classImplements. For example::

        implements(I1)

    is equivalent to calling::

        classImplements(C, I1)

    after the class has been created.
    """
    # This entire approach is invalid under Py3K. Don't even try to fix
    # the coverage for this block there. :(
    if PYTHON3:
        # Frame-based class advice cannot work on Python 3.
        raise TypeError(_ADVICE_ERROR % 'implementer')
    _implements("implements", interfaces, classImplements)
def implementsOnly(*interfaces):
    """Declare the only interfaces implemented by instances of a class

    This function is called in a class definition.

    The arguments are one or more interfaces or interface
    specifications (IDeclaration objects).

    Previous declarations including declarations for base classes
    are overridden.

    This function is provided for convenience. It provides a more
    convenient way to call classImplementsOnly. For example::

        implementsOnly(I1)

    is equivalent to calling::

        classImplementsOnly(I1)

    after the class has been created.
    """
    # This entire approach is invalid under Py3K. Don't even try to fix
    # the coverage for this block there. :(
    if PYTHON3:
        # Frame-based class advice cannot work on Python 3.
        raise TypeError(_ADVICE_ERROR % 'implementer_only')
    _implements("implementsOnly", interfaces, classImplementsOnly)
##############################################################################
#
# Instance declarations
class Provides(Declaration):  # Really named ProvidesClass
    """Implement __provides__, the instance-specific specification

    When an object is pickled, we pickle the interfaces that it implements.
    """

    def __init__(self, cls, *interfaces):
        self.__args = (cls, ) + interfaces
        self._cls = cls
        Declaration.__init__(self, *(interfaces + (implementedBy(cls), )))

    def __reduce__(self):
        # Note: refers to the module-level Provides *factory* (defined
        # below), so unpickling goes through the shared-instance cache.
        return Provides, self.__args

    __module__ = 'zope.interface'

    def __get__(self, inst, cls):
        """Make sure that a class __provides__ doesn't leak to an instance
        """
        if inst is None and cls is self._cls:
            # We were accessed through a class, so we are the class'
            # provides spec. Just return this object, but only if we are
            # being called on the same class that we were defined for:
            return self

        raise AttributeError('__provides__')
# Keep a reference to the class before the name is rebound to the
# caching factory function below.
ProvidesClass = Provides

# Registry of instance declarations
# This is a memory optimization to allow objects to share specifications.
InstanceDeclarations = weakref.WeakValueDictionary()

def Provides(*interfaces):
    """Cache instance declarations

    Instance declarations are shared among instances that have the same
    declaration. The declarations are cached in a weak value dictionary.
    """
    spec = InstanceDeclarations.get(interfaces)
    if spec is None:
        spec = ProvidesClass(*interfaces)
        InstanceDeclarations[interfaces] = spec
    return spec

# Allow unpickling to call this factory directly (see ProvidesClass.__reduce__).
Provides.__safe_for_unpickling__ = True
def directlyProvides(object, *interfaces):
    """Declare interfaces declared directly for an object

    The arguments after the object are one or more interfaces or interface
    specifications (``IDeclaration`` objects).

    The interfaces given (including the interfaces in the specifications)
    replace interfaces previously declared for the object.
    """
    cls = getattr(object, '__class__', None)
    if cls is not None and getattr(cls, '__class__', None) is cls:
        # It's a meta class (well, at least it could be an extension class)
        # Note that we can't get here from Py3k tests: there is no normal
        # class which isn't descriptor aware.
        if not isinstance(object,
                          DescriptorAwareMetaClasses):
            raise TypeError("Attempt to make an interface declaration on a "
                            "non-descriptor-aware class")

    interfaces = _normalizeargs(interfaces)
    if cls is None:
        cls = type(object)

    # Determine whether the object itself is a class/type.
    issub = False
    for damc in DescriptorAwareMetaClasses:
        if issubclass(cls, damc):
            issub = True
            break
    if issub:
        # we have a class or type. We'll use a special descriptor
        # that provides some extra caching
        object.__provides__ = ClassProvides(object, cls, *interfaces)
    else:
        object.__provides__ = Provides(cls, *interfaces)
def alsoProvides(object, *interfaces):
    """Declare interfaces declared directly for an object

    The arguments after the object are one or more interfaces or interface
    specifications (``IDeclaration`` objects).

    The interfaces given (including the interfaces in the specifications) are
    added to the interfaces previously declared for the object.
    """
    # Merge the existing direct declarations with the new interfaces.
    directlyProvides(object, directlyProvidedBy(object), *interfaces)
def noLongerProvides(object, interface):
    """ Removes a directly provided interface from an object.
    """
    # Re-declare with the interface subtracted, then verify: if the object
    # still provides the interface afterwards, it must come from the class
    # (not a direct declaration), which cannot be removed here.
    directlyProvides(object, directlyProvidedBy(object) - interface)
    if interface.providedBy(object):
        raise ValueError("Can only remove directly provided interfaces.")
class ClassProvidesBaseFallback(object):
    # Pure-Python descriptor base for class-level __provides__; a C
    # implementation may replace it below. Accessed via the class it
    # returns the spec itself, via an instance it returns the class'
    # implementedBy spec.

    def __get__(self, inst, cls):
        if cls is self._cls:
            # We only work if called on the class we were defined for

            if inst is None:
                # We were accessed through a class, so we are the class'
                # provides spec. Just return this object as is:
                return self

            return self._implements

        raise AttributeError('__provides__')

ClassProvidesBasePy = ClassProvidesBaseFallback # BBB
ClassProvidesBase = ClassProvidesBaseFallback
# Try to get C base:
try:
from ._zope_interface_coptimizations import ClassProvidesBase
except ImportError:
pass
class ClassProvides(Declaration, ClassProvidesBase):
    """Special descriptor for class __provides__

    The descriptor caches the implementedBy info, so that
    we can get declarations for objects without instance-specific
    interfaces a bit quicker.
    """

    def __init__(self, cls, metacls, *interfaces):
        self._cls = cls
        # Cached so __get__ can answer instance lookups without recomputing.
        self._implements = implementedBy(cls)
        self.__args = (cls, metacls, ) + interfaces
        Declaration.__init__(self, *(interfaces + (implementedBy(metacls), )))

    def __reduce__(self):
        return self.__class__, self.__args

    # Copy base-class method for speed
    __get__ = ClassProvidesBase.__get__
def directlyProvidedBy(object):
    """Return the interfaces directly provided by the given object.

    The value returned is an ``IDeclaration``.
    """
    spec = getattr(object, "__provides__", None)
    # No spec at all, or we were handed the class-level implements spec
    # (an optimization); either way there are no *direct* declarations.
    if spec is None or isinstance(spec, Implements):
        return _empty
    # The last base of the spec is the class-supplied part; lop it off so
    # only the directly-declared interfaces remain.
    return Declaration(spec.__bases__[:-1])
def classProvides(*interfaces):
    """Declare interfaces provided directly by a class

    This function is called in a class definition (Python 2 only).

    The arguments are one or more interfaces or interface specifications
    (``IDeclaration`` objects).

    The given interfaces (including the interfaces in the specifications)
    are used to create the class's direct-object interface specification.

    An error will be raised if the class already has a direct interface
    specification. In other words, it is an error to call this function more
    than once in a class definition.

    Note that the given interfaces have nothing to do with the interfaces
    implemented by instances of the class.

    This function is provided for convenience. It provides a more convenient
    way to call directlyProvides for a class. For example::

        classProvides(I1)

    is equivalent to calling::

        directlyProvides(theclass, I1)

    after the class has been created.
    """
    # This entire approach is invalid under Py3K. Don't even try to fix
    # the coverage for this block there. :(
    if PYTHON3:
        raise TypeError(_ADVICE_ERROR % 'provider')
    # Inspect the caller's frame to find the class body being executed.
    frame = sys._getframe(1)
    locals = frame.f_locals
    # Try to make sure we were called from a class def: a class body has its
    # own locals (distinct from globals) and a __module__ entry.
    if (locals is frame.f_globals) or ('__module__' not in locals):
        raise TypeError("classProvides can be used only from a "
                        "class definition.")
    if '__provides__' in locals:
        raise TypeError(
            "classProvides can only be used once in a class definition.")
    # Stash the normalized interfaces; the class advisor below converts them
    # into a real declaration once the class object exists.
    locals["__provides__"] = _normalizeargs(interfaces)
    advicemod.addClassAdvisor(_classProvides_advice, depth=2)
def _classProvides_advice(cls):
    # Class advisor installed by classProvides(): runs after the class is
    # created, replacing the temporary list stashed in the class body with a
    # real direct-provides declaration.
    # This entire approach is invalid under Py3K. Don't even try to fix
    # the coverage for this block there. :(
    interfaces = cls.__dict__['__provides__']
    # Remove the placeholder so directlyProvides can install the real spec.
    del cls.__provides__
    directlyProvides(cls, *interfaces)
    return cls
class provider:
    """Class decorator equivalent of :func:`classProvides`.

    Usage::

        @provider(IFoo)
        class Bar(object):
            ...
    """
    def __init__(self, *interfaces):
        # Remember the interfaces until the decorated object arrives.
        self.interfaces = interfaces
    def __call__(self, ob):
        # Declare the stored interfaces directly on the decorated object,
        # then hand the object back unchanged.
        directlyProvides(ob, *self.interfaces)
        return ob
def moduleProvides(*interfaces):
    """Declare interfaces provided by a module

    This function is used in a module definition.

    The arguments are one or more interfaces or interface specifications
    (``IDeclaration`` objects).

    The given interfaces (including the interfaces in the specifications) are
    used to create the module's direct-object interface specification. An
    error will be raised if the module already has an interface specification.
    In other words, it is an error to call this function more than once in a
    module definition.

    This function is provided for convenience. It provides a more convenient
    way to call directlyProvides. For example::

        moduleProvides(I1)

    is equivalent to::

        directlyProvides(sys.modules[__name__], I1)
    """
    # Inspect the caller's frame to find the module body being executed.
    frame = sys._getframe(1)
    locals = frame.f_locals
    # Try to make sure we were called from a module body (not a class def):
    # at module level, locals and globals are the same dict.
    if (locals is not frame.f_globals) or ('__name__' not in locals):
        raise TypeError(
            "moduleProvides can only be used from a module definition.")
    if '__provides__' in locals:
        raise TypeError(
            "moduleProvides can only be used once in a module definition.")
    # Install the declaration directly into the module's namespace.
    locals["__provides__"] = Provides(ModuleType,
                                      *_normalizeargs(interfaces))
##############################################################################
#
# Declaration querying support
# XXX: is this a fossil? Nobody calls it, no unit tests exercise it, no
# doctests import it, and the package __init__ doesn't import it.
def ObjectSpecification(direct, cls):
    """Provide object specifications

    These combine information for the object and for its classes.

    NOTE: apparently a fossil — nothing in the package calls or exports it
    (see the XXX comment above).
    """
    return Provides(cls, direct) # pragma: no cover fossil
def getObjectSpecificationFallback(ob):
    """Return the interface specification for ``ob``.

    Prefer an instance-level ``__provides__`` when it is a genuine
    specification; otherwise fall back to the interfaces implemented by the
    object's class, or the empty declaration if the object has no class.
    """
    spec = getattr(ob, '__provides__', None)
    if spec is not None and isinstance(spec, SpecificationBase):
        return spec
    cls = getattr(ob, '__class__', None)
    if cls is None:
        # We can't get the class, so just consider provides
        return _empty
    return implementedBy(cls)
getObjectSpecification = getObjectSpecificationFallback
def providedByFallback(ob):
    """Return the interfaces provided by ``ob``.

    Pure-Python fallback for the C-optimized ``providedBy``.  Handles
    objects whose classes do or do not support descriptors, as well as
    old-style declarations.
    """
    # Here we have either a special object, an old-style declaration
    # or a descriptor

    # Try to get __providedBy__
    try:
        r = ob.__providedBy__
    except AttributeError:
        # Not set yet. Fall back to lower-level thing that computes it
        return getObjectSpecification(ob)
    try:
        # We might have gotten a descriptor from an instance of a
        # class (like an ExtensionClass) that doesn't support
        # descriptors. We'll make sure we got one by trying to get
        # the only attribute, which all specs have.
        r.extends
    except AttributeError:
        # The object's class doesn't understand descriptors.
        # Sigh. We need to get an object descriptor, but we have to be
        # careful. We want to use the instance's __provides__, if
        # there is one, but only if it didn't come from the class.
        try:
            r = ob.__provides__
        except AttributeError:
            # No __provides__, so just fall back to implementedBy
            return implementedBy(ob.__class__)
        # We need to make sure we got the __provides__ from the
        # instance. We'll do this by making sure we don't get the same
        # thing from the class:
        try:
            cp = ob.__class__.__provides__
        except AttributeError:
            # The ob doesn't have a class or the class has no
            # provides, assume we're done:
            return r
        if r is cp:
            # Oops, we got the provides from the class. This means
            # the object doesn't have its own. We should use implementedBy
            return implementedBy(ob.__class__)
    return r
providedBy = providedByFallback
class ObjectSpecificationDescriptorFallback(object):
    """Implement the `__providedBy__` attribute

    The `__providedBy__` attribute computes the interfaces provided by
    an object.
    """
    def __get__(self, inst, cls):
        """Get an object specification for an object
        """
        if inst is None:
            # Accessed through the class itself: report the class's spec.
            return getObjectSpecification(cls)
        # Prefer an instance-level direct declaration when present ...
        provides = getattr(inst, '__provides__', None)
        if provides is not None:
            return provides
        # ... otherwise fall back to what the class implements.
        return implementedBy(cls)
ObjectSpecificationDescriptor = ObjectSpecificationDescriptorFallback
##############################################################################
def _normalizeargs(sequence, output = None):
    """Normalize declaration arguments

    Normalization arguments might contain Declarations, tuples, or single
    interfaces.

    Anything but individual interfaces or implements specs will be expanded.
    Returns a flat list; ``output`` is the accumulator used during recursion.
    """
    if output is None:
        output = []
    cls = sequence.__class__
    # Interfaces and Implements specs are leaves; anything else is treated
    # as an iterable of further arguments and flattened recursively.
    if InterfaceClass in cls.__mro__ or Implements in cls.__mro__:
        output.append(sequence)
    else:
        for v in sequence:
            _normalizeargs(v, output)
    return output
# Shared singleton for "no interfaces declared".
_empty = Declaration()
# Swap in the C-optimized implementations when available; the pure-Python
# fallbacks defined above remain in use otherwise.
try:
    from ._zope_interface_coptimizations import (
        getObjectSpecification,
        implementedBy,
        ObjectSpecificationDescriptor,
        providedBy,
    )
except ImportError:
    pass
# Module-level descriptor instance used when installing __providedBy__.
objectSpecificationDescriptor = ObjectSpecificationDescriptor()
| smmribeiro/intellij-community | plugins/hg4idea/testData/bin/mercurial/thirdparty/zope/interface/declarations.py | Python | apache-2.0 | 30,880 |
/*! ******************************************************************************
*
* Pentaho Data Integration
*
* Copyright (C) 2002-2017 by Hitachi Vantara : http://www.pentaho.com
*
*******************************************************************************
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
******************************************************************************/
package org.pentaho.di.trans.steps.missing;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import org.junit.Test;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.pentaho.di.core.logging.LogChannel;
import org.pentaho.di.core.util.AbstractStepMeta;
import org.pentaho.di.trans.Trans;
import org.pentaho.di.trans.TransMeta;
import org.pentaho.di.trans.step.StepDataInterface;
import org.pentaho.di.trans.step.StepInterface;
import org.pentaho.di.trans.step.StepMeta;
import org.pentaho.di.trans.step.StepMetaInterface;
import org.pentaho.di.trans.steps.StepMockUtil;
import org.pentaho.di.trans.steps.datagrid.DataGridMeta;
import org.pentaho.di.trans.steps.mock.StepMockHelper;
import static org.junit.Assert.assertFalse;
/**
 * Unit tests for {@code MissingTransStep}, the placeholder step used when a
 * transformation references a step plugin that is not installed.
 */
public class MissingTransStepTest {
  @Test
  public void testInit() {
    // A meta that deliberately returns no StepInterface, standing in for a
    // missing plugin implementation.
    StepMetaInterface stepMetaInterface = new AbstractStepMeta() {
      @Override
      public void setDefault() { }
      @Override
      public StepInterface getStep( StepMeta stepMeta, StepDataInterface stepDataInterface, int copyNr,
                                    TransMeta transMeta,
                                    Trans trans ) {
        return null;
      }
    };
    StepMeta stepMeta = new StepMeta();
    stepMeta.setName( "TestMetaStep" );
    StepDataInterface stepDataInterface = mock( StepDataInterface.class );
    Trans trans = new Trans();
    // Stub the log channel so the error logged during init() is swallowed
    // instead of polluting test output.
    LogChannel log = mock( LogChannel.class );
    doAnswer( new Answer<Void>() {
      public Void answer( InvocationOnMock invocation ) {
        return null;
      }
    } ).when( log ).logError( anyString() );
    trans.setLog( log );
    TransMeta transMeta = new TransMeta();
    transMeta.addStep( stepMeta );
    // init() must report failure (false) for a missing step.
    MissingTransStep step = createAndInitStep( stepMetaInterface, stepDataInterface );
    assertFalse( step.init( stepMetaInterface, stepDataInterface ) );
  }
  /**
   * Builds a MissingTransStep around mocked transformation scaffolding and
   * runs init() once before returning it.
   */
  private MissingTransStep createAndInitStep( StepMetaInterface meta, StepDataInterface data ) {
    StepMockHelper<DataGridMeta, StepDataInterface> helper =
        StepMockUtil.getStepMockHelper( DataGridMeta.class, "DataGrid_EmptyStringVsNull_Test" );
    when( helper.stepMeta.getStepMetaInterface() ).thenReturn( meta );
    MissingTransStep step = new MissingTransStep( helper.stepMeta, data, 0, helper.transMeta, helper.trans );
    step.init( meta, data );
    return step;
  }
}
| pavel-sakun/pentaho-kettle | engine/src/test/java/org/pentaho/di/trans/steps/missing/MissingTransStepTest.java | Java | apache-2.0 | 3,423 |
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.ingest.geoip;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractSerializingTestCase;
import java.io.IOException;
/**
 * Round-trip (wire and XContent) serialization tests for {@link GeoIpTaskState}.
 */
public class GeoIpTaskStateSerializationTests extends AbstractSerializingTestCase<GeoIpTaskState> {

    @Override
    protected GeoIpTaskState doParseInstance(XContentParser parser) throws IOException {
        return GeoIpTaskState.fromXContent(parser);
    }

    @Override
    protected Writeable.Reader<GeoIpTaskState> instanceReader() {
        return GeoIpTaskState::new;
    }

    @Override
    protected GeoIpTaskState createTestInstance() {
        // Fold a random number of randomly-named database entries onto the
        // empty state; put() returns a new state each time.
        GeoIpTaskState result = GeoIpTaskState.EMPTY;
        final int entries = randomInt(20);
        for (int added = 0; added < entries; added++) {
            GeoIpTaskState.Metadata meta =
                new GeoIpTaskState.Metadata(randomLong(), randomInt(), randomInt(), randomAlphaOfLength(32));
            result = result.put(randomAlphaOfLengthBetween(5, 10), meta);
        }
        return result;
    }
}
| robin13/elasticsearch | modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpTaskStateSerializationTests.java | Java | apache-2.0 | 1,449 |
/*
* Copyright 2017 StreamSets Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.streamsets.datacollector.restapi.bean;
import com.streamsets.datacollector.restapi.bean.ConfigConfigurationJson;
import com.streamsets.pipeline.api.Config;
import org.junit.Assert;
import org.junit.Test;
/**
 * Tests that {@link ConfigConfigurationJson} faithfully mirrors the
 * {@link Config} it wraps, for both of its constructors.
 */
public class TestConfigConfigurationBean {

  @Test(expected = NullPointerException.class)
  public void testConfigConfigurationBeanNull() {
    // Constructing the JSON bean from a null Config must throw NPE.
    new ConfigConfigurationJson(null);
  }

  @Test
  public void testConfigConfigurationBean() {
    Config config = new Config("url", "http://localhost:9090");
    assertBeanMatches(config, new ConfigConfigurationJson(config));
  }

  @Test
  public void testConfigConfigurationBeanConstructorWithArgs() {
    Config config = new Config("url", "http://localhost:9090");
    assertBeanMatches(config, new ConfigConfigurationJson("url", "http://localhost:9090"));
  }

  /** Asserts the bean and its wrapped Config both expose the expected name/value. */
  private static void assertBeanMatches(Config expected, ConfigConfigurationJson bean) {
    Assert.assertEquals(expected.getName(), bean.getName());
    Assert.assertEquals(expected.getValue(), bean.getValue());
    Assert.assertEquals(expected.getName(), bean.getConfigConfiguration().getName());
    Assert.assertEquals(expected.getValue(), bean.getConfigConfiguration().getValue());
  }
}
| rockmkd/datacollector | container/src/test/java/com/streamsets/datacollector/restapi/bean/TestConfigConfigurationBean.java | Java | apache-2.0 | 2,338 |
using System;
namespace Benchmarks.Models
{
    /// <summary>
    /// Plain data model representing a user, used as a mapping source/target
    /// in the benchmarks.
    /// </summary>
    public class User
    {
        /// <summary>Unique identifier.</summary>
        public Guid Id { get; set; }
        public string UserName { get; set; }
        public string Email { get; set; }
        public int Age { get; set; }
        public string Address { get; set; }
        /// <summary>Whether the account is currently active.</summary>
        public bool Active { get; set; }
        /// <summary>Soft-delete flag.</summary>
        public bool Deleted { get; set; }
        public DateTime CreatedOn { get; set; }
        /// <summary>Associated role (project-defined type).</summary>
        public Role Role { get; set; }
    }
}
| Excommunicated/ExpressMapper | PerformanceTest/Models/User.cs | C# | apache-2.0 | 460 |
// Sphinx build options consumed by the theme's JavaScript (searchtools.js
// and friends); values are emitted by the Sphinx build for this doc version.
var DOCUMENTATION_OPTIONS = {
    // Relative path back to the documentation root, read from the DOM.
    URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'),
    VERSION: '5.3.4',
    LANGUAGE: 'None',
    COLLAPSE_INDEX: false,
    FILE_SUFFIX: '.html',
    HAS_SOURCE: true,
    SOURCELINK_SUFFIX: '.txt'
}; | tst-eclamar/earthenterprise | docs/geedocs/5.3.4/static/documentation_options.js | JavaScript | apache-2.0 | 275 |
/* Generated by camel build tools - do NOT edit this file! */
package org.apache.camel.component.aws2.eventbridge;
import java.util.Map;
import org.apache.camel.CamelContext;
import org.apache.camel.spi.ExtendedPropertyConfigurerGetter;
import org.apache.camel.spi.PropertyConfigurerGetter;
import org.apache.camel.spi.ConfigurerStrategy;
import org.apache.camel.spi.GeneratedPropertyConfigurer;
import org.apache.camel.util.CaseInsensitiveMap;
import org.apache.camel.support.component.PropertyConfigurerSupport;
/**
* Generated by camel build tools - do NOT edit this file!
*/
@SuppressWarnings("unchecked")
@SuppressWarnings("unchecked")
public class EventbridgeComponentConfigurer extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {

    // Lazily creates the component-level configuration so that nested
    // options can be set before a configuration object is injected.
    private org.apache.camel.component.aws2.eventbridge.EventbridgeConfiguration getOrCreateConfiguration(EventbridgeComponent target) {
        if (target.getConfiguration() == null) {
            target.setConfiguration(new org.apache.camel.component.aws2.eventbridge.EventbridgeConfiguration());
        }
        return target.getConfiguration();
    }

    // Sets one property by name (both all-lowercase and camelCase keys are
    // accepted); returns false when the name is unknown.
    @Override
    public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
        EventbridgeComponent target = (EventbridgeComponent) obj;
        switch (ignoreCase ? name.toLowerCase() : name) {
        case "accesskey":
        case "accessKey": getOrCreateConfiguration(target).setAccessKey(property(camelContext, java.lang.String.class, value)); return true;
        case "autowiredenabled":
        case "autowiredEnabled": target.setAutowiredEnabled(property(camelContext, boolean.class, value)); return true;
        case "configuration": target.setConfiguration(property(camelContext, org.apache.camel.component.aws2.eventbridge.EventbridgeConfiguration.class, value)); return true;
        case "eventpatternfile":
        case "eventPatternFile": getOrCreateConfiguration(target).setEventPatternFile(property(camelContext, java.lang.String.class, value)); return true;
        case "eventbridgeclient":
        case "eventbridgeClient": getOrCreateConfiguration(target).setEventbridgeClient(property(camelContext, software.amazon.awssdk.services.eventbridge.EventBridgeClient.class, value)); return true;
        case "lazystartproducer":
        case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
        case "operation": getOrCreateConfiguration(target).setOperation(property(camelContext, org.apache.camel.component.aws2.eventbridge.EventbridgeOperations.class, value)); return true;
        case "overrideendpoint":
        case "overrideEndpoint": getOrCreateConfiguration(target).setOverrideEndpoint(property(camelContext, boolean.class, value)); return true;
        case "pojorequest":
        case "pojoRequest": getOrCreateConfiguration(target).setPojoRequest(property(camelContext, boolean.class, value)); return true;
        case "proxyhost":
        case "proxyHost": getOrCreateConfiguration(target).setProxyHost(property(camelContext, java.lang.String.class, value)); return true;
        case "proxyport":
        case "proxyPort": getOrCreateConfiguration(target).setProxyPort(property(camelContext, java.lang.Integer.class, value)); return true;
        case "proxyprotocol":
        case "proxyProtocol": getOrCreateConfiguration(target).setProxyProtocol(property(camelContext, software.amazon.awssdk.core.Protocol.class, value)); return true;
        case "region": getOrCreateConfiguration(target).setRegion(property(camelContext, java.lang.String.class, value)); return true;
        case "secretkey":
        case "secretKey": getOrCreateConfiguration(target).setSecretKey(property(camelContext, java.lang.String.class, value)); return true;
        case "trustallcertificates":
        case "trustAllCertificates": getOrCreateConfiguration(target).setTrustAllCertificates(property(camelContext, boolean.class, value)); return true;
        case "uriendpointoverride":
        case "uriEndpointOverride": getOrCreateConfiguration(target).setUriEndpointOverride(property(camelContext, java.lang.String.class, value)); return true;
        case "usedefaultcredentialsprovider":
        case "useDefaultCredentialsProvider": getOrCreateConfiguration(target).setUseDefaultCredentialsProvider(property(camelContext, boolean.class, value)); return true;
        default: return false;
        }
    }

    // Options whose values may be autowired from the registry by type.
    @Override
    public String[] getAutowiredNames() {
        return new String[]{"eventbridgeClient"};
    }

    // Returns the declared type of the named option, or null if unknown.
    @Override
    public Class<?> getOptionType(String name, boolean ignoreCase) {
        switch (ignoreCase ? name.toLowerCase() : name) {
        case "accesskey":
        case "accessKey": return java.lang.String.class;
        case "autowiredenabled":
        case "autowiredEnabled": return boolean.class;
        case "configuration": return org.apache.camel.component.aws2.eventbridge.EventbridgeConfiguration.class;
        case "eventpatternfile":
        case "eventPatternFile": return java.lang.String.class;
        case "eventbridgeclient":
        case "eventbridgeClient": return software.amazon.awssdk.services.eventbridge.EventBridgeClient.class;
        case "lazystartproducer":
        case "lazyStartProducer": return boolean.class;
        case "operation": return org.apache.camel.component.aws2.eventbridge.EventbridgeOperations.class;
        case "overrideendpoint":
        case "overrideEndpoint": return boolean.class;
        case "pojorequest":
        case "pojoRequest": return boolean.class;
        case "proxyhost":
        case "proxyHost": return java.lang.String.class;
        case "proxyport":
        case "proxyPort": return java.lang.Integer.class;
        case "proxyprotocol":
        case "proxyProtocol": return software.amazon.awssdk.core.Protocol.class;
        case "region": return java.lang.String.class;
        case "secretkey":
        case "secretKey": return java.lang.String.class;
        case "trustallcertificates":
        case "trustAllCertificates": return boolean.class;
        case "uriendpointoverride":
        case "uriEndpointOverride": return java.lang.String.class;
        case "usedefaultcredentialsprovider":
        case "useDefaultCredentialsProvider": return boolean.class;
        default: return null;
        }
    }

    // Returns the current value of the named option, or null if unknown.
    @Override
    public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
        EventbridgeComponent target = (EventbridgeComponent) obj;
        switch (ignoreCase ? name.toLowerCase() : name) {
        case "accesskey":
        case "accessKey": return getOrCreateConfiguration(target).getAccessKey();
        case "autowiredenabled":
        case "autowiredEnabled": return target.isAutowiredEnabled();
        case "configuration": return target.getConfiguration();
        case "eventpatternfile":
        case "eventPatternFile": return getOrCreateConfiguration(target).getEventPatternFile();
        case "eventbridgeclient":
        case "eventbridgeClient": return getOrCreateConfiguration(target).getEventbridgeClient();
        case "lazystartproducer":
        case "lazyStartProducer": return target.isLazyStartProducer();
        case "operation": return getOrCreateConfiguration(target).getOperation();
        case "overrideendpoint":
        case "overrideEndpoint": return getOrCreateConfiguration(target).isOverrideEndpoint();
        case "pojorequest":
        case "pojoRequest": return getOrCreateConfiguration(target).isPojoRequest();
        case "proxyhost":
        case "proxyHost": return getOrCreateConfiguration(target).getProxyHost();
        case "proxyport":
        case "proxyPort": return getOrCreateConfiguration(target).getProxyPort();
        case "proxyprotocol":
        case "proxyProtocol": return getOrCreateConfiguration(target).getProxyProtocol();
        case "region": return getOrCreateConfiguration(target).getRegion();
        case "secretkey":
        case "secretKey": return getOrCreateConfiguration(target).getSecretKey();
        case "trustallcertificates":
        case "trustAllCertificates": return getOrCreateConfiguration(target).isTrustAllCertificates();
        case "uriendpointoverride":
        case "uriEndpointOverride": return getOrCreateConfiguration(target).getUriEndpointOverride();
        case "usedefaultcredentialsprovider":
        case "useDefaultCredentialsProvider": return getOrCreateConfiguration(target).isUseDefaultCredentialsProvider();
        default: return null;
        }
    }
}
| pax95/camel | components/camel-aws/camel-aws2-eventbridge/src/generated/java/org/apache/camel/component/aws2/eventbridge/EventbridgeComponentConfigurer.java | Java | apache-2.0 | 8,583 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.*;
import java.security.Permission;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Scanner;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.GZIPOutputStream;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.BZip2Codec;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Test;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.not;
import static org.junit.Assert.*;
import static org.hamcrest.core.StringContains.containsString;
import com.google.common.collect.Lists;
/**
* This class tests commands from DFSShell.
*/
public class TestDFSShell {
  private static final Log LOG = LogFactory.getLog(TestDFSShell.class);
  // Used to generate unique names/paths across test methods.
  private static final AtomicInteger counter = new AtomicInteger();
  // Expected FsShell exit codes.
  private final int SUCCESS = 0;
  private final int ERROR = 1;

  static final String TEST_ROOT_DIR = PathUtils.getTestDirName(TestDFSShell.class);

  // Xattr names (one per namespace) and their values, used by the xattr tests.
  private static final String RAW_A1 = "raw.a1";
  private static final String TRUSTED_A1 = "trusted.a1";
  private static final String USER_A1 = "user.a1";
  private static final byte[] RAW_A1_VALUE = new byte[]{0x32, 0x32, 0x32};
  private static final byte[] TRUSTED_A1_VALUE = new byte[]{0x31, 0x31, 0x31};
  private static final byte[] USER_A1_VALUE = new byte[]{0x31, 0x32, 0x33};
static Path writeFile(FileSystem fs, Path f) throws IOException {
DataOutputStream out = fs.create(f);
out.writeBytes("dhruba: " + f);
out.close();
assertTrue(fs.exists(f));
return f;
}
static Path writeByte(FileSystem fs, Path f) throws IOException {
DataOutputStream out = fs.create(f);
out.writeByte(1);
out.close();
assertTrue(fs.exists(f));
return f;
}
  /**
   * Creates directory {@code p} on {@code fs} and asserts it exists and is a
   * directory.
   *
   * @return the directory path, for chaining
   */
  static Path mkdir(FileSystem fs, Path p) throws IOException {
    assertTrue(fs.mkdirs(p));
    assertTrue(fs.exists(p));
    assertTrue(fs.getFileStatus(p).isDirectory());
    return p;
  }
static File createLocalFile(File f) throws IOException {
assertTrue(!f.exists());
PrintWriter out = new PrintWriter(f);
out.print("createLocalFile: " + f.getAbsolutePath());
out.flush();
out.close();
assertTrue(f.exists());
assertTrue(f.isFile());
return f;
}
static File createLocalFileWithRandomData(int fileLength, File f)
throws IOException {
assertTrue(!f.exists());
f.createNewFile();
FileOutputStream out = new FileOutputStream(f.toString());
byte[] buffer = new byte[fileLength];
out.write(buffer);
out.flush();
out.close();
return f;
}
  /**
   * Prints {@code s} prefixed with the caller's stack frame, for quick
   * test-debug tracing.
   */
  static void show(String s) {
    // Index 2 in the stack trace is the method that called show().
    System.out.println(Thread.currentThread().getStackTrace()[2] + " " + s);
  }
  /**
   * Verifies that a zero-length local file can be copied to HDFS and back,
   * remaining empty, and that getBlockSize() works on an empty remote file.
   */
  @Test (timeout = 30000)
  public void testZeroSizeFile() throws IOException {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    FileSystem fs = cluster.getFileSystem();
    assertTrue("Not a HDFS: "+fs.getUri(),
               fs instanceof DistributedFileSystem);
    final DistributedFileSystem dfs = (DistributedFileSystem)fs;

    try {
      //create a zero size file
      final File f1 = new File(TEST_ROOT_DIR, "f1");
      assertTrue(!f1.exists());
      assertTrue(f1.createNewFile());
      assertTrue(f1.exists());
      assertTrue(f1.isFile());
      assertEquals(0L, f1.length());

      //copy to remote
      final Path root = mkdir(dfs, new Path("/test/zeroSizeFile"));
      final Path remotef = new Path(root, "dst");
      show("copy local " + f1 + " to remote " + remotef);
      dfs.copyFromLocalFile(false, false, new Path(f1.getPath()), remotef);

      //getBlockSize() should not throw exception
      show("Block size = " + dfs.getFileStatus(remotef).getBlockSize());

      //copy back
      final File f2 = new File(TEST_ROOT_DIR, "f2");
      assertTrue(!f2.exists());
      dfs.copyToLocalFile(remotef, new Path(f2.getPath()));
      assertTrue(f2.exists());
      assertTrue(f2.isFile());
      assertEquals(0L, f2.length());

      f1.delete();
      f2.delete();
    } finally {
      try {dfs.close();} catch (Exception e) {}
      cluster.shutdown();
    }
  }
  /**
   * Verifies delete semantics on a non-empty directory: non-recursive delete
   * must throw IOException, recursive delete must succeed.
   */
  @Test (timeout = 30000)
  public void testRecursiveRm() throws IOException {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    FileSystem fs = cluster.getFileSystem();
    assertTrue("Not a HDFS: " + fs.getUri(),
               fs instanceof DistributedFileSystem);
    try {
      fs.mkdirs(new Path(new Path("parent"), "child"));
      try {
        fs.delete(new Path("parent"), false);
        assert(false); // should never reach here.
      } catch(IOException e) {
        //should have thrown an exception
      }
      try {
        fs.delete(new Path("parent"), true);
      } catch(IOException e) {
        assert(false);
      }
    } finally {
      try { fs.close();}catch(IOException e){};
      cluster.shutdown();
    }
  }
  /**
   * Exercises the shell's {@code -du} command: per-file sizes and disk usage
   * (size * replication), {@code -du -s} against a snapshot (which must still
   * include a file deleted after the snapshot), and output rendering with
   * multiple path arguments.  System.out is captured to inspect the output.
   */
  @Test (timeout = 30000)
  public void testDu() throws IOException {
    int replication = 2;
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(replication).build();
    DistributedFileSystem fs = cluster.getFileSystem();
    PrintStream psBackup = System.out;
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    PrintStream psOut = new PrintStream(out);
    System.setOut(psOut);
    FsShell shell = new FsShell();
    shell.setConf(conf);

    try {
      Path myPath = new Path("/test/dir");
      assertTrue(fs.mkdirs(myPath));
      assertTrue(fs.exists(myPath));
      Path myFile = new Path("/test/dir/file");
      writeFile(fs, myFile);
      assertTrue(fs.exists(myFile));
      Path myFile2 = new Path("/test/dir/file2");
      writeFile(fs, myFile2);
      assertTrue(fs.exists(myFile2));
      Long myFileLength = fs.getFileStatus(myFile).getLen();
      Long myFileDiskUsed = myFileLength * replication;
      Long myFile2Length = fs.getFileStatus(myFile2).getLen();
      Long myFile2DiskUsed = myFile2Length * replication;

      String[] args = new String[2];
      args[0] = "-du";
      args[1] = "/test/dir";
      int val = -1;
      try {
        val = shell.run(args);
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
                           e.getLocalizedMessage());
      }
      assertTrue(val == 0);
      String returnString = out.toString();
      out.reset();
      // Check if size matchs as expected
      assertThat(returnString, containsString(myFileLength.toString()));
      assertThat(returnString, containsString(myFileDiskUsed.toString()));
      assertThat(returnString, containsString(myFile2Length.toString()));
      assertThat(returnString, containsString(myFile2DiskUsed.toString()));

      // Check that -du -s reports the state of the snapshot
      String snapshotName = "ss1";
      Path snapshotPath = new Path(myPath, ".snapshot/" + snapshotName);
      fs.allowSnapshot(myPath);
      assertThat(fs.createSnapshot(myPath, snapshotName), is(snapshotPath));
      assertThat(fs.delete(myFile, false), is(true));
      assertThat(fs.exists(myFile), is(false));

      args = new String[3];
      args[0] = "-du";
      args[1] = "-s";
      args[2] = snapshotPath.toString();
      val = -1;
      try {
        val = shell.run(args);
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
                           e.getLocalizedMessage());
      }
      assertThat(val, is(0));
      returnString = out.toString();
      out.reset();
      // The snapshot still contains the deleted file, so totals include it.
      Long combinedLength = myFileLength + myFile2Length;
      Long combinedDiskUsed = myFileDiskUsed + myFile2DiskUsed;
      assertThat(returnString, containsString(combinedLength.toString()));
      assertThat(returnString, containsString(combinedDiskUsed.toString()));

      // Check if output is rendered properly with multiple input paths
      Path myFile3 = new Path("/test/dir/file3");
      writeByte(fs, myFile3);
      assertTrue(fs.exists(myFile3));
      args = new String[3];
      args[0] = "-du";
      args[1] = "/test/dir/file3";
      args[2] = "/test/dir/file2";
      val = -1;
      try {
        val = shell.run(args);
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
                           e.getLocalizedMessage());
      }
      assertEquals("Return code should be 0.", 0, val);
      returnString = out.toString();
      out.reset();
      assertTrue(returnString.contains("1 2 /test/dir/file3"));
      assertTrue(returnString.contains("23 46 /test/dir/file2"));
    } finally {
      System.setOut(psBackup);
      cluster.shutdown();
    }
  }
  /**
   * Tests {@code put} / {@code copyFromLocalFile}:
   * <ul>
   * <li>Two concurrent copies to the same destination: the second copy
   *     must fail with an IOException while the first is in flight. A
   *     custom SecurityManager is (ab)used to pause the first copy at a
   *     known point so the race is deterministic.</li>
   * <li>Copying multiple local sources into a destination directory.</li>
   * <li>Moving multiple local sources (sources must be removed).</li>
   * </ul>
   */
  @Test (timeout = 30000)
  public void testPut() throws IOException {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    FileSystem fs = cluster.getFileSystem();
    assertTrue("Not a HDFS: "+fs.getUri(),
        fs instanceof DistributedFileSystem);
    final DistributedFileSystem dfs = (DistributedFileSystem)fs;

    try {
      // remove left over crc files:
      new File(TEST_ROOT_DIR, ".f1.crc").delete();
      new File(TEST_ROOT_DIR, ".f2.crc").delete();
      final File f1 = createLocalFile(new File(TEST_ROOT_DIR, "f1"));
      final File f2 = createLocalFile(new File(TEST_ROOT_DIR, "f2"));
      final Path root = mkdir(dfs, new Path("/test/put"));
      final Path dst = new Path(root, "dst");
      show("begin");

      // Second copy to the same destination; it is expected to fail with
      // an IOException because the first copy is still in progress.
      final Thread copy2ndFileThread = new Thread() {
        @Override
        public void run() {
          try {
            show("copy local " + f2 + " to remote " + dst);
            dfs.copyFromLocalFile(false, false, new Path(f2.getPath()), dst);
          } catch (IOException ioe) {
            show("good " + StringUtils.stringifyException(ioe));
            return;
          }
          //should not be here, must got IOException
          assertTrue(false);
        }
      };

      //use SecurityManager to pause the copying of f1 and begin copying f2
      SecurityManager sm = System.getSecurityManager();
      System.out.println("SecurityManager = " + sm);
      System.setSecurityManager(new SecurityManager() {
        // Only the first matching permission check triggers the pause.
        private boolean firstTime = true;

        @Override
        public void checkPermission(Permission perm) {
          if (firstTime) {
            Thread t = Thread.currentThread();
            // Ignore checks coming from DataNode threads; we only want to
            // pause the client-side copy.
            if (!t.toString().contains("DataNode")) {
              String s = "" + Arrays.asList(t.getStackTrace());
              if (s.contains("FileUtil.copyContent")) {
                //pause at FileUtil.copyContent
                firstTime = false;
                copy2ndFileThread.start();
                try {Thread.sleep(5000);} catch (InterruptedException e) {}
              }
            }
          }
        }
      });
      show("copy local " + f1 + " to remote " + dst);
      dfs.copyFromLocalFile(false, false, new Path(f1.getPath()), dst);
      show("done");

      try {copy2ndFileThread.join();} catch (InterruptedException e) { }
      // Restore the original SecurityManager before continuing.
      System.setSecurityManager(sm);

      // copy multiple files to destination directory
      final Path destmultiple = mkdir(dfs, new Path("/test/putmultiple"));
      Path[] srcs = new Path[2];
      srcs[0] = new Path(f1.getPath());
      srcs[1] = new Path(f2.getPath());
      dfs.copyFromLocalFile(false, false, srcs, destmultiple);
      srcs[0] = new Path(destmultiple,"f1");
      srcs[1] = new Path(destmultiple,"f2");
      assertTrue(dfs.exists(srcs[0]));
      assertTrue(dfs.exists(srcs[1]));

      // move multiple files to destination directory
      final Path destmultiple2 = mkdir(dfs, new Path("/test/movemultiple"));
      srcs[0] = new Path(f1.getPath());
      srcs[1] = new Path(f2.getPath());
      dfs.moveFromLocalFile(srcs, destmultiple2);
      // A move must delete the local source files.
      assertFalse(f1.exists());
      assertFalse(f2.exists());
      srcs[0] = new Path(destmultiple2, "f1");
      srcs[1] = new Path(destmultiple2, "f2");
      assertTrue(dfs.exists(srcs[0]));
      assertTrue(dfs.exists(srcs[1]));

      f1.delete();
      f2.delete();
    } finally {
      try {dfs.close();} catch (Exception e) {}
      cluster.shutdown();
    }
  }
/** check command error outputs and exit statuses. */
@Test (timeout = 30000)
public void testErrOutPut() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
PrintStream bak = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem srcFs = cluster.getFileSystem();
Path root = new Path("/nonexistentfile");
bak = System.err;
ByteArrayOutputStream out = new ByteArrayOutputStream();
PrintStream tmp = new PrintStream(out);
System.setErr(tmp);
String[] argv = new String[2];
argv[0] = "-cat";
argv[1] = root.toUri().getPath();
int ret = ToolRunner.run(new FsShell(), argv);
assertEquals(" -cat returned 1 ", 1, ret);
String returned = out.toString();
assertTrue("cat does not print exceptions ",
(returned.lastIndexOf("Exception") == -1));
out.reset();
argv[0] = "-rm";
argv[1] = root.toString();
FsShell shell = new FsShell();
shell.setConf(conf);
ret = ToolRunner.run(shell, argv);
assertEquals(" -rm returned 1 ", 1, ret);
returned = out.toString();
out.reset();
assertTrue("rm prints reasonable error ",
(returned.lastIndexOf("No such file or directory") != -1));
argv[0] = "-rmr";
argv[1] = root.toString();
ret = ToolRunner.run(shell, argv);
assertEquals(" -rmr returned 1", 1, ret);
returned = out.toString();
assertTrue("rmr prints reasonable error ",
(returned.lastIndexOf("No such file or directory") != -1));
out.reset();
argv[0] = "-du";
argv[1] = "/nonexistentfile";
ret = ToolRunner.run(shell, argv);
returned = out.toString();
assertTrue(" -du prints reasonable error ",
(returned.lastIndexOf("No such file or directory") != -1));
out.reset();
argv[0] = "-dus";
argv[1] = "/nonexistentfile";
ret = ToolRunner.run(shell, argv);
returned = out.toString();
assertTrue(" -dus prints reasonable error",
(returned.lastIndexOf("No such file or directory") != -1));
out.reset();
argv[0] = "-ls";
argv[1] = "/nonexistenfile";
ret = ToolRunner.run(shell, argv);
returned = out.toString();
assertTrue(" -ls does not return Found 0 items",
(returned.lastIndexOf("Found 0") == -1));
out.reset();
argv[0] = "-ls";
argv[1] = "/nonexistentfile";
ret = ToolRunner.run(shell, argv);
assertEquals(" -lsr should fail ", 1, ret);
out.reset();
srcFs.mkdirs(new Path("/testdir"));
argv[0] = "-ls";
argv[1] = "/testdir";
ret = ToolRunner.run(shell, argv);
returned = out.toString();
assertTrue(" -ls does not print out anything ",
(returned.lastIndexOf("Found 0") == -1));
out.reset();
argv[0] = "-ls";
argv[1] = "/user/nonxistant/*";
ret = ToolRunner.run(shell, argv);
assertEquals(" -ls on nonexistent glob returns 1", 1, ret);
out.reset();
argv[0] = "-mkdir";
argv[1] = "/testdir";
ret = ToolRunner.run(shell, argv);
returned = out.toString();
assertEquals(" -mkdir returned 1 ", 1, ret);
assertTrue(" -mkdir returned File exists",
(returned.lastIndexOf("File exists") != -1));
Path testFile = new Path("/testfile");
OutputStream outtmp = srcFs.create(testFile);
outtmp.write(testFile.toString().getBytes());
outtmp.close();
out.reset();
argv[0] = "-mkdir";
argv[1] = "/testfile";
ret = ToolRunner.run(shell, argv);
returned = out.toString();
assertEquals(" -mkdir returned 1", 1, ret);
assertTrue(" -mkdir returned this is a file ",
(returned.lastIndexOf("not a directory") != -1));
out.reset();
argv = new String[3];
argv[0] = "-mv";
argv[1] = "/testfile";
argv[2] = "file";
ret = ToolRunner.run(shell, argv);
assertEquals("mv failed to rename", 1, ret);
out.reset();
argv = new String[3];
argv[0] = "-mv";
argv[1] = "/testfile";
argv[2] = "/testfiletest";
ret = ToolRunner.run(shell, argv);
returned = out.toString();
assertTrue("no output from rename",
(returned.lastIndexOf("Renamed") == -1));
out.reset();
argv[0] = "-mv";
argv[1] = "/testfile";
argv[2] = "/testfiletmp";
ret = ToolRunner.run(shell, argv);
returned = out.toString();
assertTrue(" unix like output",
(returned.lastIndexOf("No such file or") != -1));
out.reset();
argv = new String[1];
argv[0] = "-du";
srcFs.mkdirs(srcFs.getHomeDirectory());
ret = ToolRunner.run(shell, argv);
returned = out.toString();
assertEquals(" no error ", 0, ret);
assertTrue("empty path specified",
(returned.lastIndexOf("empty string") == -1));
out.reset();
argv = new String[3];
argv[0] = "-test";
argv[1] = "-d";
argv[2] = "/no/such/dir";
ret = ToolRunner.run(shell, argv);
returned = out.toString();
assertEquals(" -test -d wrong result ", 1, ret);
assertTrue(returned.isEmpty());
} finally {
if (bak != null) {
System.setErr(bak);
}
if (cluster != null) {
cluster.shutdown();
}
}
}
  /**
   * Tests shell commands addressed with fully-qualified URIs, i.e. paths on
   * a second ("remote") cluster that the shell is NOT configured for, plus
   * the {@code hdfs:///} shorthand for the default file system.
   * Covers ls, rmr, du, put, cp (cross-cluster), cat and chgrp/chown.
   */
  @Test (timeout = 30000)
  public void testURIPaths() throws Exception {
    Configuration srcConf = new HdfsConfiguration();
    Configuration dstConf = new HdfsConfiguration();
    MiniDFSCluster srcCluster = null;
    MiniDFSCluster dstCluster = null;
    // Separate base dir so the two mini-clusters don't collide on disk.
    File bak = new File(PathUtils.getTestDir(getClass()), "dfs_tmp_uri");
    bak.mkdirs();
    try{
      srcCluster = new MiniDFSCluster.Builder(srcConf).numDataNodes(2).build();
      dstConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, bak.getAbsolutePath());
      dstCluster = new MiniDFSCluster.Builder(dstConf).numDataNodes(2).build();
      FileSystem srcFs = srcCluster.getFileSystem();
      FileSystem dstFs = dstCluster.getFileSystem();
      FsShell shell = new FsShell();
      // The shell is configured for the source cluster; every access to the
      // destination cluster below must therefore use an explicit URI.
      shell.setConf(srcConf);
      //check for ls
      String[] argv = new String[2];
      argv[0] = "-ls";
      argv[1] = dstFs.getUri().toString() + "/";
      int ret = ToolRunner.run(shell, argv);
      assertEquals("ls works on remote uri ", 0, ret);
      //check for rm -r
      dstFs.mkdirs(new Path("/hadoopdir"));
      argv = new String[2];
      argv[0] = "-rmr";
      argv[1] = dstFs.getUri().toString() + "/hadoopdir";
      ret = ToolRunner.run(shell, argv);
      assertEquals("-rmr works on remote uri " + argv[1], 0, ret);
      //check du
      argv[0] = "-du";
      argv[1] = dstFs.getUri().toString() + "/";
      ret = ToolRunner.run(shell, argv);
      assertEquals("du works on remote uri ", 0, ret);
      //check put
      File furi = new File(TEST_ROOT_DIR, "furi");
      createLocalFile(furi);
      argv = new String[3];
      argv[0] = "-put";
      argv[1] = furi.toURI().toString();
      argv[2] = dstFs.getUri().toString() + "/furi";
      ret = ToolRunner.run(shell, argv);
      assertEquals(" put is working ", 0, ret);
      //check cp from the remote cluster back to the source cluster
      argv[0] = "-cp";
      argv[1] = dstFs.getUri().toString() + "/furi";
      argv[2] = srcFs.getUri().toString() + "/furi";
      ret = ToolRunner.run(shell, argv);
      assertEquals(" cp is working ", 0, ret);
      assertTrue(srcFs.exists(new Path("/furi")));
      //check cat
      argv = new String[2];
      argv[0] = "-cat";
      argv[1] = dstFs.getUri().toString() + "/furi";
      ret = ToolRunner.run(shell, argv);
      assertEquals(" cat is working ", 0, ret);
      //check chown
      dstFs.delete(new Path("/furi"), true);
      dstFs.delete(new Path("/hadoopdir"), true);
      String file = "/tmp/chownTest";
      Path path = new Path(file);
      Path parent = new Path("/tmp");
      Path root = new Path("/");
      TestDFSShell.writeFile(dstFs, path);
      runCmd(shell, "-chgrp", "-R", "herbivores", dstFs.getUri().toString() +"/*");
      confirmOwner(null, "herbivores", dstFs, parent, path);
      runCmd(shell, "-chown", "-R", ":reptiles", dstFs.getUri().toString() + "/");
      confirmOwner(null, "reptiles", dstFs, root, parent, path);
      //check if default hdfs:/// works
      argv[0] = "-cat";
      argv[1] = "hdfs:///furi";
      ret = ToolRunner.run(shell, argv);
      assertEquals(" default works for cat", 0, ret);
      argv[0] = "-ls";
      argv[1] = "hdfs:///";
      ret = ToolRunner.run(shell, argv);
      assertEquals("default works for ls ", 0, ret);
      argv[0] = "-rmr";
      argv[1] = "hdfs:///furi";
      ret = ToolRunner.run(shell, argv);
      assertEquals("default works for rm/rmr", 0, ret);
    } finally {
      if (null != srcCluster) {
        srcCluster.shutdown();
      }
      if (null != dstCluster) {
        dstCluster.shutdown();
      }
    }
  }
@Test (timeout = 30000)
public void testText() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
final FileSystem dfs = cluster.getFileSystem();
textTest(new Path("/texttest").makeQualified(dfs.getUri(),
dfs.getWorkingDirectory()), conf);
conf.set("fs.defaultFS", dfs.getUri().toString());
final FileSystem lfs = FileSystem.getLocal(conf);
textTest(new Path(TEST_ROOT_DIR, "texttest").makeQualified(lfs.getUri(),
lfs.getWorkingDirectory()), conf);
} finally {
if (null != cluster) {
cluster.shutdown();
}
}
}
  /**
   * Exercises the {@code -text} command against files created under
   * {@code root}: a gzip file (magic-byte detection), a SequenceFile with a
   * misleading .gz extension (container detection must win), a .deflate
   * file and a bzip2 file (extension-based detection), and plain text.
   * Stdout is captured for each run and compared byte-for-byte with the
   * data that was written.
   *
   * @param root directory (on any FileSystem) where the test files go
   * @param conf configuration handed to each FsShell invocation
   */
  private void textTest(Path root, Configuration conf) throws Exception {
    PrintStream bak = null;
    try {
      final FileSystem fs = root.getFileSystem(conf);
      fs.mkdirs(root);

      // Test the gzip type of files. Magic detection.
      OutputStream zout = new GZIPOutputStream(
          fs.create(new Path(root, "file.gz")));
      Random r = new Random();
      bak = System.out;
      // "file" accumulates the uncompressed bytes for comparison later.
      ByteArrayOutputStream file = new ByteArrayOutputStream();
      for (int i = 0; i < 1024; ++i) {
        char c = Character.forDigit(r.nextInt(26) + 10, 36);
        file.write(c);
        zout.write(c);
      }
      zout.close();

      ByteArrayOutputStream out = new ByteArrayOutputStream();
      System.setOut(new PrintStream(out));

      String[] argv = new String[2];
      argv[0] = "-text";
      argv[1] = new Path(root, "file.gz").toString();
      int ret = ToolRunner.run(new FsShell(conf), argv);
      assertEquals("'-text " + argv[1] + " returned " + ret, 0, ret);
      assertTrue("Output doesn't match input",
          Arrays.equals(file.toByteArray(), out.toByteArray()));

      // Create a sequence file with a gz extension, to test proper
      // container detection. Magic detection.
      SequenceFile.Writer writer = SequenceFile.createWriter(
          conf,
          SequenceFile.Writer.file(new Path(root, "file.gz")),
          SequenceFile.Writer.keyClass(Text.class),
          SequenceFile.Writer.valueClass(Text.class));
      writer.append(new Text("Foo"), new Text("Bar"));
      writer.close();
      out = new ByteArrayOutputStream();
      System.setOut(new PrintStream(out));
      argv = new String[2];
      argv[0] = "-text";
      argv[1] = new Path(root, "file.gz").toString();
      ret = ToolRunner.run(new FsShell(conf), argv);
      assertEquals("'-text " + argv[1] + " returned " + ret, 0, ret);
      assertTrue("Output doesn't match input",
          Arrays.equals("Foo\tBar\n".getBytes(), out.toByteArray()));
      out.reset();

      // Test deflate. Extension-based detection.
      OutputStream dout = new DeflaterOutputStream(
          fs.create(new Path(root, "file.deflate")));
      byte[] outbytes = "foo".getBytes();
      dout.write(outbytes);
      dout.close();
      out = new ByteArrayOutputStream();
      System.setOut(new PrintStream(out));
      argv = new String[2];
      argv[0] = "-text";
      argv[1] = new Path(root, "file.deflate").toString();
      ret = ToolRunner.run(new FsShell(conf), argv);
      assertEquals("'-text " + argv[1] + " returned " + ret, 0, ret);
      assertTrue("Output doesn't match input",
          Arrays.equals(outbytes, out.toByteArray()));
      out.reset();

      // Test a simple codec. Extension based detection. We use
      // Bzip2 cause its non-native.
      CompressionCodec codec = ReflectionUtils.newInstance(BZip2Codec.class, conf);
      String extension = codec.getDefaultExtension();
      Path p = new Path(root, "file." + extension);
      OutputStream fout = new DataOutputStream(codec.createOutputStream(
          fs.create(p, true)));
      byte[] writebytes = "foo".getBytes();
      fout.write(writebytes);
      fout.close();
      out = new ByteArrayOutputStream();
      System.setOut(new PrintStream(out));
      argv = new String[2];
      argv[0] = "-text";
      argv[1] = new Path(root, p).toString();
      ret = ToolRunner.run(new FsShell(conf), argv);
      assertEquals("'-text " + argv[1] + " returned " + ret, 0, ret);
      assertTrue("Output doesn't match input",
          Arrays.equals(writebytes, out.toByteArray()));
      out.reset();

      // Test a plain text.
      OutputStream pout = fs.create(new Path(root, "file.txt"));
      writebytes = "bar".getBytes();
      pout.write(writebytes);
      pout.close();
      out = new ByteArrayOutputStream();
      System.setOut(new PrintStream(out));
      argv = new String[2];
      argv[0] = "-text";
      argv[1] = new Path(root, "file.txt").toString();
      ret = ToolRunner.run(new FsShell(conf), argv);
      assertEquals("'-text " + argv[1] + " returned " + ret, 0, ret);
      assertTrue("Output doesn't match input",
          Arrays.equals(writebytes, out.toByteArray()));
      out.reset();
    } finally {
      if (null != bak) {
        // Restore the real stdout captured above.
        System.setOut(bak);
      }
    }
  }
  /**
   * Tests {@code -copyToLocal}:
   * <ul>
   * <li>Copying a glob ({@code root*}) pulls down the whole tree built by
   *     {@link #createTree}, including a sibling "copyToLocal2" root.</li>
   * <li>Copying a nonexistent source fails with exit 1 and must NOT leave
   *     a zero-byte destination file behind.</li>
   * </ul>
   */
  @Test (timeout = 30000)
  public void testCopyToLocal() throws IOException {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    FileSystem fs = cluster.getFileSystem();
    assertTrue("Not a HDFS: "+fs.getUri(),
        fs instanceof DistributedFileSystem);
    DistributedFileSystem dfs = (DistributedFileSystem)fs;
    FsShell shell = new FsShell();
    shell.setConf(conf);

    try {
      String root = createTree(dfs, "copyToLocal");

      // Verify copying the tree
      {
        try {
          assertEquals(0,
              runCmd(shell, "-copyToLocal", root + "*", TEST_ROOT_DIR));
        } catch (Exception e) {
          System.err.println("Exception raised from DFSShell.run " +
              e.getLocalizedMessage());
        }

        File localroot = new File(TEST_ROOT_DIR, "copyToLocal");
        File localroot2 = new File(TEST_ROOT_DIR, "copyToLocal2");

        File f1 = new File(localroot, "f1");
        assertTrue("Copying failed.", f1.isFile());

        File f2 = new File(localroot, "f2");
        assertTrue("Copying failed.", f2.isFile());

        File sub = new File(localroot, "sub");
        assertTrue("Copying failed.", sub.isDirectory());

        File f3 = new File(sub, "f3");
        assertTrue("Copying failed.", f3.isFile());

        File f4 = new File(sub, "f4");
        assertTrue("Copying failed.", f4.isFile());

        // localroot2 comes from the glob matching "copyToLocal2".
        File f5 = new File(localroot2, "f1");
        assertTrue("Copying failed.", f5.isFile());

        // Clean up the local copies so later tests start fresh.
        f1.delete();
        f2.delete();
        f3.delete();
        f4.delete();
        f5.delete();
        sub.delete();
      }
      // Verify copying non existing sources do not create zero byte
      // destination files
      {
        String[] args = {"-copyToLocal", "nosuchfile", TEST_ROOT_DIR};
        try {
          assertEquals(1, shell.run(args));
        } catch (Exception e) {
          System.err.println("Exception raised from DFSShell.run " +
              e.getLocalizedMessage());
        }
        File f6 = new File(TEST_ROOT_DIR, "nosuchfile");
        assertTrue(!f6.exists());
      }
    } finally {
      try {
        dfs.close();
      } catch (Exception e) {
      }
      cluster.shutdown();
    }
  }
static String createTree(FileSystem fs, String name) throws IOException {
// create a tree
// ROOT
// |- f1
// |- f2
// + sub
// |- f3
// |- f4
// ROOT2
// |- f1
String path = "/test/" + name;
Path root = mkdir(fs, new Path(path));
Path sub = mkdir(fs, new Path(root, "sub"));
Path root2 = mkdir(fs, new Path(path + "2"));
writeFile(fs, new Path(root, "f1"));
writeFile(fs, new Path(root, "f2"));
writeFile(fs, new Path(sub, "f3"));
writeFile(fs, new Path(sub, "f4"));
writeFile(fs, new Path(root2, "f1"));
mkdir(fs, new Path(root2, "sub"));
return path;
}
@Test (timeout = 30000)
public void testCount() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
DistributedFileSystem dfs = cluster.getFileSystem();
FsShell shell = new FsShell();
shell.setConf(conf);
try {
String root = createTree(dfs, "count");
// Verify the counts
runCount(root, 2, 4, shell);
runCount(root + "2", 2, 1, shell);
runCount(root + "2/f1", 0, 1, shell);
runCount(root + "2/sub", 1, 0, shell);
final FileSystem localfs = FileSystem.getLocal(conf);
Path localpath = new Path(TEST_ROOT_DIR, "testcount");
localpath = localpath.makeQualified(localfs.getUri(),
localfs.getWorkingDirectory());
localfs.mkdirs(localpath);
final String localstr = localpath.toString();
System.out.println("localstr=" + localstr);
runCount(localstr, 1, 0, shell);
assertEquals(0, runCmd(shell, "-count", root, localstr));
} finally {
try {
dfs.close();
} catch (Exception e) {
}
cluster.shutdown();
}
}
private static void runCount(String path, long dirs, long files, FsShell shell
) throws IOException {
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
PrintStream out = new PrintStream(bytes);
PrintStream oldOut = System.out;
System.setOut(out);
Scanner in = null;
String results = null;
try {
runCmd(shell, "-count", path);
results = bytes.toString();
in = new Scanner(results);
assertEquals(dirs, in.nextLong());
assertEquals(files, in.nextLong());
} finally {
if (in!=null) in.close();
IOUtils.closeStream(out);
System.setOut(oldOut);
System.out.println("results:\n" + results);
}
}
//throws IOException instead of Exception as shell.run() does.
private static int runCmd(FsShell shell, String... args) throws IOException {
StringBuilder cmdline = new StringBuilder("RUN:");
for (String arg : args) cmdline.append(" " + arg);
LOG.info(cmdline.toString());
try {
int exitCode;
exitCode = shell.run(args);
LOG.info("RUN: "+args[0]+" exit=" + exitCode);
return exitCode;
} catch (IOException e) {
LOG.error("RUN: "+args[0]+" IOException="+e.getMessage());
throw e;
} catch (RuntimeException e) {
LOG.error("RUN: "+args[0]+" RuntimeException="+e.getMessage());
throw e;
} catch (Exception e) {
LOG.error("RUN: "+args[0]+" Exception="+e.getMessage());
throw new IOException(StringUtils.stringifyException(e));
}
}
  /**
   * Tests chmod: symbolic and octal modes, recursive (-R) application with
   * the capital-X bit, and — on non-Windows platforms — the sticky bit on
   * directories. Works against whatever FileSystem is passed in (used for
   * both local FS and DFS by {@link #testFilePermissions}).
   *
   * NOTE: closes {@code fs} in its finally block, so callers must obtain a
   * fresh FileSystem handle afterwards.
   */
  void testChmod(Configuration conf, FileSystem fs, String chmodDir)
                                                    throws IOException {
    FsShell shell = new FsShell();
    shell.setConf(conf);

    try {
      //first make dir
      Path dir = new Path(chmodDir);
      fs.delete(dir, true);
      fs.mkdirs(dir);

      confirmPermissionChange(/* Setting */ "u+rwx,g=rw,o-rwx",
                              /* Should give */ "rwxrw----", fs, shell, dir);

      //create an empty file
      Path file = new Path(chmodDir, "file");
      TestDFSShell.writeFile(fs, file);

      //test octal mode
      confirmPermissionChange("644", "rw-r--r--", fs, shell, file);

      //test recursive
      runCmd(shell, "-chmod", "-R", "a+rwX", chmodDir);
      // +X adds execute only where it makes sense: dirs get it, files don't.
      assertEquals("rwxrwxrwx",
                   fs.getFileStatus(dir).getPermission().toString());
      assertEquals("rw-rw-rw-",
                   fs.getFileStatus(file).getPermission().toString());

      // Skip "sticky bit" tests on Windows.
      //
      if (!Path.WINDOWS) {
        // test sticky bit on directories
        Path dir2 = new Path(dir, "stickybit");
        fs.mkdirs(dir2);
        LOG.info("Testing sticky bit on: " + dir2);
        LOG.info("Sticky bit directory initial mode: " +
            fs.getFileStatus(dir2).getPermission());

        confirmPermissionChange("u=rwx,g=rx,o=rx", "rwxr-xr-x", fs, shell, dir2);
        // "+t" / "-t" toggle only the sticky bit; "=t" clears everything else.
        confirmPermissionChange("+t", "rwxr-xr-t", fs, shell, dir2);
        confirmPermissionChange("-t", "rwxr-xr-x", fs, shell, dir2);
        confirmPermissionChange("=t", "--------T", fs, shell, dir2);
        confirmPermissionChange("0000", "---------", fs, shell, dir2);
        confirmPermissionChange("1666", "rw-rw-rwT", fs, shell, dir2);
        confirmPermissionChange("777", "rwxrwxrwt", fs, shell, dir2);

        fs.delete(dir2, true);
      } else {
        LOG.info("Skipped sticky bit tests on Windows");
      }

      fs.delete(dir, true);
    } finally {
      try {
        fs.close();
        shell.close();
      } catch (IOException ignored) {}
    }
  }
// Apply a new permission to a path and confirm that the new permission
// is the one you were expecting
private void confirmPermissionChange(String toApply, String expected,
FileSystem fs, FsShell shell, Path dir2) throws IOException {
LOG.info("Confirming permission change of " + toApply + " to " + expected);
runCmd(shell, "-chmod", toApply, dir2.toString());
String result = fs.getFileStatus(dir2).getPermission().toString();
LOG.info("Permission change result: " + result);
assertEquals(expected, result);
}
private void confirmOwner(String owner, String group,
FileSystem fs, Path... paths) throws IOException {
for(Path path : paths) {
if (owner != null) {
assertEquals(owner, fs.getFileStatus(path).getOwner());
}
if (group != null) {
assertEquals(group, fs.getFileStatus(path).getGroup());
}
}
}
@Test (timeout = 30000)
public void testFilePermissions() throws IOException {
Configuration conf = new HdfsConfiguration();
//test chmod on local fs
FileSystem fs = FileSystem.getLocal(conf);
testChmod(conf, fs,
(new File(TEST_ROOT_DIR, "chmodTest")).getAbsolutePath());
conf.set(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, "true");
//test chmod on DFS
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
fs = cluster.getFileSystem();
testChmod(conf, fs, "/tmp/chmodTest");
// test chown and chgrp on DFS:
FsShell shell = new FsShell();
shell.setConf(conf);
fs = cluster.getFileSystem();
/* For dfs, I am the super user and I can change owner of any file to
* anything. "-R" option is already tested by chmod test above.
*/
String file = "/tmp/chownTest";
Path path = new Path(file);
Path parent = new Path("/tmp");
Path root = new Path("/");
TestDFSShell.writeFile(fs, path);
runCmd(shell, "-chgrp", "-R", "herbivores", "/*", "unknownFile*");
confirmOwner(null, "herbivores", fs, parent, path);
runCmd(shell, "-chgrp", "mammals", file);
confirmOwner(null, "mammals", fs, path);
runCmd(shell, "-chown", "-R", ":reptiles", "/");
confirmOwner(null, "reptiles", fs, root, parent, path);
runCmd(shell, "-chown", "python:", "/nonExistentFile", file);
confirmOwner("python", "reptiles", fs, path);
runCmd(shell, "-chown", "-R", "hadoop:toys", "unknownFile", "/");
confirmOwner("hadoop", "toys", fs, root, parent, path);
// Test different characters in names
runCmd(shell, "-chown", "hdfs.user", file);
confirmOwner("hdfs.user", null, fs, path);
runCmd(shell, "-chown", "_Hdfs.User-10:_hadoop.users--", file);
confirmOwner("_Hdfs.User-10", "_hadoop.users--", fs, path);
runCmd(shell, "-chown", "hdfs/hadoop-core@apache.org:asf-projects", file);
confirmOwner("hdfs/hadoop-core@apache.org", "asf-projects", fs, path);
runCmd(shell, "-chgrp", "hadoop-core@apache.org/100", file);
confirmOwner(null, "hadoop-core@apache.org/100", fs, path);
cluster.shutdown();
}
/**
* Tests various options of DFSShell.
*/
@Test (timeout = 120000)
public void testDFSShell() throws IOException {
Configuration conf = new HdfsConfiguration();
/* This tests some properties of ChecksumFileSystem as well.
* Make sure that we create ChecksumDFS */
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fs = cluster.getFileSystem();
assertTrue("Not a HDFS: "+fs.getUri(),
fs instanceof DistributedFileSystem);
DistributedFileSystem fileSys = (DistributedFileSystem)fs;
FsShell shell = new FsShell();
shell.setConf(conf);
try {
// First create a new directory with mkdirs
Path myPath = new Path("/test/mkdirs");
assertTrue(fileSys.mkdirs(myPath));
assertTrue(fileSys.exists(myPath));
assertTrue(fileSys.mkdirs(myPath));
// Second, create a file in that directory.
Path myFile = new Path("/test/mkdirs/myFile");
writeFile(fileSys, myFile);
assertTrue(fileSys.exists(myFile));
Path myFile2 = new Path("/test/mkdirs/myFile2");
writeFile(fileSys, myFile2);
assertTrue(fileSys.exists(myFile2));
// Verify that rm with a pattern
{
String[] args = new String[2];
args[0] = "-rm";
args[1] = "/test/mkdirs/myFile*";
int val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertTrue(val == 0);
assertFalse(fileSys.exists(myFile));
assertFalse(fileSys.exists(myFile2));
//re-create the files for other tests
writeFile(fileSys, myFile);
assertTrue(fileSys.exists(myFile));
writeFile(fileSys, myFile2);
assertTrue(fileSys.exists(myFile2));
}
// Verify that we can read the file
{
String[] args = new String[3];
args[0] = "-cat";
args[1] = "/test/mkdirs/myFile";
args[2] = "/test/mkdirs/myFile2";
int val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run: " +
StringUtils.stringifyException(e));
}
assertTrue(val == 0);
}
fileSys.delete(myFile2, true);
// Verify that we get an error while trying to read an nonexistent file
{
String[] args = new String[2];
args[0] = "-cat";
args[1] = "/test/mkdirs/myFile1";
int val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertTrue(val != 0);
}
// Verify that we get an error while trying to delete an nonexistent file
{
String[] args = new String[2];
args[0] = "-rm";
args[1] = "/test/mkdirs/myFile1";
int val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertTrue(val != 0);
}
// Verify that we succeed in removing the file we created
{
String[] args = new String[2];
args[0] = "-rm";
args[1] = "/test/mkdirs/myFile";
int val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertTrue(val == 0);
}
// Verify touch/test
{
String[] args;
int val;
args = new String[3];
args[0] = "-test";
args[1] = "-e";
args[2] = "/test/mkdirs/noFileHere";
val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertEquals(1, val);
args[1] = "-z";
val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertEquals(1, val);
args = new String[2];
args[0] = "-touchz";
args[1] = "/test/mkdirs/isFileHere";
val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertEquals(0, val);
args = new String[2];
args[0] = "-touchz";
args[1] = "/test/mkdirs/thisDirNotExists/isFileHere";
val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertEquals(1, val);
args = new String[3];
args[0] = "-test";
args[1] = "-e";
args[2] = "/test/mkdirs/isFileHere";
val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertEquals(0, val);
args[1] = "-d";
val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertEquals(1, val);
args[1] = "-z";
val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertEquals(0, val);
}
// Verify that cp from a directory to a subdirectory fails
{
String[] args = new String[2];
args[0] = "-mkdir";
args[1] = "/test/dir1";
int val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertEquals(0, val);
// this should fail
String[] args1 = new String[3];
args1[0] = "-cp";
args1[1] = "/test/dir1";
args1[2] = "/test/dir1/dir2";
val = 0;
try {
val = shell.run(args1);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertEquals(1, val);
// this should succeed
args1[0] = "-cp";
args1[1] = "/test/dir1";
args1[2] = "/test/dir1foo";
val = -1;
try {
val = shell.run(args1);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertEquals(0, val);
}
// Verify -test -f negative case (missing file)
{
String[] args = new String[3];
args[0] = "-test";
args[1] = "-f";
args[2] = "/test/mkdirs/noFileHere";
int val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertEquals(1, val);
}
// Verify -test -f negative case (directory rather than file)
{
String[] args = new String[3];
args[0] = "-test";
args[1] = "-f";
args[2] = "/test/mkdirs";
int val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertEquals(1, val);
}
// Verify -test -f positive case
{
writeFile(fileSys, myFile);
assertTrue(fileSys.exists(myFile));
String[] args = new String[3];
args[0] = "-test";
args[1] = "-f";
args[2] = myFile.toString();
int val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertEquals(0, val);
}
// Verify -test -s negative case (missing file)
{
String[] args = new String[3];
args[0] = "-test";
args[1] = "-s";
args[2] = "/test/mkdirs/noFileHere";
int val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertEquals(1, val);
}
// Verify -test -s negative case (zero length file)
{
String[] args = new String[3];
args[0] = "-test";
args[1] = "-s";
args[2] = "/test/mkdirs/isFileHere";
int val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertEquals(1, val);
}
// Verify -test -s positive case (nonzero length file)
{
String[] args = new String[3];
args[0] = "-test";
args[1] = "-s";
args[2] = myFile.toString();
int val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertEquals(0, val);
}
} finally {
try {
fileSys.close();
} catch (Exception e) {
}
cluster.shutdown();
}
}
/**
 * Collects the on-disk block files backing every block reported by every
 * datanode in the given cluster.
 *
 * @param cluster a running mini cluster with its datanodes up
 * @return one File per reported block, across all datanodes and storages
 * @throws IOException if the block reports cannot be gathered
 */
static List<File> getBlockFiles(MiniDFSCluster cluster) throws IOException {
  final List<File> blockFiles = new ArrayList<File>();
  final List<DataNode> nodes = cluster.getDataNodes();
  final String poolId = cluster.getNamesystem().getBlockPoolId();
  final List<Map<DatanodeStorage, BlockListAsLongs>> reports =
      cluster.getAllBlockReports(poolId);
  int nodeIdx = 0;
  for (Map<DatanodeStorage, BlockListAsLongs> report : reports) {
    // Block reports come back in the same order as the datanode list.
    final DataNode node = nodes.get(nodeIdx++);
    for (BlockListAsLongs storageBlocks : report.values()) {
      for (Block block : storageBlocks) {
        blockFiles.add(
            DataNodeTestUtils.getFile(node, poolId, block.getBlockId()));
      }
    }
  }
  return blockFiles;
}
/**
 * Corrupts each given block file in place by incrementing its first
 * character, guaranteeing a checksum mismatch on subsequent reads.
 *
 * @param files block files to corrupt
 * @throws IOException if a file cannot be read or rewritten
 */
static void corrupt(List<File> files) throws IOException {
  for (File f : files) {
    StringBuilder content = new StringBuilder(DFSTestUtil.readFile(f));
    char c = content.charAt(0);
    content.setCharAt(0, ++c);
    // try-with-resources closes the writer even if print() throws; the
    // original explicit close() leaked the stream on an exception.
    try (PrintWriter out = new PrintWriter(f)) {
      out.print(content);
      out.flush();
    }
  }
}
/**
 * Runs one {@code -get}-style shell invocation with the given expected exit
 * code and optional extra flags, returning the fetched file's contents (or
 * null when the command was expected to fail).
 */
static interface TestGetRunner {
  String run(int exitcode, String... options) throws IOException;
}
// Verifies that a permission failure surfaced to the shell as a
// RemoteException is reported as "Permission denied" on stderr with exit
// code 1, rather than leaking a stack trace.
@Test (timeout = 30000)
public void testRemoteException() throws Exception {
  UserGroupInformation tmpUGI =
      UserGroupInformation.createUserForTesting("tmpname", new String[] {"mygroup"});
  MiniDFSCluster dfs = null;
  PrintStream bak = null;
  try {
    final Configuration conf = new HdfsConfiguration();
    dfs = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    FileSystem fs = dfs.getFileSystem();
    Path p = new Path("/foo");
    fs.mkdirs(p);
    // Owner-only permissions so the test user below is denied access.
    fs.setPermission(p, new FsPermission((short)0700));
    bak = System.err;  // remember the real stderr so it can be restored
    tmpUGI.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        FsShell fshell = new FsShell(conf);
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        PrintStream tmp = new PrintStream(out);
        System.setErr(tmp);  // capture the shell's error output
        String[] args = new String[2];
        args[0] = "-ls";
        args[1] = "/foo";
        int ret = ToolRunner.run(fshell, args);
        assertEquals("returned should be 1", 1, ret);
        String str = out.toString();
        assertTrue("permission denied printed",
            str.indexOf("Permission denied") != -1);
        out.reset();
        return null;
      }
    });
  } finally {
    // Restore stderr and tear down the cluster even when assertions fail.
    if (bak != null) {
      System.setErr(bak);
    }
    if (dfs != null) {
      dfs.shutdown();
    }
  }
}
// End-to-end "-get" test: fetch a file, corrupt its block files on disk,
// then verify a plain get fails on checksum error while -ignoreCrc still
// returns the (corrupted) bytes.
@Test (timeout = 30000)
public void testGet() throws IOException {
  GenericTestUtils.setLogLevel(FSInputChecker.LOG, Level.ALL);
  final String fname = "testGet.txt";
  Path root = new Path("/test/get");
  final Path remotef = new Path(root, fname);
  final Configuration conf = new HdfsConfiguration();
  // Set short retry timeouts so this test runs faster
  conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
  TestGetRunner runner = new TestGetRunner() {
    private int count = 0;  // makes each local destination file unique
    private final FsShell shell = new FsShell(conf);
    public String run(int exitcode, String... options) throws IOException {
      String dst = new File(TEST_ROOT_DIR, fname + ++count)
          .getAbsolutePath();
      // args layout: -get [options...] <remote> <local>
      String[] args = new String[options.length + 3];
      args[0] = "-get";
      args[args.length - 2] = remotef.toString();
      args[args.length - 1] = dst;
      for(int i = 0; i < options.length; i++) {
        args[i + 1] = options[i];
      }
      show("args=" + Arrays.asList(args));
      try {
        assertEquals(exitcode, shell.run(args));
      } catch (Exception e) {
        assertTrue(StringUtils.stringifyException(e), false);
      }
      // Only a successful get leaves readable content at dst.
      return exitcode == 0? DFSTestUtil.readFile(new File(dst)): null;
    }
  };
  File localf = createLocalFile(new File(TEST_ROOT_DIR, fname));
  MiniDFSCluster cluster = null;
  DistributedFileSystem dfs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true)
        .build();
    dfs = cluster.getFileSystem();
    mkdir(dfs, root);
    dfs.copyFromLocalFile(false, false, new Path(localf.getPath()), remotef);
    String localfcontent = DFSTestUtil.readFile(localf);
    // Sanity: both plain get and -ignoreCrc return the original content.
    assertEquals(localfcontent, runner.run(0));
    assertEquals(localfcontent, runner.run(0, "-ignoreCrc"));
    // find block files to modify later
    List<File> files = getBlockFiles(cluster);
    // Shut down cluster and then corrupt the block files by overwriting a
    // portion with junk data. We must shut down the cluster so that threads
    // in the data node do not hold locks on the block files while we try to
    // write into them. Particularly on Windows, the data node's use of the
    // FileChannel.transferTo method can cause block files to be memory mapped
    // in read-only mode during the transfer to a client, and this causes a
    // locking conflict. The call to shutdown the cluster blocks until all
    // DataXceiver threads exit, preventing this problem.
    dfs.close();
    cluster.shutdown();
    show("files=" + files);
    corrupt(files);
    // Start the cluster again, but do not reformat, so prior files remain.
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).format(false)
        .build();
    dfs = cluster.getFileSystem();
    // Plain get must now fail on the checksum mismatch...
    assertEquals(null, runner.run(1));
    // ...while -ignoreCrc returns data with only the first byte changed
    // (corrupt() incremented the first character of each block file).
    String corruptedcontent = runner.run(0, "-ignoreCrc");
    assertEquals(localfcontent.substring(1), corruptedcontent.substring(1));
    assertEquals(localfcontent.charAt(0)+1, corruptedcontent.charAt(0));
  } finally {
    if (null != dfs) {
      try {
        dfs.close();
      } catch (Exception e) {
        // best-effort close; the cluster shutdown below is what matters
      }
    }
    if (null != cluster) {
      cluster.shutdown();
    }
    localf.delete();
  }
}
// Verifies that recursive listing (-lsr) continues past a permission
// failure: a user denied access to "sub" still sees later siblings ("zzz").
@Test (timeout = 30000)
public void testLsr() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  DistributedFileSystem dfs = cluster.getFileSystem();
  try {
    final String root = createTree(dfs, "lsr");
    dfs.mkdirs(new Path(root, "zzz"));
    // As the current (super)user the full listing succeeds: exit code 0.
    runLsr(new FsShell(conf), root, 0);
    final Path sub = new Path(root, "sub");
    // Strip all permissions from "sub" so the test user below is denied.
    dfs.setPermission(sub, new FsPermission((short)0));
    final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    final String tmpusername = ugi.getShortUserName() + "1";
    UserGroupInformation tmpUGI = UserGroupInformation.createUserForTesting(
        tmpusername, new String[] {tmpusername});
    String results = tmpUGI.doAs(new PrivilegedExceptionAction<String>() {
      @Override
      public String run() throws Exception {
        // Exit code 1 expected because of the permission failure on "sub".
        return runLsr(new FsShell(conf), root, 1);
      }
    });
    // The listing must still include entries after the failed directory.
    assertTrue(results.contains("zzz"));
  } finally {
    cluster.shutdown();
  }
}
/**
 * Executes {@code -lsr <root>} with stdout/stderr captured into one buffer,
 * asserts the shell's exit code, and returns everything that was printed.
 *
 * @param shell shell to drive
 * @param root directory to list recursively
 * @param returnvalue expected shell exit code
 * @return the combined stdout/stderr output of the command
 */
private static String runLsr(final FsShell shell, String root, int returnvalue
    ) throws Exception {
  System.out.println("root=" + root + ", returnvalue=" + returnvalue);
  final ByteArrayOutputStream captured = new ByteArrayOutputStream();
  final PrintStream capturingStream = new PrintStream(captured);
  final PrintStream savedOut = System.out;
  final PrintStream savedErr = System.err;
  // Route both standard streams through the capture buffer.
  System.setOut(capturingStream);
  System.setErr(capturingStream);
  final String results;
  try {
    final String[] argv = { "-lsr", root };
    assertEquals(returnvalue, shell.run(argv));
    results = captured.toString();
  } finally {
    // Always restore the real streams, even when the assertion fails.
    IOUtils.closeStream(capturingStream);
    System.setOut(savedOut);
    System.setErr(savedErr);
  }
  System.out.println("results:\n" + results);
  return results;
}
/**
 * default setting is file:// which is not a DFS
 * so DFSAdmin should throw and catch InvalidArgumentException
 * and return -1 exit code.
 * @throws Exception
 */
@Test (timeout = 30000)
public void testInvalidShell() throws Exception {
  Configuration conf = new Configuration(); // default FS (non-DFS)
  DFSAdmin admin = new DFSAdmin();
  admin.setConf(conf);
  int res = admin.run(new String[] {"-refreshNodes"});
  // JUnit's assertEquals signature is (message, expected, actual); the
  // original call had expected and actual swapped, which garbles the
  // failure report even though equality itself is symmetric.
  assertEquals("expected to fail -1", -1, res);
}
// Preserve Copy Option is -ptopxa (timestamps, ownership, permission, XATTR,
// ACLs)
@Test (timeout = 120000)
public void testCopyCommandsWithPreserveOption() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(true).build();
  FsShell shell = null;
  FileSystem fs = null;
  final String testdir = "/tmp/TestDFSShell-testCopyCommandsWithPreserveOption-"
      + counter.getAndIncrement();
  final Path hdfsTestDir = new Path(testdir);
  try {
    fs = cluster.getFileSystem();
    fs.mkdirs(hdfsTestDir);
    Path src = new Path(hdfsTestDir, "srcfile");
    fs.create(src).close();
    // Give the source a non-trivial ACL and xattrs so every preserve-flag
    // combination below has something observable to (not) copy.
    fs.setAcl(src, Lists.newArrayList(
        aclEntry(ACCESS, USER, ALL),
        aclEntry(ACCESS, USER, "foo", ALL),
        aclEntry(ACCESS, GROUP, READ_EXECUTE),
        aclEntry(ACCESS, GROUP, "bar", READ_EXECUTE),
        aclEntry(ACCESS, OTHER, EXECUTE)));
    FileStatus status = fs.getFileStatus(src);
    final long mtime = status.getModificationTime();
    final long atime = status.getAccessTime();
    final String owner = status.getOwner();
    final String group = status.getGroup();
    final FsPermission perm = status.getPermission();
    fs.setXAttr(src, USER_A1, USER_A1_VALUE);
    fs.setXAttr(src, TRUSTED_A1, TRUSTED_A1_VALUE);
    shell = new FsShell(conf);

    // -p : timestamps, ownership, permission only — no xattrs, no ACLs.
    Path target1 = new Path(hdfsTestDir, "targetfile1");
    runPreserveCopy(shell, "-p", src, target1);
    FsPermission targetPerm =
        checkPreservedStatus(fs, target1, mtime, atime, owner, group, perm);
    Map<String, byte[]> xattrs = fs.getXAttrs(target1);
    assertTrue(xattrs.isEmpty());
    List<AclEntry> acls = fs.getAclStatus(target1).getEntries();
    assertTrue(acls.isEmpty());
    assertFalse(targetPerm.getAclBit());

    // -ptop : explicit t/o/p flags behave exactly like plain -p.
    Path target2 = new Path(hdfsTestDir, "targetfile2");
    runPreserveCopy(shell, "-ptop", src, target2);
    targetPerm =
        checkPreservedStatus(fs, target2, mtime, atime, owner, group, perm);
    xattrs = fs.getXAttrs(target2);
    assertTrue(xattrs.isEmpty());
    acls = fs.getAclStatus(target2).getEntries();
    assertTrue(acls.isEmpty());
    assertFalse(targetPerm.getAclBit());

    // -ptopx : additionally preserves both xattrs, but still no ACLs.
    Path target3 = new Path(hdfsTestDir, "targetfile3");
    runPreserveCopy(shell, "-ptopx", src, target3);
    targetPerm =
        checkPreservedStatus(fs, target3, mtime, atime, owner, group, perm);
    xattrs = fs.getXAttrs(target3);
    assertEquals(2, xattrs.size());
    assertArrayEquals(USER_A1_VALUE, xattrs.get(USER_A1));
    assertArrayEquals(TRUSTED_A1_VALUE, xattrs.get(TRUSTED_A1));
    acls = fs.getAclStatus(target3).getEntries();
    assertTrue(acls.isEmpty());
    assertFalse(targetPerm.getAclBit());

    // -ptopa : preserves the ACL (setting the ACL bit) but no xattrs.
    Path target4 = new Path(hdfsTestDir, "targetfile4");
    runPreserveCopy(shell, "-ptopa", src, target4);
    targetPerm =
        checkPreservedStatus(fs, target4, mtime, atime, owner, group, perm);
    xattrs = fs.getXAttrs(target4);
    assertTrue(xattrs.isEmpty());
    acls = fs.getAclStatus(target4).getEntries();
    assertFalse(acls.isEmpty());
    assertTrue(targetPerm.getAclBit());
    assertEquals(fs.getAclStatus(src), fs.getAclStatus(target4));

    // -ptoa (verify -pa option will preserve permissions also)
    Path target5 = new Path(hdfsTestDir, "targetfile5");
    runPreserveCopy(shell, "-ptoa", src, target5);
    targetPerm =
        checkPreservedStatus(fs, target5, mtime, atime, owner, group, perm);
    xattrs = fs.getXAttrs(target5);
    assertTrue(xattrs.isEmpty());
    acls = fs.getAclStatus(target5).getEntries();
    assertFalse(acls.isEmpty());
    assertTrue(targetPerm.getAclBit());
    assertEquals(fs.getAclStatus(src), fs.getAclStatus(target5));
  } finally {
    if (null != shell) {
      shell.close();
    }
    if (null != fs) {
      fs.delete(hdfsTestDir, true);
      fs.close();
    }
    cluster.shutdown();
  }
}

/**
 * Runs {@code -cp <flags> <src> <target>} and asserts a successful exit.
 */
private static void runPreserveCopy(FsShell shell, String flags, Path src,
    Path target) throws Exception {
  String[] argv = new String[] { "-cp", flags, src.toUri().toString(),
      target.toUri().toString() };
  assertEquals("cp " + flags + " is not working", SUCCESS,
      ToolRunner.run(shell, argv));
}

/**
 * Asserts that the copy at {@code target} preserved the source's
 * modification/access times, owner, group, and permission bits.
 *
 * @return the target's permission, for subsequent ACL-bit checks
 */
private static FsPermission checkPreservedStatus(FileSystem fs, Path target,
    long mtime, long atime, String owner, String group, FsPermission perm)
    throws Exception {
  FileStatus targetStatus = fs.getFileStatus(target);
  assertEquals(mtime, targetStatus.getModificationTime());
  assertEquals(atime, targetStatus.getAccessTime());
  assertEquals(owner, targetStatus.getOwner());
  assertEquals(group, targetStatus.getGroup());
  FsPermission targetPerm = targetStatus.getPermission();
  assertTrue(perm.equals(targetPerm));
  return targetPerm;
}
// Exercises cp of raw.* xattrs: they may be copied only when BOTH the source
// and the destination resolve under /.reserved/raw; a raw/non-raw mismatch
// must fail the copy (per-case verdicts are driven by the helper below).
@Test (timeout = 120000)
public void testCopyCommandsWithRawXAttrs() throws Exception {
  final Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
      numDataNodes(1).format(true).build();
  FsShell shell = null;
  FileSystem fs = null;
  final String testdir = "/tmp/TestDFSShell-testCopyCommandsWithRawXAttrs-"
      + counter.getAndIncrement();
  final Path hdfsTestDir = new Path(testdir);
  final Path rawHdfsTestDir = new Path("/.reserved/raw" + testdir);
  try {
    fs = cluster.getFileSystem();
    fs.mkdirs(hdfsTestDir);
    final Path src = new Path(hdfsTestDir, "srcfile");
    final String rawSrcBase = "/.reserved/raw" + testdir;
    final Path rawSrc = new Path(rawSrcBase, "srcfile");
    fs.create(src).close();
    final Path srcDir = new Path(hdfsTestDir, "srcdir");
    final Path rawSrcDir = new Path("/.reserved/raw" + testdir, "srcdir");
    fs.mkdirs(srcDir);
    final Path srcDirFile = new Path(srcDir, "srcfile");
    final Path rawSrcDirFile =
        new Path("/.reserved/raw" + srcDirFile);
    fs.create(srcDirFile).close();
    // Tag every source (via its raw path) with one user and one raw xattr;
    // setting RAW_A1 requires going through the /.reserved/raw path.
    final Path[] paths = { rawSrc, rawSrcDir, rawSrcDirFile };
    final String[] xattrNames = { USER_A1, RAW_A1 };
    final byte[][] xattrVals = { USER_A1_VALUE, RAW_A1_VALUE };
    for (int i = 0; i < paths.length; i++) {
      for (int j = 0; j < xattrNames.length; j++) {
        fs.setXAttr(paths[i], xattrNames[j], xattrVals[j]);
      }
    }
    shell = new FsShell(conf);
    /* Check that a file as the source path works ok. */
    doTestCopyCommandsWithRawXAttrs(shell, fs, src, hdfsTestDir, false);
    doTestCopyCommandsWithRawXAttrs(shell, fs, rawSrc, hdfsTestDir, false);
    doTestCopyCommandsWithRawXAttrs(shell, fs, src, rawHdfsTestDir, false);
    doTestCopyCommandsWithRawXAttrs(shell, fs, rawSrc, rawHdfsTestDir, true);
    /* Use a relative /.reserved/raw path. */
    final Path savedWd = fs.getWorkingDirectory();
    try {
      fs.setWorkingDirectory(new Path(rawSrcBase));
      final Path relRawSrc = new Path("../srcfile");
      final Path relRawHdfsTestDir = new Path("..");
      doTestCopyCommandsWithRawXAttrs(shell, fs, relRawSrc, relRawHdfsTestDir,
          true);
    } finally {
      // Restore the working directory for the remaining absolute-path cases.
      fs.setWorkingDirectory(savedWd);
    }
    /* Check that a directory as the source path works ok. */
    doTestCopyCommandsWithRawXAttrs(shell, fs, srcDir, hdfsTestDir, false);
    doTestCopyCommandsWithRawXAttrs(shell, fs, rawSrcDir, hdfsTestDir, false);
    doTestCopyCommandsWithRawXAttrs(shell, fs, srcDir, rawHdfsTestDir, false);
    doTestCopyCommandsWithRawXAttrs(shell, fs, rawSrcDir, rawHdfsTestDir,
        true);
    /* Use relative in an absolute path. */
    final String relRawSrcDir = "./.reserved/../.reserved/raw/../raw" +
        testdir + "/srcdir";
    final String relRawDstDir = "./.reserved/../.reserved/raw/../raw" +
        testdir;
    doTestCopyCommandsWithRawXAttrs(shell, fs, new Path(relRawSrcDir),
        new Path(relRawDstDir), true);
  } finally {
    if (null != shell) {
      shell.close();
    }
    if (null != fs) {
      fs.delete(hdfsTestDir, true);
      fs.close();
    }
    cluster.shutdown();
  }
}
/**
 * Drives {@code -cp} with "-p", "-px", and no flags for one src/dest pair
 * and checks the xattrs on each resulting target.
 *
 * Raw xattrs may only travel between two /.reserved/raw paths; when exactly
 * one side is raw the copy itself must fail with ERROR.
 *
 * @param shell shell to run the copies with
 * @param fs filesystem used for xattr verification
 * @param src source path (absolute or relative)
 * @param hdfsTestDir destination directory
 * @param expectRaw whether the raw xattr should appear on the target
 */
private void doTestCopyCommandsWithRawXAttrs(FsShell shell, FileSystem fs,
    Path src, Path hdfsTestDir, boolean expectRaw) throws Exception {
  Path target;
  boolean srcIsRaw;
  if (src.isAbsolute()) {
    srcIsRaw = src.toString().contains("/.reserved/raw");
  } else {
    // A relative path is "raw" iff it resolves under /.reserved/raw.
    srcIsRaw = new Path(fs.getWorkingDirectory(), src).
        toString().contains("/.reserved/raw");
  }
  final boolean destIsRaw = hdfsTestDir.toString().contains("/.reserved/raw");
  final boolean srcDestMismatch = srcIsRaw ^ destIsRaw;
  // -p: possibly preserve raw if src and dst are both /.reserved/raw
  if (srcDestMismatch) {
    doCopyAndTest(shell, hdfsTestDir, src, "-p", ERROR);
  } else {
    target = doCopyAndTest(shell, hdfsTestDir, src, "-p", SUCCESS);
    checkXAttrs(fs, target, expectRaw, false);
  }
  // -px: possibly preserve raw, always preserve non-raw xattrs
  if (srcDestMismatch) {
    doCopyAndTest(shell, hdfsTestDir, src, "-px", ERROR);
  } else {
    target = doCopyAndTest(shell, hdfsTestDir, src, "-px", SUCCESS);
    checkXAttrs(fs, target, expectRaw, true);
  }
  // no args: possibly preserve raw, never preserve non-raw xattrs
  if (srcDestMismatch) {
    doCopyAndTest(shell, hdfsTestDir, src, null, ERROR);
  } else {
    target = doCopyAndTest(shell, hdfsTestDir, src, null, SUCCESS);
    checkXAttrs(fs, target, expectRaw, false);
  }
}
/**
 * Copies {@code src} into {@code dest} via {@code -cp [cpArgs]} and asserts
 * the expected shell exit code.
 *
 * @param shell shell to run
 * @param dest directory that receives the copy
 * @param src path to copy
 * @param cpArgs optional preserve flags (e.g. "-p"), or null for none
 * @param expectedExitCode expected FsShell exit code (SUCCESS or ERROR)
 * @return the freshly generated, unique target path
 */
private Path doCopyAndTest(FsShell shell, Path dest, Path src,
    String cpArgs, int expectedExitCode) throws Exception {
  final Path target = new Path(dest, "targetfile" +
      counter.getAndIncrement());
  final String[] argv = cpArgs == null ?
      new String[] { "-cp", src.toUri().toString(),
          target.toUri().toString() } :
      new String[] { "-cp", cpArgs, src.toUri().toString(),
          target.toUri().toString() };
  final int ret = ToolRunner.run(shell, argv);
  // The original message claimed "-p" no matter which flags were actually
  // used; report the real arguments so failures point at the right case.
  assertEquals("cp " + (cpArgs == null ? "" : cpArgs) + " is not working",
      expectedExitCode, ret);
  return target;
}
/**
 * Verifies the target carries exactly the expected xattrs: the raw xattr
 * when {@code expectRaw}, the user xattr when {@code expectVanillaXAttrs},
 * and nothing else.
 */
private void checkXAttrs(FileSystem fs, Path target, boolean expectRaw,
    boolean expectVanillaXAttrs) throws Exception {
  final Map<String, byte[]> found = fs.getXAttrs(target);
  int wanted = 0;
  if (expectRaw) {
    wanted++;
    assertArrayEquals("raw.a1 has incorrect value",
        RAW_A1_VALUE, found.get(RAW_A1));
  }
  if (expectVanillaXAttrs) {
    wanted++;
    assertArrayEquals("user.a1 has incorrect value",
        USER_A1_VALUE, found.get(USER_A1));
  }
  // No xattrs beyond the ones explicitly expected may survive the copy.
  assertEquals("xattrs size mismatch", wanted, found.size());
}
// verify cp -ptopxa option will preserve directory attributes.
@Test (timeout = 120000)
public void testCopyCommandsToDirectoryWithPreserveOption()
    throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(true).build();
  FsShell shell = null;
  FileSystem fs = null;
  final String testdir =
      "/tmp/TestDFSShell-testCopyCommandsToDirectoryWithPreserveOption-"
      + counter.getAndIncrement();
  final Path hdfsTestDir = new Path(testdir);
  try {
    fs = cluster.getFileSystem();
    fs.mkdirs(hdfsTestDir);
    Path srcDir = new Path(hdfsTestDir, "srcDir");
    fs.mkdirs(srcDir);
    fs.setAcl(srcDir, Lists.newArrayList(
        aclEntry(ACCESS, USER, ALL),
        aclEntry(ACCESS, USER, "foo", ALL),
        aclEntry(ACCESS, GROUP, READ_EXECUTE),
        aclEntry(DEFAULT, GROUP, "bar", READ_EXECUTE),
        aclEntry(ACCESS, OTHER, EXECUTE)));
    // set sticky bit
    fs.setPermission(srcDir,
        new FsPermission(ALL, READ_EXECUTE, EXECUTE, true));
    // Create a file in srcDir to check if modification time of
    // srcDir to be preserved after copying the file.
    // If cp -p command is to preserve modification time and then copy child
    // (srcFile), modification time will not be preserved.
    Path srcFile = new Path(srcDir, "srcFile");
    fs.create(srcFile).close();
    FileStatus status = fs.getFileStatus(srcDir);
    final long mtime = status.getModificationTime();
    final long atime = status.getAccessTime();
    final String owner = status.getOwner();
    final String group = status.getGroup();
    final FsPermission perm = status.getPermission();
    fs.setXAttr(srcDir, USER_A1, USER_A1_VALUE);
    fs.setXAttr(srcDir, TRUSTED_A1, TRUSTED_A1_VALUE);
    shell = new FsShell(conf);

    // -p : stat fields only — no xattrs, no ACLs.
    Path targetDir1 = new Path(hdfsTestDir, "targetDir1");
    runDirPreserveCopy(shell, "-p", srcDir, targetDir1);
    FsPermission targetPerm = checkPreservedDirStatus(fs, targetDir1,
        mtime, atime, owner, group, perm);
    Map<String, byte[]> xattrs = fs.getXAttrs(targetDir1);
    assertTrue(xattrs.isEmpty());
    List<AclEntry> acls = fs.getAclStatus(targetDir1).getEntries();
    assertTrue(acls.isEmpty());
    assertFalse(targetPerm.getAclBit());

    // -ptop : explicit t/o/p flags behave exactly like plain -p.
    Path targetDir2 = new Path(hdfsTestDir, "targetDir2");
    runDirPreserveCopy(shell, "-ptop", srcDir, targetDir2);
    targetPerm = checkPreservedDirStatus(fs, targetDir2,
        mtime, atime, owner, group, perm);
    xattrs = fs.getXAttrs(targetDir2);
    assertTrue(xattrs.isEmpty());
    acls = fs.getAclStatus(targetDir2).getEntries();
    assertTrue(acls.isEmpty());
    assertFalse(targetPerm.getAclBit());

    // -ptopx : additionally preserves both xattrs, but still no ACLs.
    Path targetDir3 = new Path(hdfsTestDir, "targetDir3");
    runDirPreserveCopy(shell, "-ptopx", srcDir, targetDir3);
    targetPerm = checkPreservedDirStatus(fs, targetDir3,
        mtime, atime, owner, group, perm);
    xattrs = fs.getXAttrs(targetDir3);
    assertEquals(2, xattrs.size());
    assertArrayEquals(USER_A1_VALUE, xattrs.get(USER_A1));
    assertArrayEquals(TRUSTED_A1_VALUE, xattrs.get(TRUSTED_A1));
    acls = fs.getAclStatus(targetDir3).getEntries();
    assertTrue(acls.isEmpty());
    assertFalse(targetPerm.getAclBit());

    // -ptopa : preserves the ACL (setting the ACL bit) but no xattrs.
    Path targetDir4 = new Path(hdfsTestDir, "targetDir4");
    runDirPreserveCopy(shell, "-ptopa", srcDir, targetDir4);
    targetPerm = checkPreservedDirStatus(fs, targetDir4,
        mtime, atime, owner, group, perm);
    xattrs = fs.getXAttrs(targetDir4);
    assertTrue(xattrs.isEmpty());
    acls = fs.getAclStatus(targetDir4).getEntries();
    assertFalse(acls.isEmpty());
    assertTrue(targetPerm.getAclBit());
    assertEquals(fs.getAclStatus(srcDir), fs.getAclStatus(targetDir4));

    // -ptoa (verify -pa option will preserve permissions also)
    Path targetDir5 = new Path(hdfsTestDir, "targetDir5");
    runDirPreserveCopy(shell, "-ptoa", srcDir, targetDir5);
    targetPerm = checkPreservedDirStatus(fs, targetDir5,
        mtime, atime, owner, group, perm);
    xattrs = fs.getXAttrs(targetDir5);
    assertTrue(xattrs.isEmpty());
    acls = fs.getAclStatus(targetDir5).getEntries();
    assertFalse(acls.isEmpty());
    assertTrue(targetPerm.getAclBit());
    assertEquals(fs.getAclStatus(srcDir), fs.getAclStatus(targetDir5));
  } finally {
    if (shell != null) {
      shell.close();
    }
    if (fs != null) {
      fs.delete(hdfsTestDir, true);
      fs.close();
    }
    cluster.shutdown();
  }
}

/**
 * Runs {@code -cp <flags> <srcDir> <targetDir>} and asserts success.
 */
private static void runDirPreserveCopy(FsShell shell, String flags,
    Path srcDir, Path targetDir) throws Exception {
  String[] argv = new String[] { "-cp", flags, srcDir.toUri().toString(),
      targetDir.toUri().toString() };
  assertEquals("cp " + flags + " is not working", SUCCESS,
      ToolRunner.run(shell, argv));
}

/**
 * Asserts that the copied directory preserved the source directory's
 * modification/access times, owner, group, and permission bits (including
 * the sticky bit, which is part of the FsPermission comparison).
 *
 * @return the target's permission, for subsequent ACL-bit checks
 */
private static FsPermission checkPreservedDirStatus(FileSystem fs,
    Path targetDir, long mtime, long atime, String owner, String group,
    FsPermission perm) throws Exception {
  FileStatus targetStatus = fs.getFileStatus(targetDir);
  assertEquals(mtime, targetStatus.getModificationTime());
  assertEquals(atime, targetStatus.getAccessTime());
  assertEquals(owner, targetStatus.getOwner());
  assertEquals(group, targetStatus.getGroup());
  FsPermission targetPerm = targetStatus.getPermission();
  assertTrue(perm.equals(targetPerm));
  return targetPerm;
}
// Verify cp -pa option will preserve both ACL and sticky bit.
@Test (timeout = 120000)
public void testCopyCommandsPreserveAclAndStickyBit() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(true).build();
  FsShell shell = null;
  FileSystem fs = null;
  final String testdir =
      "/tmp/TestDFSShell-testCopyCommandsPreserveAclAndStickyBit-"
      + counter.getAndIncrement();
  final Path hdfsTestDir = new Path(testdir);
  try {
    fs = cluster.getFileSystem();
    fs.mkdirs(hdfsTestDir);
    Path src = new Path(hdfsTestDir, "srcfile");
    fs.create(src).close();
    fs.setAcl(src, Lists.newArrayList(
        aclEntry(ACCESS, USER, ALL),
        aclEntry(ACCESS, USER, "foo", ALL),
        aclEntry(ACCESS, GROUP, READ_EXECUTE),
        aclEntry(ACCESS, GROUP, "bar", READ_EXECUTE),
        aclEntry(ACCESS, OTHER, EXECUTE)));
    // set sticky bit
    fs.setPermission(src,
        new FsPermission(ALL, READ_EXECUTE, EXECUTE, true));
    // Snapshot the source's stat fields to compare against each copy.
    FileStatus status = fs.getFileStatus(src);
    final long mtime = status.getModificationTime();
    final long atime = status.getAccessTime();
    final String owner = status.getOwner();
    final String group = status.getGroup();
    final FsPermission perm = status.getPermission();
    shell = new FsShell(conf);
    // -p preserves sticky bit and doesn't preserve ACL
    Path target1 = new Path(hdfsTestDir, "targetfile1");
    String[] argv = new String[] { "-cp", "-p", src.toUri().toString(),
        target1.toUri().toString() };
    int ret = ToolRunner.run(shell, argv);
    assertEquals("cp is not working", SUCCESS, ret);
    FileStatus targetStatus = fs.getFileStatus(target1);
    assertEquals(mtime, targetStatus.getModificationTime());
    assertEquals(atime, targetStatus.getAccessTime());
    assertEquals(owner, targetStatus.getOwner());
    assertEquals(group, targetStatus.getGroup());
    FsPermission targetPerm = targetStatus.getPermission();
    // Equal permissions imply the sticky bit survived the copy.
    assertTrue(perm.equals(targetPerm));
    List<AclEntry> acls = fs.getAclStatus(target1).getEntries();
    assertTrue(acls.isEmpty());
    assertFalse(targetPerm.getAclBit());
    // -ptopa preserves both sticky bit and ACL
    Path target2 = new Path(hdfsTestDir, "targetfile2");
    argv = new String[] { "-cp", "-ptopa", src.toUri().toString(),
        target2.toUri().toString() };
    ret = ToolRunner.run(shell, argv);
    assertEquals("cp -ptopa is not working", SUCCESS, ret);
    targetStatus = fs.getFileStatus(target2);
    assertEquals(mtime, targetStatus.getModificationTime());
    assertEquals(atime, targetStatus.getAccessTime());
    assertEquals(owner, targetStatus.getOwner());
    assertEquals(group, targetStatus.getGroup());
    targetPerm = targetStatus.getPermission();
    assertTrue(perm.equals(targetPerm));
    acls = fs.getAclStatus(target2).getEntries();
    assertFalse(acls.isEmpty());
    assertTrue(targetPerm.getAclBit());
    assertEquals(fs.getAclStatus(src), fs.getAclStatus(target2));
  } finally {
    if (null != shell) {
      shell.close();
    }
    if (null != fs) {
      fs.delete(hdfsTestDir, true);
      fs.close();
    }
    cluster.shutdown();
  }
}
// force Copy Option is -f
/**
 * Verifies that put/copyFromLocal/cp refuse to overwrite an existing
 * destination file by default but succeed when given the -f (force) flag.
 */
@Test (timeout = 30000)
public void testCopyCommandsWithForceOption() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(true).build();
  FsShell shell = null;
  FileSystem fs = null;
  final File localFile = new File(TEST_ROOT_DIR, "testFileForPut");
  final String localfilepath =
      new Path(localFile.getAbsolutePath()).toUri().toString();
  final String testdir = "/tmp/TestDFSShell-testCopyCommandsWithForceOption-"
      + counter.getAndIncrement();
  final Path hdfsTestDir = new Path(testdir);
  try {
    fs = cluster.getFileSystem();
    fs.mkdirs(hdfsTestDir);
    localFile.createNewFile();
    // Pre-create the destination so each non-forced copy collides with it.
    writeFile(fs, new Path(testdir, "testFileForPut"));
    shell = new FsShell();
    // Tests for put
    String[] argv = new String[] { "-put", "-f", localfilepath, testdir };
    int res = ToolRunner.run(shell, argv);
    assertEquals("put -f is not working", SUCCESS, res);
    argv = new String[] { "-put", localfilepath, testdir };
    res = ToolRunner.run(shell, argv);
    assertEquals("put command itself is able to overwrite the file", ERROR,
        res);
    // Tests for copyFromLocal
    argv = new String[] { "-copyFromLocal", "-f", localfilepath, testdir };
    res = ToolRunner.run(shell, argv);
    assertEquals("copyFromLocal -f is not working", SUCCESS, res);
    argv = new String[] { "-copyFromLocal", localfilepath, testdir };
    res = ToolRunner.run(shell, argv);
    assertEquals(
        "copyFromLocal command itself is able to overwrite the file", ERROR,
        res);
    // Tests for cp
    argv = new String[] { "-cp", "-f", localfilepath, testdir };
    res = ToolRunner.run(shell, argv);
    assertEquals("cp -f is not working", SUCCESS, res);
    argv = new String[] { "-cp", localfilepath, testdir };
    res = ToolRunner.run(shell, argv);
    assertEquals("cp command itself is able to overwrite the file", ERROR,
        res);
  } finally {
    // Cleanup branches braced for safety (the originals were one-liners).
    if (null != shell) {
      shell.close();
    }
    if (localFile.exists()) {
      localFile.delete();
    }
    if (null != fs) {
      fs.delete(hdfsTestDir, true);
      fs.close();
    }
    cluster.shutdown();
  }
}
// setrep for file and directory.
/**
 * Verifies that -setrep on a single file changes only that file's
 * replication factor, and that -setrep on a directory applies recursively
 * to nested files.
 */
@Test (timeout = 30000)
public void testSetrep() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(true).build();
  FsShell shell = null;
  FileSystem fs = null;
  final String testdir1 = "/tmp/TestDFSShell-testSetrep-" + counter.getAndIncrement();
  final String testdir2 = testdir1 + "/nestedDir";
  final Path hdfsFile1 = new Path(testdir1, "testFileForSetrep");
  final Path hdfsFile2 = new Path(testdir2, "testFileForSetrep");
  // Short.valueOf replaces the deprecated Short(short) constructor and can
  // reuse cached instances.
  final Short oldRepFactor = Short.valueOf((short) 1);
  final Short newRepFactor = Short.valueOf((short) 3);
  try {
    String[] argv;
    cluster.waitActive();
    fs = cluster.getFileSystem();
    assertThat(fs.mkdirs(new Path(testdir2)), is(true));
    shell = new FsShell(conf);
    fs.create(hdfsFile1, true).close();
    fs.create(hdfsFile2, true).close();
    // Tests for setrep on a file.
    argv = new String[] { "-setrep", newRepFactor.toString(), hdfsFile1.toString() };
    assertThat(shell.run(argv), is(SUCCESS));
    assertThat(fs.getFileStatus(hdfsFile1).getReplication(), is(newRepFactor));
    // The nested file must be untouched by a file-level setrep.
    assertThat(fs.getFileStatus(hdfsFile2).getReplication(), is(oldRepFactor));
    // Tests for setrep on a directory and make sure it is applied recursively.
    argv = new String[] { "-setrep", newRepFactor.toString(), testdir1 };
    assertThat(shell.run(argv), is(SUCCESS));
    assertThat(fs.getFileStatus(hdfsFile1).getReplication(), is(newRepFactor));
    assertThat(fs.getFileStatus(hdfsFile2).getReplication(), is(newRepFactor));
  } finally {
    if (shell != null) {
      shell.close();
    }
    cluster.shutdown();
  }
}
/**
 * Delete a file optionally configuring trash on the server and client.
 *
 * @param serverTrash whether fs.trash.interval is enabled on the NameNode
 * @param clientTrash whether fs.trash.interval is enabled on the client
 */
private void deleteFileUsingTrash(
    boolean serverTrash, boolean clientTrash) throws Exception {
  // Run a cluster, optionally with trash enabled on the server
  Configuration serverConf = new HdfsConfiguration();
  if (serverTrash) {
    serverConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(serverConf)
      .numDataNodes(1).format(true).build();
  Configuration clientConf = new Configuration(serverConf);
  // Create a client, optionally with trash enabled
  if (clientTrash) {
    clientConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
  } else {
    clientConf.setLong(FS_TRASH_INTERVAL_KEY, 0);
  }
  FsShell shell = new FsShell(clientConf);
  FileSystem fs = null;
  try {
    // Create and delete a file
    fs = cluster.getFileSystem();
    // Use a separate tmp dir for each invocation.
    final String testdir = "/tmp/TestDFSShell-deleteFileUsingTrash-" +
        counter.getAndIncrement();
    writeFile(fs, new Path(testdir, "foo"));
    final String testFile = testdir + "/foo";
    final String trashFile = shell.getCurrentTrashDir() + "/" + testFile;
    String[] argv = new String[] { "-rm", testFile };
    int res = ToolRunner.run(shell, argv);
    assertEquals("rm failed", 0, res);
    if (serverTrash) {
      // If the server config was set we should use it unconditionally
      assertTrue("File not in trash", fs.exists(new Path(trashFile)));
    } else if (clientTrash) {
      // If the server config was not set but the client config was
      // set then we should use it ("trashed" typo fixed in the message)
      assertTrue("File not in trash", fs.exists(new Path(trashFile)));
    } else {
      // If neither was set then we should not have trashed the file
      assertFalse("File was not removed", fs.exists(new Path(testFile)));
      assertFalse("File was trashed", fs.exists(new Path(trashFile)));
    }
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
@Test (timeout = 300000)
public void testAppendToFile() throws Exception {
final int inputFileLength = 1024 * 1024;
File testRoot = new File(TEST_ROOT_DIR, "testAppendtoFileDir");
testRoot.mkdirs();
File file1 = new File(testRoot, "file1");
File file2 = new File(testRoot, "file2");
createLocalFileWithRandomData(inputFileLength, file1);
createLocalFileWithRandomData(inputFileLength, file2);
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
try {
FileSystem dfs = cluster.getFileSystem();
assertTrue("Not a HDFS: " + dfs.getUri(),
dfs instanceof DistributedFileSystem);
// Run appendToFile once, make sure that the target file is
// created and is of the right size.
Path remoteFile = new Path("/remoteFile");
FsShell shell = new FsShell();
shell.setConf(conf);
String[] argv = new String[] {
"-appendToFile", file1.toString(), file2.toString(), remoteFile.toString() };
int res = ToolRunner.run(shell, argv);
assertThat(res, is(0));
assertThat(dfs.getFileStatus(remoteFile).getLen(), is((long) inputFileLength * 2));
// Run the command once again and make sure that the target file
// size has been doubled.
res = ToolRunner.run(shell, argv);
assertThat(res, is(0));
assertThat(dfs.getFileStatus(remoteFile).getLen(), is((long) inputFileLength * 4));
} finally {
cluster.shutdown();
}
}
@Test (timeout = 300000)
public void testAppendToFileBadArgs() throws Exception {
final int inputFileLength = 1024 * 1024;
File testRoot = new File(TEST_ROOT_DIR, "testAppendToFileBadArgsDir");
testRoot.mkdirs();
File file1 = new File(testRoot, "file1");
createLocalFileWithRandomData(inputFileLength, file1);
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
try {
FileSystem dfs = cluster.getFileSystem();
assertTrue("Not a HDFS: " + dfs.getUri(),
dfs instanceof DistributedFileSystem);
// Run appendToFile with insufficient arguments.
FsShell shell = new FsShell();
shell.setConf(conf);
String[] argv = new String[] {
"-appendToFile", file1.toString() };
int res = ToolRunner.run(shell, argv);
assertThat(res, not(0));
// Mix stdin with other input files. Must fail.
Path remoteFile = new Path("/remoteFile");
argv = new String[] {
"-appendToFile", file1.toString(), "-", remoteFile.toString() };
res = ToolRunner.run(shell, argv);
assertThat(res, not(0));
} finally {
cluster.shutdown();
}
}
  /**
   * Tests that setfattr/getfattr enforce path permissions: a user without
   * write access cannot set an xattr, and a user without read access cannot
   * get or remove one. Shell error output is captured by redirecting
   * System.err into a buffer.
   */
  @Test (timeout = 30000)
  public void testSetXAttrPermission() throws Exception {
    UserGroupInformation user = UserGroupInformation.
        createUserForTesting("user", new String[] {"mygroup"});
    MiniDFSCluster cluster = null;
    PrintStream bak = null;
    try {
      final Configuration conf = new HdfsConfiguration();
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      Path p = new Path("/foo");
      fs.mkdirs(p);
      // Save the real stderr so the finally block can restore it.
      bak = System.err;
      final FsShell fshell = new FsShell(conf);
      final ByteArrayOutputStream out = new ByteArrayOutputStream();
      System.setErr(new PrintStream(out));
      // No permission to write xattr (0700: "user" is not the owner)
      fs.setPermission(p, new FsPermission((short) 0700));
      user.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          int ret = ToolRunner.run(fshell, new String[]{
              "-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
          assertEquals("Returned should be 1", 1, ret);
          String str = out.toString();
          assertTrue("Permission denied printed",
              str.indexOf("Permission denied") != -1);
          out.reset();
          return null;
        }
      });
      // The owning (current) user can set the xattr.
      int ret = ToolRunner.run(fshell, new String[]{
          "-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
      assertEquals("Returned should be 0", 0, ret);
      out.reset();
      // No permission to read and remove (0750: "other" has no access)
      fs.setPermission(p, new FsPermission((short) 0750));
      user.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          // Read
          int ret = ToolRunner.run(fshell, new String[]{
              "-getfattr", "-n", "user.a1", "/foo"});
          assertEquals("Returned should be 1", 1, ret);
          String str = out.toString();
          assertTrue("Permission denied printed",
              str.indexOf("Permission denied") != -1);
          out.reset();
          // Remove
          ret = ToolRunner.run(fshell, new String[]{
              "-setfattr", "-x", "user.a1", "/foo"});
          assertEquals("Returned should be 1", 1, ret);
          str = out.toString();
          assertTrue("Permission denied printed",
              str.indexOf("Permission denied") != -1);
          out.reset();
          return null;
        }
      });
    } finally {
      if (bak != null) {
        System.setErr(bak);
      }
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
/* HDFS-6413 xattr names erroneously handled as case-insensitive */
@Test (timeout = 30000)
public void testSetXAttrCaseSensitivity() throws Exception {
MiniDFSCluster cluster = null;
PrintStream bak = null;
try {
final Configuration conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
Path p = new Path("/mydir");
fs.mkdirs(p);
bak = System.err;
final FsShell fshell = new FsShell(conf);
final ByteArrayOutputStream out = new ByteArrayOutputStream();
System.setOut(new PrintStream(out));
doSetXattr(out, fshell,
new String[] {"-setfattr", "-n", "User.Foo", "/mydir"},
new String[] {"-getfattr", "-d", "/mydir"},
new String[] {"user.Foo"},
new String[] {});
doSetXattr(out, fshell,
new String[] {"-setfattr", "-n", "user.FOO", "/mydir"},
new String[] {"-getfattr", "-d", "/mydir"},
new String[] {"user.Foo", "user.FOO"},
new String[] {});
doSetXattr(out, fshell,
new String[] {"-setfattr", "-n", "USER.foo", "/mydir"},
new String[] {"-getfattr", "-d", "/mydir"},
new String[] {"user.Foo", "user.FOO", "user.foo"},
new String[] {});
doSetXattr(out, fshell,
new String[] {"-setfattr", "-n", "USER.fOo", "-v", "myval", "/mydir"},
new String[] {"-getfattr", "-d", "/mydir"},
new String[] {"user.Foo", "user.FOO", "user.foo", "user.fOo=\"myval\""},
new String[] {"user.Foo=", "user.FOO=", "user.foo="});
doSetXattr(out, fshell,
new String[] {"-setfattr", "-x", "useR.foo", "/mydir"},
new String[] {"-getfattr", "-d", "/mydir"},
new String[] {"user.Foo", "user.FOO"},
new String[] {"foo"});
doSetXattr(out, fshell,
new String[] {"-setfattr", "-x", "USER.FOO", "/mydir"},
new String[] {"-getfattr", "-d", "/mydir"},
new String[] {"user.Foo"},
new String[] {"FOO"});
doSetXattr(out, fshell,
new String[] {"-setfattr", "-x", "useR.Foo", "/mydir"},
new String[] {"-getfattr", "-n", "User.Foo", "/mydir"},
new String[] {},
new String[] {"Foo"});
} finally {
if (bak != null) {
System.setOut(bak);
}
if (cluster != null) {
cluster.shutdown();
}
}
}
private void doSetXattr(ByteArrayOutputStream out, FsShell fshell,
String[] setOp, String[] getOp, String[] expectArr,
String[] dontExpectArr) throws Exception {
int ret = ToolRunner.run(fshell, setOp);
out.reset();
ret = ToolRunner.run(fshell, getOp);
final String str = out.toString();
for (int i = 0; i < expectArr.length; i++) {
final String expect = expectArr[i];
final StringBuilder sb = new StringBuilder
("Incorrect results from getfattr. Expected: ");
sb.append(expect).append(" Full Result: ");
sb.append(str);
assertTrue(sb.toString(),
str.indexOf(expect) != -1);
}
for (int i = 0; i < dontExpectArr.length; i++) {
String dontExpect = dontExpectArr[i];
final StringBuilder sb = new StringBuilder
("Incorrect results from getfattr. Didn't Expect: ");
sb.append(dontExpect).append(" Full Result: ");
sb.append(str);
assertTrue(sb.toString(),
str.indexOf(dontExpect) == -1);
}
out.reset();
}
/**
*
* Test to make sure that user namespace xattrs can be set only if path has
* access and for sticky directorries, only owner/privileged user can write.
* Trusted namespace xattrs can be set only with privileged users.
*
* As user1: Create a directory (/foo) as user1, chown it to user1 (and
* user1's group), grant rwx to "other".
*
* As user2: Set an xattr (should pass with path access).
*
* As user1: Set an xattr (should pass).
*
* As user2: Read the xattr (should pass). Remove the xattr (should pass with
* path access).
*
* As user1: Read the xattr (should pass). Remove the xattr (should pass).
*
* As user1: Change permissions only to owner
*
* As User2: Set an Xattr (Should fail set with no path access) Remove an
* Xattr (Should fail with no path access)
*
* As SuperUser: Set an Xattr with Trusted (Should pass)
*/
  @Test (timeout = 30000)
  public void testSetXAttrPermissionAsDifferentOwner() throws Exception {
    final String USER1 = "user1";
    final String GROUP1 = "supergroup";
    final UserGroupInformation user1 = UserGroupInformation.
        createUserForTesting(USER1, new String[] {GROUP1});
    final UserGroupInformation user2 = UserGroupInformation.
        createUserForTesting("user2", new String[] {"mygroup2"});
    final UserGroupInformation SUPERUSER = UserGroupInformation.getCurrentUser();
    MiniDFSCluster cluster = null;
    PrintStream bak = null;
    try {
      final Configuration conf = new HdfsConfiguration();
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
      cluster.waitActive();
      final FileSystem fs = cluster.getFileSystem();
      // Make user1 the owner of the root so it can create /foo below.
      fs.setOwner(new Path("/"), USER1, GROUP1);
      // Save the real stderr so the finally block can restore it.
      bak = System.err;
      final FsShell fshell = new FsShell(conf);
      final ByteArrayOutputStream out = new ByteArrayOutputStream();
      System.setErr(new PrintStream(out));
      //Test 1. Let user1 be owner for /foo
      user1.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          final int ret = ToolRunner.run(fshell, new String[]{
              "-mkdir", "/foo"});
          assertEquals("Return should be 0", 0, ret);
          out.reset();
          return null;
        }
      });
      //Test 2. Give access to others
      user1.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          // Give access to "other"
          final int ret = ToolRunner.run(fshell, new String[]{
              "-chmod", "707", "/foo"});
          assertEquals("Return should be 0", 0, ret);
          out.reset();
          return null;
        }
      });
      // Test 3. Should be allowed to write xattr if there is a path access to
      // user (user2).
      user2.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          final int ret = ToolRunner.run(fshell, new String[]{
              "-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
          assertEquals("Returned should be 0", 0, ret);
          out.reset();
          return null;
        }
      });
      //Test 4. There should be permission to write xattr for
      // the owning user with write permissions.
      user1.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          final int ret = ToolRunner.run(fshell, new String[]{
              "-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
          assertEquals("Returned should be 0", 0, ret);
          out.reset();
          return null;
        }
      });
      // Test 5. There should be permission to read non-owning user (user2) if
      // there is path access to that user and also can remove.
      user2.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          // Read
          int ret = ToolRunner.run(fshell, new String[] { "-getfattr", "-n",
              "user.a1", "/foo" });
          assertEquals("Returned should be 0", 0, ret);
          out.reset();
          // Remove
          ret = ToolRunner.run(fshell, new String[] { "-setfattr", "-x",
              "user.a1", "/foo" });
          assertEquals("Returned should be 0", 0, ret);
          out.reset();
          return null;
        }
      });
      // Test 6. There should be permission to read/remove for
      // the owning user with path access.
      // NOTE(review): this step is an empty placeholder -- the read/remove by
      // the owner is never actually exercised here; confirm whether the
      // assertions were intended and are missing.
      user1.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          return null;
        }
      });
      // Test 7. Change permission to have path access only to owner(user1)
      user1.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          // Revoke access from "other" (despite the comment below, 700
          // removes group/other access).
          final int ret = ToolRunner.run(fshell, new String[]{
              "-chmod", "700", "/foo"});
          assertEquals("Return should be 0", 0, ret);
          out.reset();
          return null;
        }
      });
      // Test 8. There should be no permissions to set for
      // the non-owning user with no path access.
      user2.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          // set
          int ret = ToolRunner.run(fshell, new String[] { "-setfattr", "-n",
              "user.a2", "/foo" });
          assertEquals("Returned should be 1", 1, ret);
          final String str = out.toString();
          assertTrue("Permission denied printed",
              str.indexOf("Permission denied") != -1);
          out.reset();
          return null;
        }
      });
      // Test 9. There should be no permissions to remove for
      // the non-owning user with no path access.
      user2.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          // remove (-x)
          int ret = ToolRunner.run(fshell, new String[] { "-setfattr", "-x",
              "user.a2", "/foo" });
          assertEquals("Returned should be 1", 1, ret);
          final String str = out.toString();
          assertTrue("Permission denied printed",
              str.indexOf("Permission denied") != -1);
          out.reset();
          return null;
        }
      });
      // Test 10. Superuser should be allowed to set with trusted namespace
      SUPERUSER.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          // set
          int ret = ToolRunner.run(fshell, new String[] { "-setfattr", "-n",
              "trusted.a3", "/foo" });
          assertEquals("Returned should be 0", 0, ret);
          out.reset();
          return null;
        }
      });
    } finally {
      if (bak != null) {
        System.setErr(bak);
      }
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
/*
* 1. Test that CLI throws an exception and returns non-0 when user does
* not have permission to read an xattr.
* 2. Test that CLI throws an exception and returns non-0 when a non-existent
* xattr is requested.
*/
@Test (timeout = 120000)
public void testGetFAttrErrors() throws Exception {
final UserGroupInformation user = UserGroupInformation.
createUserForTesting("user", new String[] {"mygroup"});
MiniDFSCluster cluster = null;
PrintStream bakErr = null;
try {
final Configuration conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
final FileSystem fs = cluster.getFileSystem();
final Path p = new Path("/foo");
fs.mkdirs(p);
bakErr = System.err;
final FsShell fshell = new FsShell(conf);
final ByteArrayOutputStream out = new ByteArrayOutputStream();
System.setErr(new PrintStream(out));
// No permission for "other".
fs.setPermission(p, new FsPermission((short) 0700));
{
final int ret = ToolRunner.run(fshell, new String[] {
"-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
assertEquals("Returned should be 0", 0, ret);
out.reset();
}
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
int ret = ToolRunner.run(fshell, new String[] {
"-getfattr", "-n", "user.a1", "/foo"});
String str = out.toString();
assertTrue("xattr value was incorrectly returned",
str.indexOf("1234") == -1);
out.reset();
return null;
}
});
{
final int ret = ToolRunner.run(fshell, new String[]{
"-getfattr", "-n", "user.nonexistent", "/foo"});
String str = out.toString();
assertTrue("xattr value was incorrectly returned",
str.indexOf(
"getfattr: At least one of the attributes provided was not found")
>= 0);
out.reset();
}
} finally {
if (bakErr != null) {
System.setErr(bakErr);
}
if (cluster != null) {
cluster.shutdown();
}
}
}
  /**
   * Test that the server trash configuration is respected when
   * the client configuration is not set.
   */
  @Test (timeout = 30000)
  public void testServerConfigRespected() throws Exception {
    // server trash on, client trash off: the file must land in trash
    deleteFileUsingTrash(true, false);
  }
  /**
   * Test that server trash configuration is respected even when the
   * client configuration is set.
   */
  @Test (timeout = 30000)
  public void testServerConfigRespectedWithClient() throws Exception {
    // both server and client trash on: the file must land in trash
    deleteFileUsingTrash(true, true);
  }
  /**
   * Test that the client trash configuration is respected when
   * the server configuration is not set.
   */
  @Test (timeout = 30000)
  public void testClientConfigRespected() throws Exception {
    // server trash off, client trash on: the client setting must apply
    deleteFileUsingTrash(false, true);
  }
  /**
   * Test that trash is disabled by default.
   */
  @Test (timeout = 30000)
  public void testNoTrashConfig() throws Exception {
    // neither side enables trash: the file must be removed outright
    deleteFileUsingTrash(false, false);
  }
}
| korrelate/hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java | Java | apache-2.0 | 107,785 |
/*
* Copyright (C) 2009 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package java.util.concurrent;
import java.util.AbstractMap;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
/**
 * Minimal emulation of {@link java.util.concurrent.ConcurrentHashMap}.
 * Note that the javascript interpreter is <a
 * href="http://code.google.com/docreader/#p=google-web-toolkit-doc-1-5&t=DevGuideJavaCompatibility">
 * single-threaded</a>, it is essentially a {@link java.util.HashMap},
 * implementing the new methods introduced by {@link ConcurrentMap}.
 *
 * @author Hayward Chan
 */
public class ConcurrentHashMap<K, V>
    extends AbstractMap<K, V> implements ConcurrentMap<K, V> {

  /** Single-threaded backing store; every operation delegates to it. */
  private final Map<K, V> delegate;

  public ConcurrentHashMap() {
    delegate = new HashMap<K, V>();
  }

  public ConcurrentHashMap(int initialCapacity) {
    delegate = new HashMap<K, V>(initialCapacity);
  }

  public ConcurrentHashMap(int initialCapacity, float loadFactor) {
    delegate = new HashMap<K, V>(initialCapacity, loadFactor);
  }

  public ConcurrentHashMap(Map<? extends K, ? extends V> t) {
    delegate = new HashMap<K, V>(t);
  }

  public V putIfAbsent(K key, V value) {
    // No atomicity concern: the GWT environment is single-threaded.
    return containsKey(key) ? get(key) : put(key, value);
  }

  public boolean remove(Object key, Object value) {
    if (!containsKey(key) || !get(key).equals(value)) {
      return false;
    }
    remove(key);
    return true;
  }

  public boolean replace(K key, V oldValue, V newValue) {
    if (oldValue == null || newValue == null) {
      throw new NullPointerException();
    }
    if (!containsKey(key) || !get(key).equals(oldValue)) {
      return false;
    }
    put(key, newValue);
    return true;
  }

  public V replace(K key, V value) {
    checkNotNull(value);
    return containsKey(key) ? put(key, value) : null;
  }

  @Override public boolean containsKey(Object key) {
    return delegate.containsKey(checkNotNull(key));
  }

  @Override public V get(Object key) {
    return delegate.get(checkNotNull(key));
  }

  @Override public V put(K key, V value) {
    checkNotNull(key);
    checkNotNull(value);
    return delegate.put(key, value);
  }

  @Override public boolean containsValue(Object value) {
    return delegate.containsValue(checkNotNull(value));
  }

  @Override public V remove(Object key) {
    return delegate.remove(checkNotNull(key));
  }

  @Override public Set<Entry<K, V>> entrySet() {
    return delegate.entrySet();
  }

  /** Legacy Hashtable-style alias for {@link #containsValue}. */
  public boolean contains(Object value) {
    return containsValue(value);
  }

  /** Legacy Hashtable-style enumeration of the values. */
  public Enumeration<V> elements() {
    return Collections.enumeration(values());
  }

  /** Legacy Hashtable-style enumeration of the keys. */
  public Enumeration<K> keys() {
    return Collections.enumeration(keySet());
  }

  /** Rejects null arguments, mirroring the real ConcurrentHashMap. */
  private static <T> T checkNotNull(T reference) {
    if (reference == null) {
      throw new NullPointerException();
    }
    return reference;
  }
}
| rgoldberg/guava | guava-gwt/src-super/java/util/super/java/util/concurrent/ConcurrentHashMap.java | Java | apache-2.0 | 3,773 |
sap.ui.define(['sap/ui/core/mvc/Controller'],
	function(Controller) {
	"use strict";

	// Controller for the MenuButton sample: shows a toast for the default
	// button action and for each selected menu item.
	var MBController = Controller.extend("sap.m.sample.MenuButton.MB", {

		// Fired when the button part of the MenuButton is pressed.
		onDefaultAction: function() {
			sap.m.MessageToast.show("Default action triggered");
		},

		onDefaultActionAccept: function() {
			sap.m.MessageToast.show("Accepted");
		},

		// Fired when a menu item is selected; builds the "parent > child"
		// path by walking up the MenuItem hierarchy.
		onMenuAction: function(oEvent) {
			var oCurrent = oEvent.getParameter("item");
			var aPathParts = [];
			while (oCurrent instanceof sap.m.MenuItem) {
				aPathParts.unshift(oCurrent.getText());
				oCurrent = oCurrent.getParent();
			}
			sap.m.MessageToast.show("Action triggered on item: " + aPathParts.join(" > "));
		}
	});

	return MBController;
});
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.api;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.protocolrecords.CommitResponse;
import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceRequest;
import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.ReInitializeContainerRequest;
import org.apache.hadoop.yarn.api.protocolrecords.ReInitializeContainerResponse;
import org.apache.hadoop.yarn.api.protocolrecords.ResourceLocalizationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.ResourceLocalizationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.RestartContainerResponse;
import org.apache.hadoop.yarn.api.protocolrecords.RollbackResponse;
import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest;
import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerResponse;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainersResponse;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.exceptions.NMNotYetReadyException;
import org.apache.hadoop.yarn.exceptions.YarnException;
/**
* <p>The protocol between an <code>ApplicationMaster</code> and a
* <code>NodeManager</code> to start/stop and increase resource of containers
* and to get status of running containers.</p>
*
* <p>If security is enabled the <code>NodeManager</code> verifies that the
* <code>ApplicationMaster</code> has truly been allocated the container
* by the <code>ResourceManager</code> and also verifies all interactions such
* as stopping the container or obtaining status information for the container.
* </p>
*/
@Public
@Stable
public interface ContainerManagementProtocol {
/**
* <p>
* The <code>ApplicationMaster</code> provides a list of
* {@link StartContainerRequest}s to a <code>NodeManager</code> to
* <em>start</em> {@link Container}s allocated to it using this interface.
* </p>
*
* <p>
* The <code>ApplicationMaster</code> has to provide details such as allocated
* resource capability, security tokens (if enabled), command to be executed
* to start the container, environment for the process, necessary
* binaries/jar/shared-objects etc. via the {@link ContainerLaunchContext} in
* the {@link StartContainerRequest}.
* </p>
*
* <p>
* The <code>NodeManager</code> sends a response via
* {@link StartContainersResponse} which includes a list of
* {@link Container}s of successfully launched {@link Container}s, a
* containerId-to-exception map for each failed {@link StartContainerRequest} in
* which the exception indicates errors from per container and a
* allServicesMetaData map between the names of auxiliary services and their
* corresponding meta-data. Note: None-container-specific exceptions will
* still be thrown by the API method itself.
* </p>
* <p>
* The <code>ApplicationMaster</code> can use
* {@link #getContainerStatuses(GetContainerStatusesRequest)} to get updated
* statuses of the to-be-launched or launched containers.
* </p>
*
* @param request
* request to start a list of containers
* @return response including conatinerIds of all successfully launched
* containers, a containerId-to-exception map for failed requests and
* a allServicesMetaData map.
* @throws YarnException
* @throws IOException
* @throws NMNotYetReadyException
* This exception is thrown when NM starts from scratch but has not
* yet connected with RM.
*/
@Public
@Stable
StartContainersResponse startContainers(StartContainersRequest request)
throws YarnException, IOException;
/**
* <p>
* The <code>ApplicationMaster</code> requests a <code>NodeManager</code> to
* <em>stop</em> a list of {@link Container}s allocated to it using this
* interface.
* </p>
*
* <p>
* The <code>ApplicationMaster</code> sends a {@link StopContainersRequest}
* which includes the {@link ContainerId}s of the containers to be stopped.
* </p>
*
* <p>
* The <code>NodeManager</code> sends a response via
* {@link StopContainersResponse} which includes a list of {@link ContainerId}
* s of successfully stopped containers, a containerId-to-exception map for
* each failed request in which the exception indicates errors from per
* container. Note: None-container-specific exceptions will still be thrown by
* the API method itself. <code>ApplicationMaster</code> can use
* {@link #getContainerStatuses(GetContainerStatusesRequest)} to get updated
* statuses of the containers.
* </p>
*
* @param request
* request to stop a list of containers
* @return response which includes a list of containerIds of successfully
* stopped containers, a containerId-to-exception map for failed
* requests.
* @throws YarnException
* @throws IOException
*/
@Public
@Stable
StopContainersResponse stopContainers(StopContainersRequest request)
throws YarnException, IOException;
/**
* <p>
* The API used by the <code>ApplicationMaster</code> to request for current
* statuses of <code>Container</code>s from the <code>NodeManager</code>.
* </p>
*
* <p>
* The <code>ApplicationMaster</code> sends a
* {@link GetContainerStatusesRequest} which includes the {@link ContainerId}s
* of all containers whose statuses are needed.
* </p>
*
* <p>
* The <code>NodeManager</code> responds with
* {@link GetContainerStatusesResponse} which includes a list of
* {@link ContainerStatus} of the successfully queried containers and a
* containerId-to-exception map for each failed request in which the exception
* indicates errors from per container. Note: None-container-specific
* exceptions will still be thrown by the API method itself.
* </p>
*
* @param request
* request to get <code>ContainerStatus</code>es of containers with
* the specified <code>ContainerId</code>s
* @return response containing the list of <code>ContainerStatus</code> of the
* successfully queried containers and a containerId-to-exception map
* for failed requests.
*
* @throws YarnException
* @throws IOException
*/
@Public
@Stable
GetContainerStatusesResponse getContainerStatuses(
GetContainerStatusesRequest request) throws YarnException,
IOException;
/**
* <p>
* The API used by the <code>ApplicationMaster</code> to request for
* resource increase of running containers on the <code>NodeManager</code>.
* </p>
*
* @param request
* request to increase resource of a list of containers
* @return response which includes a list of containerIds of containers
* whose resource has been successfully increased and a
* containerId-to-exception map for failed requests.
*
* @throws YarnException
* @throws IOException
*/
@Public
@Unstable
IncreaseContainersResourceResponse increaseContainersResource(
IncreaseContainersResourceRequest request) throws YarnException,
IOException;
SignalContainerResponse signalToContainer(SignalContainerRequest request)
throws YarnException, IOException;
/**
* Localize resources required by the container.
* Currently, this API only works for running containers.
*
* @param request Specify the resources to be localized.
* @return Response that the localize request is accepted.
* @throws YarnException Exception specific to YARN
* @throws IOException IOException thrown from the RPC layer.
*/
@Public
@Unstable
ResourceLocalizationResponse localize(ResourceLocalizationRequest request)
throws YarnException, IOException;
/**
* ReInitialize the Container with a new Launch Context.
* @param request Specify the new ContainerLaunchContext.
* @return Response that the ReInitialize request is accepted.
* @throws YarnException Exception specific to YARN.
* @throws IOException IOException thrown from the RPC layer.
*/
@Public
@Unstable
ReInitializeContainerResponse reInitializeContainer(
ReInitializeContainerRequest request) throws YarnException, IOException;
/**
* Restart the container.
* @param containerId Container Id.
* @return Response that the restart request is accepted.
* @throws YarnException Exception specific to YARN.
* @throws IOException IOException thrown from the RPC layer.
*/
@Public
@Unstable
RestartContainerResponse restartContainer(ContainerId containerId)
throws YarnException, IOException;
/**
 * Roll back the last re-initialization if possible.
 *
 * @param containerId Container Id of the container to roll back.
 * @return Response indicating that the rollback request has been accepted.
 * @throws YarnException Exception specific to YARN.
 * @throws IOException IOException thrown from the RPC layer.
 */
@Public
@Unstable
RollbackResponse rollbackLastReInitialization(ContainerId containerId)
throws YarnException, IOException;
/**
 * Commit the last re-initialization if possible. Once the
 * re-initialization has been committed, it cannot be rolled back.
 *
 * @param containerId Container Id of the container to commit.
 * @return Response indicating that the commit request has been accepted.
 * @throws YarnException Exception specific to YARN.
 * @throws IOException IOException thrown from the RPC layer.
 */
@Public
@Unstable
CommitResponse commitLastReInitialization(ContainerId containerId)
throws YarnException, IOException;
}
| WIgor/hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ContainerManagementProtocol.java | Java | apache-2.0 | 11,258 |
/*
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.apache.batik.anim.timing;
/**
* An adapter class for {@link TimegraphListener}s.
*
* @author <a href="mailto:cam%40mcc%2eid%2eau">Cameron McCormack</a>
* @version $Id: TimegraphAdapter.java 475477 2006-11-15 22:44:28Z cam $
*/
public class TimegraphAdapter implements TimegraphListener {
/**
 * Invoked to indicate that a timed element has been added to the
 * document.
 *
 * @param e the TimedElement that was added
 */
public void elementAdded(TimedElement e) {
}
/**
 * Invoked to indicate that a timed element has been removed from the
 * document.
 *
 * @param e the TimedElement that was removed
 */
public void elementRemoved(TimedElement e) {
}
/**
 * Invoked to indicate that a timed element has become active.
 * @param e the TimedElement that became active
 * @param t the time (in parent simple time) that the element became active
 */
public void elementActivated(TimedElement e, float t) {
}
/**
 * Invoked to indicate that a timed element has become inactive
 * and is filling.
 *
 * @param e the TimedElement that is filling
 * @param t the time (in parent simple time) of the transition
 */
public void elementFilled(TimedElement e, float t) {
}
/**
 * Invoked to indicate that a timed element has become inactive
 * and is not filling.
 *
 * @param e the TimedElement that was deactivated
 * @param t the time (in parent simple time) of the transition
 */
public void elementDeactivated(TimedElement e, float t) {
}
/**
 * Invoked to indicate that an interval was created for the given
 * timed element.
 *
 * @param e the TimedElement the interval belongs to
 * @param i the Interval that was created
 */
public void intervalCreated(TimedElement e, Interval i) {
}
/**
 * Invoked to indicate that an interval was removed for the given
 * timed element.
 *
 * @param e the TimedElement the interval belonged to
 * @param i the Interval that was removed
 */
public void intervalRemoved(TimedElement e, Interval i) {
}
/**
 * Invoked to indicate that an interval's endpoints were changed.
 *
 * @param e the TimedElement the interval belongs to
 * @param i the Interval whose endpoints changed
 */
public void intervalChanged(TimedElement e, Interval i) {
}
/**
 * Invoked to indicate that the given interval began.
 * @param i the Interval that began, or null if no interval is
 * active for the given timed element.
 */
public void intervalBegan(TimedElement e, Interval i) {
}
/**
 * Invoked to indicate that the given timed element began a repeat
 * iteration at the specified time.
 *
 * @param e the TimedElement that repeated
 * @param i the repeat iteration index
 * @param t the time (in parent simple time) the iteration began
 */
public void elementRepeated(TimedElement e, int i, float t) {
}
/**
 * Invoked to indicate that the list of instance times for the given
 * timed element has been updated.
 *
 * @param e the TimedElement whose instance times changed
 * @param isBegin presumably non-zero when the begin (rather than end)
 *        instance time list changed — TODO confirm against
 *        {@link TimegraphListener}
 */
public void elementInstanceTimesChanged(TimedElement e, float isBegin) {
}
}
| sflyphotobooks/crp-batik | sources/org/apache/batik/anim/timing/TimegraphAdapter.java | Java | apache-2.0 | 3,242 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.internal.processors.cache.binary;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.binary.BinaryObject;
import org.apache.ignite.binary.BinaryObjectBuilder;
import org.apache.ignite.cache.CacheMode;
import org.apache.ignite.cache.CacheWriteSynchronizationMode;
import org.apache.ignite.cluster.ClusterGroup;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.internal.GridKernalContext;
import org.apache.ignite.internal.GridTopic;
import org.apache.ignite.internal.IgniteEx;
import org.apache.ignite.internal.binary.BinaryMarshaller;
import org.apache.ignite.internal.managers.communication.GridIoManager;
import org.apache.ignite.internal.managers.communication.GridMessageListener;
import org.apache.ignite.internal.managers.discovery.DiscoveryCustomMessage;
import org.apache.ignite.internal.util.IgniteUtils;
import org.apache.ignite.internal.util.future.GridFutureAdapter;
import org.apache.ignite.internal.util.typedef.PA;
import org.apache.ignite.internal.util.typedef.internal.U;
import org.apache.ignite.lang.IgniteCallable;
import org.apache.ignite.lang.IgniteFuture;
import org.apache.ignite.spi.discovery.DiscoverySpiCustomMessage;
import org.apache.ignite.spi.discovery.DiscoverySpiListener;
import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
import org.apache.ignite.testframework.GridTestUtils;
import org.apache.ignite.testframework.GridTestUtils.DiscoveryHook;
import org.apache.ignite.testframework.GridTestUtils.DiscoverySpiListenerWrapper;
import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
import org.jetbrains.annotations.Nullable;
import org.junit.Test;
/**
*
*/
/**
 * Multi-node tests verifying that binary type metadata updates are
 * propagated, ordered and awaited correctly across server and client nodes.
 * Discovery custom messages are intercepted via {@link DiscoveryHook} to
 * create the race conditions under test.
 */
public class GridCacheBinaryObjectMetadataExchangeMultinodeTest extends GridCommonAbstractTest {
/** When {@code true}, the next started node is a client node. */
private boolean clientMode;
/** When {@code true}, the next started node gets {@link #discoveryHook} installed into its discovery SPI. */
private boolean applyDiscoveryHook;
/** Hook used to observe (and, for "deaf" clients, neuter) metadata discovery messages. */
private DiscoveryHook discoveryHook;
/** Name of the binary type used throughout these tests. */
private static final String BINARY_TYPE_NAME = "TestBinaryType";
/** Precomputed type id of {@link #BINARY_TYPE_NAME}. */
private static final int BINARY_TYPE_ID = 708045005;
/** Maximum time (ms) to wait for asynchronous test stages. */
private static final long MAX_AWAIT = 9_000;
/** Counts metadata requests seen by the replaced/wrapped TOPIC_METADATA_REQ listeners. */
private static final AtomicInteger metadataReqsCounter = new AtomicInteger(0);
/** {@inheritDoc} */
@Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
IgniteConfiguration cfg = super.getConfiguration(gridName);
// Wrap the discovery listener so the hook sees every custom discovery message.
if (applyDiscoveryHook) {
final DiscoveryHook hook = discoveryHook != null ? discoveryHook : new DiscoveryHook();
cfg.setDiscoverySpi(new TcpDiscoverySpi() {
@Override public void setListener(@Nullable DiscoverySpiListener lsnr) {
super.setListener(DiscoverySpiListenerWrapper.wrap(lsnr, hook));
}
});
}
((TcpDiscoverySpi)cfg.getDiscoverySpi()).setIpFinder(sharedStaticIpFinder);
cfg.setMarshaller(new BinaryMarshaller());
cfg.setClientMode(clientMode);
CacheConfiguration ccfg = new CacheConfiguration(DEFAULT_CACHE_NAME);
ccfg.setCacheMode(CacheMode.REPLICATED);
ccfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
cfg.setCacheConfiguration(ccfg);
return cfg;
}
/** {@inheritDoc} */
@Override protected void afterTest() throws Exception {
super.afterTest();
stopAllGrids();
}
/**
 * Verifies that if thread tries to read metadata with ongoing update it gets blocked
 * until acknowledge message arrives.
 */
@Test
public void testReadRequestBlockedOnUpdatingMetadata() throws Exception {
// Barrier synchronizes the reader thread with the hook observing version 2 acceptance.
final CyclicBarrier barrier = new CyclicBarrier(2);
applyDiscoveryHook = false;
final Ignite ignite0 = startGrid(0);
final Ignite ignite1 = startGrid(1);
final GridFutureAdapter finishFut = new GridFutureAdapter();
applyDiscoveryHook = true;
discoveryHook = new DiscoveryHook() {
private volatile IgniteEx ignite;
@Override public void handleDiscoveryMessage(DiscoverySpiCustomMessage msg) {
if (finishFut.isDone())
return;
DiscoveryCustomMessage customMsg = msg == null ? null
: (DiscoveryCustomMessage) IgniteUtils.field(msg, "delegate");
if (customMsg instanceof MetadataUpdateAcceptedMessage) {
MetadataUpdateAcceptedMessage acceptedMsg = (MetadataUpdateAcceptedMessage)customMsg;
if (acceptedMsg.typeId() == BINARY_TYPE_ID && acceptedMsg.acceptedVersion() == 2) {
// Reach into internals (via reflection) to assert the reader is parked in syncMap
// waiting for version 2 of the metadata to be acknowledged.
Object binaryProc = U.field(ignite.context(), "cacheObjProc");
Object transport = U.field(binaryProc, "transport");
try {
barrier.await(MAX_AWAIT, TimeUnit.MILLISECONDS);
Map syncMap = U.field(transport, "syncMap");
GridTestUtils.waitForCondition(new PA() {
@Override public boolean apply() {
return syncMap.size() == 1;
}
}, MAX_AWAIT);
assertEquals("unexpected size of syncMap: ", 1, syncMap.size());
Object syncKey = syncMap.keySet().iterator().next();
int typeId = U.field(syncKey, "typeId");
assertEquals("unexpected typeId: ", BINARY_TYPE_ID, typeId);
int ver = U.field(syncKey, "ver");
assertEquals("unexpected pendingVersion: ", 2, ver);
finishFut.onDone();
}
catch (Throwable t) {
finishFut.onDone(t);
}
}
}
}
@Override public void ignite(IgniteEx ignite) {
this.ignite = ignite;
}
};
final IgniteEx ignite2 = startGrid(2);
discoveryHook.ignite(ignite2);
// Unfinished PME may affect max await timeout.
awaitPartitionMapExchange();
// Update metadata (version 1).
ignite0.executorService(ignite0.cluster().forLocal()).submit(new Runnable() {
@Override public void run() {
addIntField(ignite0, "f1", 101, 1);
}
}).get();
// Update metadata (version 2).
ignite1.executorService(ignite1.cluster().forLocal()).submit(new Runnable() {
@Override public void run() {
addStringField(ignite1, "f2", "str", 2);
}
});
// Read metadata.
IgniteFuture readFut = ignite2.compute(ignite2.cluster().forLocal()).callAsync(new IgniteCallable<Object>() {
@Override public Object call() throws Exception {
barrier.await(MAX_AWAIT, TimeUnit.MILLISECONDS);
return ((BinaryObject) ignite2.cache(DEFAULT_CACHE_NAME).withKeepBinary().get(1)).field("f1");
}
});
finishFut.get(MAX_AWAIT);
// The read must have been unblocked with the value written at version 1.
assertEquals(101, readFut.get(MAX_AWAIT));
}
/**
 * Verifies that all sequential updates that don't introduce any conflicts are accepted and observed by all nodes.
 */
@Test
public void testSequentialUpdatesNoConflicts() throws Exception {
IgniteEx ignite0 = startGrid(0);
final IgniteEx ignite1 = startGrid(1);
final String intFieldName = "f1";
ignite1.executorService().submit(new Runnable() {
@Override public void run() {
addIntField(ignite1, intFieldName, 101, 1);
}
}).get();
// Field added on node 1 must be readable on node 0.
int fld = ((BinaryObject) ignite0.cache(DEFAULT_CACHE_NAME).withKeepBinary().get(1)).field(intFieldName);
assertEquals(fld, 101);
final IgniteEx ignite2 = startGrid(2);
final String strFieldName = "f2";
ignite2.executorService().submit(new Runnable() {
@Override public void run() {
addStringField(ignite2, strFieldName, "str", 2);
}
}).get();
assertEquals(((BinaryObject)ignite1.cache(DEFAULT_CACHE_NAME).withKeepBinary().get(2)).field(strFieldName), "str");
}
/**
 * Verifies that client is able to detect obsolete metadata situation and request up-to-date from the cluster.
 */
@Test
public void testClientRequestsUpToDateMetadata() throws Exception {
final IgniteEx ignite0 = startGrid(0);
final IgniteEx ignite1 = startGrid(1);
ignite0.executorService().submit(new Runnable() {
@Override public void run() {
addIntField(ignite0, "f1", 101, 1);
}
}).get();
// The "deaf" client misses metadata discovery messages on purpose.
final Ignite client = startDeafClient("client");
ClusterGroup clientGrp = client.cluster().forClients();
final String strVal = "strVal101";
ignite1.executorService().submit(new Runnable() {
@Override public void run() {
addStringField(ignite1, "f2", strVal, 1);
}
}).get();
// Reading a field the client never heard about forces it to fetch fresh metadata.
String res = client.compute(clientGrp).call(new IgniteCallable<String>() {
@Override public String call() throws Exception {
return ((BinaryObject)client.cache(DEFAULT_CACHE_NAME).withKeepBinary().get(1)).field("f2");
}
});
assertEquals(strVal, res);
}
/**
 * Verifies that client resends request for up-to-date metadata in case of failure on server received first request.
 */
@Test
public void testClientRequestsUpToDateMetadataOneNodeDies() throws Exception {
final Ignite srv0 = startGrid(0);
// First server dies upon receiving the metadata request; client must retry elsewhere.
replaceWithStoppingMappingRequestListener(((GridKernalContext)U.field(srv0, "ctx")).io(), 0);
final Ignite srv1 = startGrid(1);
replaceWithCountingMappingRequestListener(((GridKernalContext)U.field(srv1, "ctx")).io());
final Ignite srv2 = startGrid(2);
replaceWithCountingMappingRequestListener(((GridKernalContext)U.field(srv2, "ctx")).io());
final Ignite client = startDeafClient("client");
ClusterGroup clientGrp = client.cluster().forClients();
srv0.executorService().submit(new Runnable() {
@Override public void run() {
addStringField(srv0, "f2", "strVal101", 0);
}
}).get();
client.compute(clientGrp).call(new IgniteCallable<String>() {
@Override public String call() throws Exception {
return ((BinaryObject)client.cache(DEFAULT_CACHE_NAME).withKeepBinary().get(0)).field("f2");
}
});
// One request hit the dying node, the retry hit a surviving one.
assertEquals(metadataReqsCounter.get(), 2);
}
/**
 * Starts client node that skips <b>MetadataUpdateProposedMessage</b> and <b>MetadataUpdateAcceptedMessage</b>
 * messages.
 *
 * @param clientName name of client node.
 * @return started client node.
 */
private Ignite startDeafClient(String clientName) throws Exception {
clientMode = true;
applyDiscoveryHook = true;
discoveryHook = new DiscoveryHook() {
@Override public void handleDiscoveryMessage(DiscoverySpiCustomMessage msg) {
DiscoveryCustomMessage customMsg = msg == null ? null
: (DiscoveryCustomMessage) IgniteUtils.field(msg, "delegate");
// Rewriting typeId makes the client ignore updates for the type under test.
if (customMsg instanceof MetadataUpdateProposedMessage) {
if (((MetadataUpdateProposedMessage) customMsg).typeId() == BINARY_TYPE_ID)
GridTestUtils.setFieldValue(customMsg, "typeId", 1);
}
else if (customMsg instanceof MetadataUpdateAcceptedMessage) {
if (((MetadataUpdateAcceptedMessage) customMsg).typeId() == BINARY_TYPE_ID)
GridTestUtils.setFieldValue(customMsg, "typeId", 1);
}
}
};
Ignite client = startGrid(clientName);
// Reset flags so subsequently started nodes are plain servers.
clientMode = false;
applyDiscoveryHook = false;
return client;
}
/**
 * Replaces the TOPIC_METADATA_REQ listener with one that counts the request
 * and then stops the given grid, simulating a server dying on first request.
 */
private void replaceWithStoppingMappingRequestListener(GridIoManager ioMgr, final int nodeIdToStop) {
ioMgr.removeMessageListener(GridTopic.TOPIC_METADATA_REQ);
ioMgr.addMessageListener(GridTopic.TOPIC_METADATA_REQ, new GridMessageListener() {
@Override public void onMessage(UUID nodeId, Object msg, byte plc) {
new Thread(new Runnable() {
@Override public void run() {
metadataReqsCounter.incrementAndGet();
stopGrid(nodeIdToStop, true);
}
}).start();
}
});
}
/**
 * Wraps the existing TOPIC_METADATA_REQ listener so incoming metadata
 * requests are counted before being delegated.
 */
private void replaceWithCountingMappingRequestListener(GridIoManager ioMgr) {
GridMessageListener[] lsnrs = U.field(ioMgr, "sysLsnrs");
final GridMessageListener delegate = lsnrs[GridTopic.TOPIC_METADATA_REQ.ordinal()];
GridMessageListener wrapper = new GridMessageListener() {
@Override public void onMessage(UUID nodeId, Object msg, byte plc) {
metadataReqsCounter.incrementAndGet();
delegate.onMessage(nodeId, msg, plc);
}
};
lsnrs[GridTopic.TOPIC_METADATA_REQ.ordinal()] = wrapper;
}
/**
 * Adds field of integer type to fixed binary type.
 *
 * @param ignite Ignite.
 * @param fieldName Field name.
 * @param fieldVal Field value.
 * @param cacheIdx Cache index.
 */
private void addIntField(Ignite ignite, String fieldName, int fieldVal, int cacheIdx) {
BinaryObjectBuilder builder = ignite.binary().builder(BINARY_TYPE_NAME);
IgniteCache<Object, Object> cache = ignite.cache(DEFAULT_CACHE_NAME).withKeepBinary();
builder.setField(fieldName, fieldVal);
// Putting the built object triggers the metadata update for the new field.
cache.put(cacheIdx, builder.build());
}
/**
 * Adds field of String type to fixed binary type.
 *
 * @param ignite Ignite.
 * @param fieldName Field name.
 * @param fieldVal Field value.
 * @param cacheIdx Cache index.
 */
private void addStringField(Ignite ignite, String fieldName, String fieldVal, int cacheIdx) {
BinaryObjectBuilder builder = ignite.binary().builder(BINARY_TYPE_NAME);
IgniteCache<Object, Object> cache = ignite.cache(DEFAULT_CACHE_NAME).withKeepBinary();
builder.setField(fieldName, fieldVal);
// Putting the built object triggers the metadata update for the new field.
cache.put(cacheIdx, builder.build());
}
}
| shroman/ignite | modules/core/src/test/java/org/apache/ignite/internal/processors/cache/binary/GridCacheBinaryObjectMetadataExchangeMultinodeTest.java | Java | apache-2.0 | 15,798 |
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package org.jboss.netty.channel.core;
import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.channels.WritableByteChannel;
import org.jboss.netty.util.ExternalResourceReleasable;
/**
* A region of a file that is sent via a {@link Channel} which supports
* <a href="http://en.wikipedia.org/wiki/Zero-copy">zero-copy file transfer</a>.
*
* <h3>Upgrade your JDK / JRE</h3>
*
* {@link FileChannel#transferTo(long, long, WritableByteChannel)} has at least
* four known bugs in the old versions of Sun JDK and perhaps its derived ones.
* Please upgrade your JDK to 1.6.0_18 or later version if you are going to use
* zero-copy file transfer.
* <ul>
* <li><a href="http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=5103988">5103988</a>
* - FileChannel.transferTo() should return -1 for EAGAIN instead throws IOException</li>
* <li><a href="http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6253145">6253145</a>
* - FileChannel.transferTo() on Linux fails when going beyond 2GB boundary</li>
* <li><a href="http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6427312">6427312</a>
* - FileChannel.transferTo() throws IOException "system call interrupted"</li>
 * <li><a href="http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6470086">6470086</a>
 * - FileChannel.transferTo(2147483647, 1, channel) causes "Value too large" exception</li>
* </ul>
*
* <h3>Check your operating system and JDK / JRE</h3>
*
* If your operating system (or JDK / JRE) does not support zero-copy file
* transfer, sending a file with {@link FileRegion} might fail or yield worse
* performance. For example, sending a large file doesn't work well in Windows.
*
* <h3>Not all transports support it</h3>
*
* Currently, the NIO transport is the only transport that supports {@link FileRegion}.
* Attempting to write a {@link FileRegion} to non-NIO {@link Channel} will trigger
* a {@link ClassCastException} or a similar exception.
*/
public interface FileRegion extends ExternalResourceReleasable {
// FIXME Make sure all transports support writing a FileRegion
// Even if zero copy cannot be achieved, all transports should emulate it.
/**
 * Returns the offset in the file where the transfer began.
 */
long getPosition();
/**
 * Returns the number of bytes to transfer.
 */
long getCount();
/**
 * Transfers the content of this file region to the specified channel.
 *
 * @param target the destination of the transfer
 * @param position the relative offset of the file where the transfer
 * begins from. For example, <tt>0</tt> will make the
 * transfer start from {@link #getPosition()}th byte and
 * <tt>{@link #getCount()} - 1</tt> will make the last
 * byte of the region transferred.
 * @return the number of bytes written to the target channel
 * (NOTE(review): presumed from the
 * {@link FileChannel#transferTo(long, long, WritableByteChannel)}
 * contract — confirm implementations return the written count)
 * @throws IOException if the transfer failed
 */
long transferTo(WritableByteChannel target, long position) throws IOException;
}
| xiexingguang/simple-netty-source | src/main/java/org/jboss/netty/channel/core/FileRegion.java | Java | apache-2.0 | 3,614 |
/*
* #%L
* Wisdom-Framework
* %%
* Copyright (C) 2013 - 2014 Wisdom Framework
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
package org.wisdom.content.bodyparsers;
import com.google.common.collect.ImmutableList;
import org.apache.felix.ipojo.annotations.Component;
import org.apache.felix.ipojo.annotations.Instantiate;
import org.apache.felix.ipojo.annotations.Provides;
import org.apache.felix.ipojo.annotations.Requires;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.wisdom.api.content.BodyParser;
import org.wisdom.api.content.ParameterFactories;
import org.wisdom.api.http.Context;
import org.wisdom.api.http.FileItem;
import org.wisdom.api.http.MimeTypes;
import org.wisdom.content.converters.ReflectionHelper;
import java.io.InputStream;
import java.lang.reflect.Field;
import java.lang.reflect.Type;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
/**
 * A {@link BodyParser} that binds an incoming HTTP request to a Java bean
 * instance. Values are gathered, in order, from query parameters, path
 * parameters, url-encoded / multipart form fields, and finally uploaded file
 * items, and are injected into the bean's properties (matched by name) using
 * the {@link ParameterFactories} converters. Binding is best-effort: a value
 * that cannot be converted is logged and skipped rather than failing the
 * whole request.
 */
@Component
@Provides
@Instantiate
public class BodyParserForm implements BodyParser {

    // Logger attributed to this implementation. Note: previously this used
    // BodyParser.class, which misattributed log events to the interface and
    // shared the logger with every other BodyParser implementation.
    private static final Logger LOGGER = LoggerFactory.getLogger(BodyParserForm.class);

    /** Prefix of the warning emitted when a single key/value cannot be bound. */
    private static final String ERROR_KEY = "Error parsing incoming form data for key ";

    /** Separator between the key and the value in binding warnings. */
    private static final String ERROR_AND = " and value ";

    /** Converter service used to transform raw string values into property types. */
    @Requires
    ParameterFactories converters;

    /**
     * Creates a object of class T from a form sent in the request.
     *
     * @param context The context
     * @param classOfT The class we expect
     * @param genericType the generic type (ignored)
     * @param <T> the class ot the object to build
     * @return the object, {@code null} if the object cannot be built.
     */
    @Override
    public <T> T invoke(Context context, Class<T> classOfT, Type genericType) {
        T t;
        try {
            // Requires an accessible no-arg constructor on the target bean.
            t = classOfT.newInstance();
        } catch (Exception e) {
            LOGGER.error("Failed to create a new instance of {}", classOfT, e);
            return null;
        }
        Map<String, ReflectionHelper.Property> properties = ReflectionHelper.getProperties(classOfT, genericType);
        // 1) Query parameters.
        for (Entry<String, List<String>> ent : context.parameters().entrySet()) {
            try {
                ReflectionHelper.Property property = properties.get(ent.getKey());
                if (property != null) {
                    Object value = converters.convertValues(ent.getValue(), property.getClassOfProperty(),
                            property.getGenericTypeOfProperty(),
                            null);
                    property.set(t, value);
                }
            } catch (Exception e) {
                LOGGER.warn(ERROR_KEY + ent.getKey() + ERROR_AND + ent.getValue(), e);
            }
        }
        // 2) Path parameters.
        final Map<String, String> fromPath = context.route().getPathParametersEncoded(context.request().uri());
        for (Entry<String, String> ent : fromPath
                .entrySet()) {
            try {
                ReflectionHelper.Property property = properties.get(ent.getKey());
                if (property != null) {
                    Object value = converters.convertValue(ent.getValue(), property.getClassOfProperty(),
                            property.getGenericTypeOfProperty(), null);
                    property.set(t, value);
                }
            } catch (Exception e) {
                // Path parameter are rarely used in form, so, set the log level to 'debug'.
                LOGGER.debug(ERROR_KEY + ent.getKey() + ERROR_AND + ent.getValue(), e);
            }
        }
        // 3) Forms — skip entirely when the request carries no form body.
        if (context.form() == null || context.form().isEmpty()) {
            return t;
        }
        for (Entry<String, List<String>> ent : context.form().entrySet()) {
            try {
                ReflectionHelper.Property property = properties.get(ent.getKey());
                if (property != null) {
                    Object value = converters.convertValues(ent.getValue(), property.getClassOfProperty(),
                            property.getGenericTypeOfProperty(), null);
                    property.set(t, value);
                }
            } catch (Exception e) {
                LOGGER.warn(ERROR_KEY + ent.getKey() + ERROR_AND + ent.getValue(), e);
            }
        }
        // 4) File items (multipart uploads). Properties may receive the raw
        // stream, the FileItem itself, or the content as a byte[].
        if (context.files() == null || context.files().isEmpty()) {
            return t;
        }
        for (FileItem item : context.files()) {
            try {
                ReflectionHelper.Property property = properties.get(item.field());
                if (property != null) {
                    if (InputStream.class.isAssignableFrom(property.getClassOfProperty())) {
                        property.set(t, item.stream());
                    } else if (FileItem.class.isAssignableFrom(property.getClassOfProperty())) {
                        property.set(t, item);
                    } else if (property.getClassOfProperty().isArray()
                            && property.getClassOfProperty().getComponentType().equals(Byte.TYPE)) {
                        property.set(t, item.bytes());
                    }
                }
            } catch (Exception e) {
                LOGGER.warn(ERROR_KEY + item.field() + ERROR_AND + item, e);
            }
        }
        return t;
    }

    /**
     * Creates a object of class T from a form sent in the request.
     *
     * @param context The context
     * @param classOfT The class we expect
     * @param <T> the class ot the object to build
     * @return the object, {@code null} if the object cannot be built.
     */
    @Override
    public <T> T invoke(Context context, Class<T> classOfT) {
        // Delegate with no generic type information.
        return invoke(context, classOfT, null);
    }

    /**
     * Unsupported operation.
     *
     * @param bytes the content
     * @param classOfT The class we expect
     * @param <T> the class
     * @return nothing as this method is not supported
     */
    @Override
    public <T> T invoke(byte[] bytes, Class<T> classOfT) {
        throw new UnsupportedOperationException("Cannot bind a raw byte[] to a form object");
    }

    /**
     * @return a list containing {@code application/x-www-form-urlencoded} and {@code multipart/form}.
     */
    public List<String> getContentTypes() {
        return ImmutableList.of(MimeTypes.FORM, MimeTypes.MULTIPART);
    }
}
| torito/wisdom | core/content-manager/src/main/java/org/wisdom/content/bodyparsers/BodyParserForm.java | Java | apache-2.0 | 6,932 |
package org.apache.maven.model.profile;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import javax.inject.Named;
import javax.inject.Singleton;
import org.apache.maven.model.Build;
import org.apache.maven.model.BuildBase;
import org.apache.maven.model.Model;
import org.apache.maven.model.ModelBase;
import org.apache.maven.model.Plugin;
import org.apache.maven.model.PluginContainer;
import org.apache.maven.model.PluginExecution;
import org.apache.maven.model.Profile;
import org.apache.maven.model.ReportPlugin;
import org.apache.maven.model.ReportSet;
import org.apache.maven.model.Reporting;
import org.apache.maven.model.building.ModelBuildingRequest;
import org.apache.maven.model.building.ModelProblemCollector;
import org.apache.maven.model.merge.MavenModelMerger;
/**
* Handles profile injection into the model.
*
* @author Benjamin Bentmann
*/
@Named
@Singleton
@SuppressWarnings( { "checkstyle:methodname" } )
public class DefaultProfileInjector
implements ProfileInjector
{
// Stateless merger; a single shared instance is sufficient.
private ProfileModelMerger merger = new ProfileModelMerger();
/**
 * Injects the values of the given profile into the model: model-level
 * elements are merged directly, and build-level elements are merged into
 * the model's build section (created on demand). A {@code null} profile
 * is a no-op.
 */
@Override
public void injectProfile( Model model, Profile profile, ModelBuildingRequest request,
ModelProblemCollector problems )
{
if ( profile != null )
{
merger.mergeModelBase( model, profile );
if ( profile.getBuild() != null )
{
if ( model.getBuild() == null )
{
model.setBuild( new Build() );
}
merger.mergeBuildBase( model.getBuild(), profile.getBuild() );
}
}
}
/**
 * ProfileModelMerger: a {@link MavenModelMerger} specialization whose
 * collection merges treat the profile as source-dominant and merge
 * plugins/executions/report sets by key instead of replacing lists.
 */
protected static class ProfileModelMerger
extends MavenModelMerger
{
// Source (profile) values dominate over the target (model) values.
public void mergeModelBase( ModelBase target, ModelBase source )
{
mergeModelBase( target, source, true, Collections.emptyMap() );
}
// Source (profile) values dominate over the target (model) values.
public void mergeBuildBase( BuildBase target, BuildBase source )
{
mergeBuildBase( target, source, true, Collections.emptyMap() );
}
/**
 * Merges plugin lists by plugin key. Profile plugins whose key already
 * exists in the model are merged into the existing entry; plugins new to
 * the model ("pending") are inserted immediately before the next model
 * plugin they preceded in the profile, preserving the profile's relative
 * ordering, and any trailing new plugins are appended at the end.
 */
@Override
protected void mergePluginContainer_Plugins( PluginContainer target, PluginContainer source,
boolean sourceDominant, Map<Object, Object> context )
{
List<Plugin> src = source.getPlugins();
if ( !src.isEmpty() )
{
List<Plugin> tgt = target.getPlugins();
Map<Object, Plugin> master = new LinkedHashMap<>( tgt.size() * 2 );
for ( Plugin element : tgt )
{
Object key = getPluginKey().apply( element );
master.put( key, element );
}
// Map from a model plugin key to the new profile plugins that must be
// inserted directly before it.
Map<Object, List<Plugin>> predecessors = new LinkedHashMap<>();
List<Plugin> pending = new ArrayList<>();
for ( Plugin element : src )
{
Object key = getPluginKey().apply( element );
Plugin existing = master.get( key );
if ( existing != null )
{
mergePlugin( existing, element, sourceDominant, context );
if ( !pending.isEmpty() )
{
predecessors.put( key, pending );
pending = new ArrayList<>();
}
}
else
{
pending.add( element );
}
}
// Rebuild the list: predecessors first, then the (merged) model plugin.
List<Plugin> result = new ArrayList<>( src.size() + tgt.size() );
for ( Map.Entry<Object, Plugin> entry : master.entrySet() )
{
List<Plugin> pre = predecessors.get( entry.getKey() );
if ( pre != null )
{
result.addAll( pre );
}
result.add( entry.getValue() );
}
result.addAll( pending );
target.setPlugins( result );
}
}
/**
 * Merges plugin executions by execution key: matching executions are
 * merged in place, new ones are appended in encounter order.
 */
@Override
protected void mergePlugin_Executions( Plugin target, Plugin source, boolean sourceDominant,
Map<Object, Object> context )
{
List<PluginExecution> src = source.getExecutions();
if ( !src.isEmpty() )
{
List<PluginExecution> tgt = target.getExecutions();
Map<Object, PluginExecution> merged =
new LinkedHashMap<>( ( src.size() + tgt.size() ) * 2 );
for ( PluginExecution element : tgt )
{
Object key = getPluginExecutionKey().apply( element );
merged.put( key, element );
}
for ( PluginExecution element : src )
{
Object key = getPluginExecutionKey().apply( element );
PluginExecution existing = merged.get( key );
if ( existing != null )
{
mergePluginExecution( existing, element, sourceDominant, context );
}
else
{
merged.put( key, element );
}
}
target.setExecutions( new ArrayList<>( merged.values() ) );
}
}
/**
 * Merges report plugins by key: matching plugins are merged in place,
 * new ones are appended in encounter order.
 */
@Override
protected void mergeReporting_Plugins( Reporting target, Reporting source, boolean sourceDominant,
Map<Object, Object> context )
{
List<ReportPlugin> src = source.getPlugins();
if ( !src.isEmpty() )
{
List<ReportPlugin> tgt = target.getPlugins();
Map<Object, ReportPlugin> merged =
new LinkedHashMap<>( ( src.size() + tgt.size() ) * 2 );
for ( ReportPlugin element : tgt )
{
Object key = getReportPluginKey().apply( element );
merged.put( key, element );
}
for ( ReportPlugin element : src )
{
Object key = getReportPluginKey().apply( element );
ReportPlugin existing = merged.get( key );
if ( existing == null )
{
merged.put( key, element );
}
else
{
mergeReportPlugin( existing, element, sourceDominant, context );
}
}
target.setPlugins( new ArrayList<>( merged.values() ) );
}
}
/**
 * Merges report sets by key: matching sets are merged in place, new ones
 * are appended in encounter order.
 */
@Override
protected void mergeReportPlugin_ReportSets( ReportPlugin target, ReportPlugin source, boolean sourceDominant,
Map<Object, Object> context )
{
List<ReportSet> src = source.getReportSets();
if ( !src.isEmpty() )
{
List<ReportSet> tgt = target.getReportSets();
Map<Object, ReportSet> merged = new LinkedHashMap<>( ( src.size() + tgt.size() ) * 2 );
for ( ReportSet element : tgt )
{
Object key = getReportSetKey().apply( element );
merged.put( key, element );
}
for ( ReportSet element : src )
{
Object key = getReportSetKey().apply( element );
ReportSet existing = merged.get( key );
if ( existing != null )
{
mergeReportSet( existing, element, sourceDominant, context );
}
else
{
merged.put( key, element );
}
}
target.setReportSets( new ArrayList<>( merged.values() ) );
}
}
}
}
| olamy/maven | maven-model-builder/src/main/java/org/apache/maven/model/profile/DefaultProfileInjector.java | Java | apache-2.0 | 9,121 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.runtime.rest.handler.job;
import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.runtime.execution.ExecutionState;
import org.apache.flink.runtime.executiongraph.AccessExecutionVertex;
import org.apache.flink.runtime.executiongraph.ArchivedExecutionGraph;
import org.apache.flink.runtime.executiongraph.ErrorInfo;
import org.apache.flink.runtime.rest.handler.HandlerRequest;
import org.apache.flink.runtime.rest.handler.legacy.ExecutionGraphCache;
import org.apache.flink.runtime.rest.messages.EmptyRequestBody;
import org.apache.flink.runtime.rest.messages.JobExceptionsInfo;
import org.apache.flink.runtime.rest.messages.JobExceptionsInfoWithHistory;
import org.apache.flink.runtime.rest.messages.JobIDPathParameter;
import org.apache.flink.runtime.rest.messages.MessageHeaders;
import org.apache.flink.runtime.rest.messages.ResponseBody;
import org.apache.flink.runtime.rest.messages.job.JobExceptionsMessageParameters;
import org.apache.flink.runtime.rest.messages.job.UpperLimitExceptionParameter;
import org.apache.flink.runtime.scheduler.ExecutionGraphInfo;
import org.apache.flink.runtime.scheduler.exceptionhistory.ExceptionHistoryEntry;
import org.apache.flink.runtime.scheduler.exceptionhistory.RootExceptionHistoryEntry;
import org.apache.flink.runtime.taskmanager.TaskManagerLocation;
import org.apache.flink.runtime.webmonitor.RestfulGateway;
import org.apache.flink.runtime.webmonitor.history.ArchivedJson;
import org.apache.flink.runtime.webmonitor.history.JsonArchivist;
import org.apache.flink.runtime.webmonitor.retriever.GatewayRetriever;
import org.apache.flink.util.Preconditions;
import org.apache.flink.shaded.curator5.com.google.common.collect.Iterables;
import javax.annotation.Nullable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.Executor;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
/** Handler serving the job exceptions. */
public class JobExceptionsHandler
        extends AbstractExecutionGraphHandler<
                JobExceptionsInfoWithHistory, JobExceptionsMessageParameters>
        implements JsonArchivist {
    /** Default cap on reported exceptions when the request supplies no explicit limit. */
    static final int MAX_NUMBER_EXCEPTION_TO_REPORT = 20;
    public JobExceptionsHandler(
            GatewayRetriever<? extends RestfulGateway> leaderRetriever,
            Time timeout,
            Map<String, String> responseHeaders,
            MessageHeaders<
                            EmptyRequestBody,
                            JobExceptionsInfoWithHistory,
                            JobExceptionsMessageParameters>
                    messageHeaders,
            ExecutionGraphCache executionGraphCache,
            Executor executor) {
        super(
                leaderRetriever,
                timeout,
                responseHeaders,
                messageHeaders,
                executionGraphCache,
                executor);
    }
    @Override
    protected JobExceptionsInfoWithHistory handleRequest(
            HandlerRequest<EmptyRequestBody> request, ExecutionGraphInfo executionGraph) {
        // Use the client-supplied upper limit when present, otherwise the default cap.
        List<Integer> exceptionToReportMaxSizes =
                request.getQueryParameter(UpperLimitExceptionParameter.class);
        final int exceptionToReportMaxSize =
                exceptionToReportMaxSizes.size() > 0
                        ? exceptionToReportMaxSizes.get(0)
                        : MAX_NUMBER_EXCEPTION_TO_REPORT;
        return createJobExceptionsInfo(executionGraph, exceptionToReportMaxSize);
    }
    @Override
    public Collection<ArchivedJson> archiveJsonWithPath(ExecutionGraphInfo executionGraphInfo)
            throws IOException {
        // Archives the exception info under the handler's REST path with the
        // job-id placeholder substituted by the concrete job id.
        ResponseBody json =
                createJobExceptionsInfo(executionGraphInfo, MAX_NUMBER_EXCEPTION_TO_REPORT);
        String path =
                getMessageHeaders()
                        .getTargetRestEndpointURL()
                        .replace(
                                ':' + JobIDPathParameter.KEY,
                                executionGraphInfo.getJobId().toString());
        return Collections.singletonList(new ArchivedJson(path, json));
    }
    /**
     * Builds the response body from the archived execution graph. When the job has no
     * root failure info, only the exception history is returned; otherwise the
     * (deprecated) per-task exception list is populated as well, truncated at
     * {@code exceptionToReportMaxSize} entries.
     */
    private static JobExceptionsInfoWithHistory createJobExceptionsInfo(
            ExecutionGraphInfo executionGraphInfo, int exceptionToReportMaxSize) {
        final ArchivedExecutionGraph executionGraph =
                executionGraphInfo.getArchivedExecutionGraph();
        if (executionGraph.getFailureInfo() == null) {
            return new JobExceptionsInfoWithHistory(
                    createJobExceptionHistory(
                            executionGraphInfo.getExceptionHistory(), exceptionToReportMaxSize));
        }
        List<JobExceptionsInfo.ExecutionExceptionInfo> taskExceptionList = new ArrayList<>();
        boolean truncated = false;
        for (AccessExecutionVertex task : executionGraph.getAllExecutionVertices()) {
            Optional<ErrorInfo> failure = task.getFailureInfo();
            if (failure.isPresent()) {
                if (taskExceptionList.size() >= exceptionToReportMaxSize) {
                    // Report the truncation instead of growing the response unboundedly.
                    truncated = true;
                    break;
                }
                TaskManagerLocation location = task.getCurrentAssignedResourceLocation();
                String locationString = toString(location);
                long timestamp = task.getStateTimestamp(ExecutionState.FAILED);
                // Timestamp 0 means "not recorded"; -1 signals that to the client.
                taskExceptionList.add(
                        new JobExceptionsInfo.ExecutionExceptionInfo(
                                failure.get().getExceptionAsString(),
                                task.getTaskNameWithSubtaskIndex(),
                                locationString,
                                timestamp == 0 ? -1 : timestamp));
            }
        }
        final ErrorInfo rootCause = executionGraph.getFailureInfo();
        return new JobExceptionsInfoWithHistory(
                rootCause.getExceptionAsString(),
                rootCause.getTimestamp(),
                taskExceptionList,
                truncated,
                createJobExceptionHistory(
                        executionGraphInfo.getExceptionHistory(), exceptionToReportMaxSize));
    }
    /**
     * Converts the exception history into its REST representation, newest entry first,
     * keeping at most {@code limit} entries and flagging whether any were dropped.
     */
    private static JobExceptionsInfoWithHistory.JobExceptionHistory createJobExceptionHistory(
            Iterable<RootExceptionHistoryEntry> historyEntries, int limit) {
        // we need to reverse the history to have a stable result when doing paging on it
        final List<RootExceptionHistoryEntry> reversedHistoryEntries = new ArrayList<>();
        Iterables.addAll(reversedHistoryEntries, historyEntries);
        Collections.reverse(reversedHistoryEntries);
        List<JobExceptionsInfoWithHistory.RootExceptionInfo> exceptionHistoryEntries =
                reversedHistoryEntries.stream()
                        .limit(limit)
                        .map(JobExceptionsHandler::createRootExceptionInfo)
                        .collect(Collectors.toList());
        return new JobExceptionsInfoWithHistory.JobExceptionHistory(
                exceptionHistoryEntries,
                exceptionHistoryEntries.size() < reversedHistoryEntries.size());
    }
    /**
     * Maps a single root failure (and its concurrent failures) to the REST model.
     * Global failures carry no task name/location; local ones must provide a task name.
     */
    private static JobExceptionsInfoWithHistory.RootExceptionInfo createRootExceptionInfo(
            RootExceptionHistoryEntry historyEntry) {
        final List<JobExceptionsInfoWithHistory.ExceptionInfo> concurrentExceptions =
                StreamSupport.stream(historyEntry.getConcurrentExceptions().spliterator(), false)
                        .map(JobExceptionsHandler::createExceptionInfo)
                        .collect(Collectors.toList());
        if (historyEntry.isGlobal()) {
            return new JobExceptionsInfoWithHistory.RootExceptionInfo(
                    historyEntry.getException().getOriginalErrorClassName(),
                    historyEntry.getExceptionAsString(),
                    historyEntry.getTimestamp(),
                    concurrentExceptions);
        }
        assertLocalExceptionInfo(historyEntry);
        return new JobExceptionsInfoWithHistory.RootExceptionInfo(
                historyEntry.getException().getOriginalErrorClassName(),
                historyEntry.getExceptionAsString(),
                historyEntry.getTimestamp(),
                historyEntry.getFailingTaskName(),
                toString(historyEntry.getTaskManagerLocation()),
                concurrentExceptions);
    }
    /** Maps a concurrent (non-root) failure entry to the REST model. */
    private static JobExceptionsInfoWithHistory.ExceptionInfo createExceptionInfo(
            ExceptionHistoryEntry exceptionHistoryEntry) {
        assertLocalExceptionInfo(exceptionHistoryEntry);
        return new JobExceptionsInfoWithHistory.ExceptionInfo(
                exceptionHistoryEntry.getException().getOriginalErrorClassName(),
                exceptionHistoryEntry.getExceptionAsString(),
                exceptionHistoryEntry.getTimestamp(),
                exceptionHistoryEntry.getFailingTaskName(),
                toString(exceptionHistoryEntry.getTaskManagerLocation()));
    }
    /** Guards that a non-global failure entry names the failing task. */
    private static void assertLocalExceptionInfo(ExceptionHistoryEntry exceptionHistoryEntry) {
        Preconditions.checkArgument(
                exceptionHistoryEntry.getFailingTaskName() != null,
                "The taskName must not be null for a non-global failure.");
    }
    @VisibleForTesting
    static String toString(@Nullable TaskManagerLocation location) {
        // '(unassigned)' being the default value is added to support backward-compatibility for the
        // deprecated fields
        return location != null
                ? taskManagerLocationToString(location.getFQDNHostname(), location.dataPort())
                : "(unassigned)";
    }
    @VisibleForTesting
    @Nullable
    static String toString(@Nullable ExceptionHistoryEntry.ArchivedTaskManagerLocation location) {
        return location != null
                ? taskManagerLocationToString(location.getFQDNHostname(), location.getPort())
                : null;
    }
    /** Formats a task-manager location as {@code host:port}. */
    private static String taskManagerLocationToString(String fqdnHostname, int port) {
        return String.format("%s:%d", fqdnHostname, port);
    }
}
| apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/job/JobExceptionsHandler.java | Java | apache-2.0 | 11,142 |
// Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package exec
import "context"
// Discard silently discard all elements. It is implicitly inserted for any
// loose ends in the pipeline.
type Discard struct {
	// UID is the unit identifier.
	UID UnitID
}

// ID returns the unit identifier of this node.
func (d *Discard) ID() UnitID {
	return d.UID
}

// Up is a no-op: Discard requires no initialization.
func (d *Discard) Up(ctx context.Context) error {
	return nil
}

// StartBundle is a no-op: no per-bundle state is kept.
func (d *Discard) StartBundle(ctx context.Context, id string, data DataContext) error {
	return nil
}

// ProcessElement drops the element and its associated value streams.
func (d *Discard) ProcessElement(ctx context.Context, value FullValue, values ...ReStream) error {
	return nil
}

// FinishBundle is a no-op: nothing was buffered.
func (d *Discard) FinishBundle(ctx context.Context) error {
	return nil
}

// Down is a no-op: there are no resources to release.
func (d *Discard) Down(ctx context.Context) error {
	return nil
}

// String returns a short human-readable name for diagnostics.
func (d *Discard) String() string {
	return "Discard"
}
| rangadi/beam | sdks/go/pkg/beam/core/runtime/exec/discard.go | GO | apache-2.0 | 1,532 |
#include "editor/osm_auth.hpp"
#include "platform/http_client.hpp"
#include "coding/url.hpp"
#include "base/assert.hpp"
#include "base/logging.hpp"
#include "base/string_utils.hpp"
#include <iostream>
#include <map>
#include "private.h"
#include "3party/liboauthcpp/include/liboauthcpp/liboauthcpp.h"
using namespace std;
using platform::HttpClient;
namespace osm
{
constexpr char const * kApiVersion = "/api/0.6";
constexpr char const * kFacebookCallbackPart = "/auth/facebook_access_token/callback?access_token=";
constexpr char const * kGoogleCallbackPart = "/auth/google_oauth2_access_token/callback?access_token=";
constexpr char const * kFacebookOAuthPart = "/auth/facebook?referer=%2Foauth%2Fauthorize%3Foauth_token%3D";
constexpr char const * kGoogleOAuthPart = "/auth/google?referer=%2Foauth%2Fauthorize%3Foauth_token%3D";
namespace
{
// Extracts the CSRF token from an OSM HTML page: finds the hidden input
// named "authenticity_token" and returns the contents of its value="..."
// attribute, or an empty string when the token cannot be located.
std::string FindAuthenticityToken(std::string const & body)
{
  auto const attrPos = body.find("name=\"authenticity_token\"");
  if (attrPos == std::string::npos)
    return {};

  std::string const kValuePrefix = "value=\"";
  auto valueStart = body.find(kValuePrefix, attrPos);
  if (valueStart == std::string::npos)
    return {};
  valueStart += kValuePrefix.length();

  auto const valueEnd = body.find('"', valueStart);
  if (valueEnd == std::string::npos)
    return {};
  return body.substr(valueStart, valueEnd - valueStart);
}
// Serializes key/value pairs into an application/x-www-form-urlencoded body,
// i.e. "k1=v1&k2=v2" with every value URL-encoded.
string BuildPostRequest(map<string, string> const & params)
{
  string body;
  bool first = true;
  for (auto const & kv : params)
  {
    if (!first)
      body += "&";
    first = false;
    body += kv.first + "=" + url::UrlEncode(kv.second);
  }
  return body;
}
} // namespace
// static
bool OsmOAuth::IsValid(KeySecret const & ks) noexcept
{
  // A key/secret pair is usable only when both parts are non-empty.
  return !ks.first.empty() && !ks.second.empty();
}
// static
bool OsmOAuth::IsValid(UrlRequestToken const & urt) noexcept
{
  // Valid only when the URL and both request-token components are present.
  return !urt.first.empty() && !urt.second.first.empty() && !urt.second.second.empty();
}
// Stores the application (consumer) credentials and the two endpoints:
// m_baseUrl serves the web UI (login/oauth pages), m_apiUrl the editing API.
OsmOAuth::OsmOAuth(string const & consumerKey, string const & consumerSecret,
                   string const & baseUrl, string const & apiUrl) noexcept
  : m_consumerKeySecret(consumerKey, consumerSecret), m_baseUrl(baseUrl), m_apiUrl(apiUrl)
{
}
// static
// Selects the IZ test server in debug builds and production otherwise.
OsmOAuth OsmOAuth::ServerAuth() noexcept
{
#ifdef DEBUG
  return IZServerAuth();
#else
  return ProductionServerAuth();
#endif
}
// static
// Same as ServerAuth() but pre-loaded with an already obtained user token.
OsmOAuth OsmOAuth::ServerAuth(KeySecret const & userKeySecret) noexcept
{
  OsmOAuth auth = ServerAuth();
  auth.SetKeySecret(userKeySecret);
  return auth;
}
// static
// Test-only server; the embedded key/secret are development credentials.
OsmOAuth OsmOAuth::IZServerAuth() noexcept
{
  constexpr char const * kIZTestServer = "http://test.osmz.ru";
  constexpr char const * kIZConsumerKey = "F0rURWssXDYxtm61279rHdyu3iSLYSP3LdF6DL3Y";
  constexpr char const * kIZConsumerSecret = "IoR5TAedXxcybtd5tIBZqAK07rDRAuFMsQ4nhAP6";
  return OsmOAuth(kIZConsumerKey, kIZConsumerSecret, kIZTestServer, kIZTestServer);
}
// static
// Official OSM development sandbox; the embedded key/secret are dev credentials.
OsmOAuth OsmOAuth::DevServerAuth() noexcept
{
  constexpr char const * kOsmDevServer = "https://master.apis.dev.openstreetmap.org";
  constexpr char const * kOsmDevConsumerKey = "eRtN6yKZZf34oVyBnyaVbsWtHIIeptLArQKdTwN3";
  constexpr char const * kOsmDevConsumerSecret = "lC124mtm2VqvKJjSh35qBpKfrkeIjpKuGe38Hd1H";
  return OsmOAuth(kOsmDevConsumerKey, kOsmDevConsumerSecret, kOsmDevServer, kOsmDevServer);
}
// static
// Production OSM; consumer key/secret come from the build-time private.h.
OsmOAuth OsmOAuth::ProductionServerAuth() noexcept
{
  constexpr char const * kOsmMainSiteURL = "https://www.openstreetmap.org";
  constexpr char const * kOsmApiURL = "https://api.openstreetmap.org";
  return OsmOAuth(OSM_CONSUMER_KEY, OSM_CONSUMER_SECRET, kOsmMainSiteURL, kOsmApiURL);
}
// Accessors for the per-user OAuth token; a user is "authorized" once a
// non-empty token key/secret pair has been stored.
void OsmOAuth::SetKeySecret(KeySecret const & keySecret) noexcept { m_tokenKeySecret = keySecret; }
KeySecret const & OsmOAuth::GetKeySecret() const noexcept { return m_tokenKeySecret; }
bool OsmOAuth::IsAuthorized() const noexcept{ return IsValid(m_tokenKeySecret); }
// Opens a login page and extract a cookie and a secret token.
// Throws NetworkError / UnexpectedRedirect / FetchSessionIdError on failure.
OsmOAuth::SessionID OsmOAuth::FetchSessionId(string const & subUrl, string const & cookies) const
{
  // "?cookie_test=true" is appended only for the first, cookie-less request.
  string const url = m_baseUrl + subUrl + (cookies.empty() ? "?cookie_test=true" : "");
  HttpClient request(url);
  request.SetCookies(cookies);
  if (!request.RunHttpRequest())
    MYTHROW(NetworkError, ("FetchSessionId Network error while connecting to", url));
  if (request.WasRedirected())
    MYTHROW(UnexpectedRedirect, ("Redirected to", request.UrlReceived(), "from", url));
  if (request.ErrorCode() != HTTP::OK)
    MYTHROW(FetchSessionIdError, (DebugPrint(request)));
  // The session consists of the server cookies plus the CSRF token scraped from the page.
  SessionID const sid = { request.CombinedCookies(), FindAuthenticityToken(request.ServerResponse()) };
  if (sid.m_cookies.empty() || sid.m_token.empty())
    MYTHROW(FetchSessionIdError, ("Cookies and/or token are empty for request", DebugPrint(request)));
  return sid;
}
// Terminates the given web session on the server side.
// Throws NetworkError / LogoutUserError on failure.
void OsmOAuth::LogoutUser(SessionID const & sid) const
{
  HttpClient request(m_baseUrl + "/logout");
  request.SetCookies(sid.m_cookies);
  if (!request.RunHttpRequest())
    MYTHROW(NetworkError, ("LogoutUser Network error while connecting to", request.UrlRequested()));
  if (request.ErrorCode() != HTTP::OK)
    MYTHROW(LogoutUserError, (DebugPrint(request)));
}
// Submits the OSM login form with the user's credentials.
// Returns false when the credentials are rejected; throws on network/server errors.
bool OsmOAuth::LoginUserPassword(string const & login, string const & password, SessionID const & sid) const
{
  map<string, string> const params =
  {
    {"username", login},
    {"password", password},
    {"referer", "/"},
    {"commit", "Login"},
    {"authenticity_token", sid.m_token}
  };
  HttpClient request(m_baseUrl + "/login");
  request.SetBodyData(BuildPostRequest(params), "application/x-www-form-urlencoded")
         .SetCookies(sid.m_cookies)
         .SetHandleRedirects(false);
  if (!request.RunHttpRequest())
    MYTHROW(NetworkError, ("LoginUserPassword Network error while connecting to", request.UrlRequested()));
  // At the moment, automatic redirects handling is buggy on Androids < 4.4.
  // set_handle_redirects(false) works only for Android code, iOS one (and curl) still automatically follow all redirects.
  if (request.ErrorCode() != HTTP::OK && request.ErrorCode() != HTTP::Found)
    MYTHROW(LoginUserPasswordServerError, (DebugPrint(request)));
  // Not redirected page is a 100% signal that login and/or password are invalid.
  if (!request.WasRedirected())
    return false;
  // Check if we were redirected to some 3rd party site.
  if (request.UrlReceived().find(m_baseUrl) != 0)
    MYTHROW(UnexpectedRedirect, (DebugPrint(request)));
  // m_baseUrl + "/login" means login and/or password are invalid.
  return request.ServerResponse().find("/login") == string::npos;
}
// Logs the session in via a social-network access token by hitting the
// corresponding OAuth callback URL (see kFacebookCallbackPart / kGoogleCallbackPart).
// Returns false when the social login is rejected; throws on network/server errors.
bool OsmOAuth::LoginSocial(string const & callbackPart, string const & socialToken, SessionID const & sid) const
{
  string const url = m_baseUrl + callbackPart + socialToken;
  HttpClient request(url);
  request.SetCookies(sid.m_cookies)
         .SetHandleRedirects(false);
  if (!request.RunHttpRequest())
    MYTHROW(NetworkError, ("LoginSocial Network error while connecting to", request.UrlRequested()));
  if (request.ErrorCode() != HTTP::OK && request.ErrorCode() != HTTP::Found)
    MYTHROW(LoginSocialServerError, (DebugPrint(request)));
  // Not redirected page is a 100% signal that social login has failed.
  if (!request.WasRedirected())
    return false;
  // Check if we were redirected to some 3rd party site.
  if (request.UrlReceived().find(m_baseUrl) != 0)
    MYTHROW(UnexpectedRedirect, (DebugPrint(request)));
  // m_baseUrl + "/login" means login and/or password are invalid.
  return request.ServerResponse().find("/login") == string::npos;
}
// Fakes a buttons press to automatically accept requested permissions.
// Posts the OAuth authorization form on behalf of the logged-in user and
// returns the oauth_verifier PIN parsed from the callback URL.
// Throws NetworkError / SendAuthRequestError on failure.
string OsmOAuth::SendAuthRequest(string const & requestTokenKey, SessionID const & lastSid) const
{
  // We have to get a new CSRF token, using existing cookies to open the correct page.
  SessionID const & sid =
      FetchSessionId("/oauth/authorize?oauth_token=" + requestTokenKey, lastSid.m_cookies);
  map<string, string> const params =
  {
    {"oauth_token", requestTokenKey},
    {"oauth_callback", ""},
    {"authenticity_token", sid.m_token},
    {"allow_read_prefs", "yes"},
    {"allow_write_api", "yes"},
    {"allow_write_gpx", "yes"},
    {"allow_write_notes", "yes"},
    {"commit", "Save changes"}
  };
  HttpClient request(m_baseUrl + "/oauth/authorize");
  request.SetBodyData(BuildPostRequest(params), "application/x-www-form-urlencoded")
         .SetCookies(sid.m_cookies)
         .SetHandleRedirects(false);
  if (!request.RunHttpRequest())
    MYTHROW(NetworkError, ("SendAuthRequest Network error while connecting to", request.UrlRequested()));
  // The verifier PIN is carried in the redirect URL's query string.
  string const callbackURL = request.UrlReceived();
  string const vKey = "oauth_verifier=";
  auto const pos = callbackURL.find(vKey);
  if (pos == string::npos)
    MYTHROW(SendAuthRequestError, ("oauth_verifier is not found", DebugPrint(request)));
  auto const end = callbackURL.find("&", pos);
  // npos as the substr count takes the rest of the string (no trailing parameters).
  return callbackURL.substr(pos + vKey.length(), end == string::npos ? end : end - pos - vKey.length());
}
// Step 1 of the OAuth 1.0a dance: obtains an unauthorized request token
// using the consumer credentials ("oob" = out-of-band, PIN-based flow).
// Throws NetworkError / FetchRequestTokenServerError / UnexpectedRedirect.
RequestToken OsmOAuth::FetchRequestToken() const
{
  OAuth::Consumer const consumer(m_consumerKeySecret.first, m_consumerKeySecret.second);
  OAuth::Client oauth(&consumer);
  string const requestTokenUrl = m_baseUrl + "/oauth/request_token";
  string const requestTokenQuery = oauth.getURLQueryString(OAuth::Http::Get, requestTokenUrl + "?oauth_callback=oob");
  HttpClient request(requestTokenUrl + "?" + requestTokenQuery);
  if (!request.RunHttpRequest())
    MYTHROW(NetworkError, ("FetchRequestToken Network error while connecting to", request.UrlRequested()));
  if (request.ErrorCode() != HTTP::OK)
    MYTHROW(FetchRequestTokenServerError, (DebugPrint(request)));
  if (request.WasRedirected())
    MYTHROW(UnexpectedRedirect, ("Redirected to", request.UrlReceived(), "from", request.UrlRequested()));
  // Throws std::runtime_error.
  OAuth::Token const reqToken = OAuth::Token::extract(request.ServerResponse());
  return { reqToken.key(), reqToken.secret() };
}
// Step 3 of the dance: exchanges the authorized request token plus the
// user's verifier PIN for a long-lived access token.
// Throws NetworkError / FinishAuthorizationServerError / UnexpectedRedirect.
KeySecret OsmOAuth::FinishAuthorization(RequestToken const & requestToken,
                                        string const & verifier) const
{
  OAuth::Consumer const consumer(m_consumerKeySecret.first, m_consumerKeySecret.second);
  OAuth::Token const reqToken(requestToken.first, requestToken.second, verifier);
  OAuth::Client oauth(&consumer, &reqToken);
  string const accessTokenUrl = m_baseUrl + "/oauth/access_token";
  string const queryString = oauth.getURLQueryString(OAuth::Http::Get, accessTokenUrl, "", true);
  HttpClient request(accessTokenUrl + "?" + queryString);
  if (!request.RunHttpRequest())
    MYTHROW(NetworkError, ("FinishAuthorization Network error while connecting to", request.UrlRequested()));
  if (request.ErrorCode() != HTTP::OK)
    MYTHROW(FinishAuthorizationServerError, (DebugPrint(request)));
  if (request.WasRedirected())
    MYTHROW(UnexpectedRedirect, ("Redirected to", request.UrlReceived(), "from", request.UrlRequested()));
  OAuth::KeyValuePairs const responseData = OAuth::ParseKeyValuePairs(request.ServerResponse());
  // Throws std::runtime_error.
  OAuth::Token const accessToken = OAuth::Token::extract(responseData);
  return { accessToken.key(), accessToken.secret() };
}
// Given a web session id, fetches an OAuth access token.
// Runs the full request-token -> authorize -> access-token sequence and
// logs the web session out afterwards.
KeySecret OsmOAuth::FetchAccessToken(SessionID const & sid) const
{
  // Aquire a request token.
  RequestToken const requestToken = FetchRequestToken();
  // Faking a button press for access rights.
  string const pin = SendAuthRequest(requestToken.first, sid);
  LogoutUser(sid);
  // Got pin, exchange it for the access token.
  return FinishAuthorization(requestToken, pin);
}
// Full login with OSM username/password: creates a web session, logs in,
// then converts the session into an OAuth access token stored on this object.
// Returns false when the credentials are invalid; throws on network errors.
bool OsmOAuth::AuthorizePassword(string const & login, string const & password)
{
  SessionID const sid = FetchSessionId();
  if (!LoginUserPassword(login, password, sid))
    return false;
  m_tokenKeySecret = FetchAccessToken(sid);
  return true;
}
// Same as AuthorizePassword but logs in via a Facebook access token.
bool OsmOAuth::AuthorizeFacebook(string const & facebookToken)
{
  SessionID const sid = FetchSessionId();
  if (!LoginSocial(kFacebookCallbackPart, facebookToken, sid))
    return false;
  m_tokenKeySecret = FetchAccessToken(sid);
  return true;
}
// Same as AuthorizePassword but logs in via a Google OAuth2 access token.
bool OsmOAuth::AuthorizeGoogle(string const & googleToken)
{
  SessionID const sid = FetchSessionId();
  if (!LoginSocial(kGoogleCallbackPart, googleToken, sid))
    return false;
  m_tokenKeySecret = FetchAccessToken(sid);
  return true;
}
// Builds the Facebook-branded OSM authorization URL for an external browser,
// returning it together with the request token needed to finish the flow.
OsmOAuth::UrlRequestToken OsmOAuth::GetFacebookOAuthURL() const
{
  RequestToken const requestToken = FetchRequestToken();
  string const url = m_baseUrl + kFacebookOAuthPart + requestToken.first;
  return UrlRequestToken(url, requestToken);
}
// Same as GetFacebookOAuthURL but for Google sign-in.
OsmOAuth::UrlRequestToken OsmOAuth::GetGoogleOAuthURL() const
{
  RequestToken const requestToken = FetchRequestToken();
  string const url = m_baseUrl + kGoogleOAuthPart + requestToken.first;
  return UrlRequestToken(url, requestToken);
}
// Submits the "forgot password" form for the given e-mail address.
// Returns true when the server accepted the request (redirect back to the
// site); throws NetworkError / ResetPasswordServerError on failure.
bool OsmOAuth::ResetPassword(string const & email) const
{
  string const kForgotPasswordUrlPart = "/user/forgot-password";
  SessionID const sid = FetchSessionId(kForgotPasswordUrlPart);
  map<string, string> const formFields =
  {
    {"user[email]", email},
    {"authenticity_token", sid.m_token},
    {"commit", "Reset password"}
  };
  HttpClient request(m_baseUrl + kForgotPasswordUrlPart);
  request.SetBodyData(BuildPostRequest(formFields), "application/x-www-form-urlencoded")
         .SetCookies(sid.m_cookies);
  if (!request.RunHttpRequest())
    MYTHROW(NetworkError, ("ResetPassword Network error while connecting to", request.UrlRequested()));
  if (request.ErrorCode() != HTTP::OK)
    MYTHROW(ResetPasswordServerError, (DebugPrint(request)));
  // A redirect back to our own site signals success.
  return request.WasRedirected() && request.UrlReceived().find(m_baseUrl) != string::npos;
}
// Performs an OAuth-signed call to the OSM editing API.
// method is the API path (may contain a query string), httpMethod one of
// GET/POST/PUT/DELETE, body the optional XML payload for non-GET calls.
// Requires a valid user token; throws InvalidKeySecret / NetworkError /
// UnexpectedRedirect / UnsupportedApiRequestMethod.
OsmOAuth::Response OsmOAuth::Request(string const & method, string const & httpMethod, string const & body) const
{
  if (!IsValid(m_tokenKeySecret))
    MYTHROW(InvalidKeySecret, ("User token (key and secret) are empty."));
  OAuth::Consumer const consumer(m_consumerKeySecret.first, m_consumerKeySecret.second);
  OAuth::Token const oatoken(m_tokenKeySecret.first, m_tokenKeySecret.second);
  OAuth::Client oauth(&consumer, &oatoken);
  OAuth::Http::RequestType reqType;
  if (httpMethod == "GET")
    reqType = OAuth::Http::Get;
  else if (httpMethod == "POST")
    reqType = OAuth::Http::Post;
  else if (httpMethod == "PUT")
    reqType = OAuth::Http::Put;
  else if (httpMethod == "DELETE")
    reqType = OAuth::Http::Delete;
  else
    MYTHROW(UnsupportedApiRequestMethod, ("Unsupported OSM API request method", httpMethod));
  string url = m_apiUrl + kApiVersion + method;
  // Sign the full URL, then strip any original query string: the signed
  // query (which already includes the original parameters) replaces it.
  string const query = oauth.getURLQueryString(reqType, url);
  auto const qPos = url.find('?');
  if (qPos != string::npos)
    url = url.substr(0, qPos);
  HttpClient request(url + "?" + query);
  if (httpMethod != "GET")
    request.SetBodyData(body, "application/xml", httpMethod);
  if (!request.RunHttpRequest())
    MYTHROW(NetworkError, ("Request Network error while connecting to", url));
  if (request.WasRedirected())
    MYTHROW(UnexpectedRedirect, ("Redirected to", request.UrlReceived(), "from", url));
  return Response(request.ErrorCode(), request.ServerResponse());
}
// Performs a plain (unsigned) GET request, either against the editing API
// (api == true) or the main web site. Throws NetworkError / UnexpectedRedirect.
OsmOAuth::Response OsmOAuth::DirectRequest(string const & method, bool api) const
{
  string const requestUrl = (api ? m_apiUrl + kApiVersion : m_baseUrl) + method;
  HttpClient request(requestUrl);
  if (!request.RunHttpRequest())
    MYTHROW(NetworkError, ("DirectRequest Network error while connecting to", requestUrl));
  if (request.WasRedirected())
    MYTHROW(UnexpectedRedirect, ("Redirected to", request.UrlReceived(), "from", requestUrl));
  return Response(request.ErrorCode(), request.ServerResponse());
}
// Renders an API response as "<status name>: <body>" for logging; negative
// status codes are treated as client-side network errors (no server data).
string DebugPrint(OsmOAuth::Response const & code)
{
  string r;
  switch (code.first)
  {
  case OsmOAuth::HTTP::OK: r = "OK"; break;
  case OsmOAuth::HTTP::BadXML: r = "BadXML"; break;
  case OsmOAuth::HTTP::BadAuth: r = "BadAuth"; break;
  case OsmOAuth::HTTP::Redacted: r = "Redacted"; break;
  case OsmOAuth::HTTP::NotFound: r = "NotFound"; break;
  case OsmOAuth::HTTP::WrongMethod: r = "WrongMethod"; break;
  case OsmOAuth::HTTP::Conflict: r = "Conflict"; break;
  case OsmOAuth::HTTP::Gone: r = "Gone"; break;
  case OsmOAuth::HTTP::PreconditionFailed: r = "PreconditionFailed"; break;
  case OsmOAuth::HTTP::URITooLong: r = "URITooLong"; break;
  case OsmOAuth::HTTP::TooMuchData: r = "TooMuchData"; break;
  default:
    // No data from server in case of NetworkError.
    if (code.first < 0)
      return "NetworkError " + strings::to_string(code.first);
    r = "HTTP " + strings::to_string(code.first);
  }
  return r + ": " + code.second;
}
} // namespace osm
| darina/omim | editor/osm_auth.cpp | C++ | apache-2.0 | 16,629 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.formats.json.debezium;
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
import org.apache.flink.configuration.ReadableConfig;
import org.apache.flink.formats.json.JsonOptions;
import org.apache.flink.table.api.ValidationException;
import static org.apache.flink.formats.json.debezium.DebeziumJsonFormatFactory.IDENTIFIER;
/** Option utils for debezium-json format. */
public class DebeziumJsonOptions {
    public static final ConfigOption<Boolean> SCHEMA_INCLUDE =
            ConfigOptions.key("schema-include")
                    .booleanType()
                    .defaultValue(false)
                    .withDescription(
                            "When setting up a Debezium Kafka Connect, users can enable "
                                    + "a Kafka configuration 'value.converter.schemas.enable' to include schema in the message. "
                                    + "This option indicates the Debezium JSON data include the schema in the message or not. "
                                    + "Default is false.");
    // The remaining options are shared with the plain JSON format and simply
    // re-exposed here under the debezium-json identifier.
    public static final ConfigOption<Boolean> IGNORE_PARSE_ERRORS = JsonOptions.IGNORE_PARSE_ERRORS;
    public static final ConfigOption<String> TIMESTAMP_FORMAT = JsonOptions.TIMESTAMP_FORMAT;
    public static final ConfigOption<String> JSON_MAP_NULL_KEY_MODE = JsonOptions.MAP_NULL_KEY_MODE;
    public static final ConfigOption<String> JSON_MAP_NULL_KEY_LITERAL =
            JsonOptions.MAP_NULL_KEY_LITERAL;
    // --------------------------------------------------------------------------------------------
    // Validation
    // --------------------------------------------------------------------------------------------
    /** Validator for debezium decoding format. Delegates to the common JSON checks. */
    public static void validateDecodingFormatOptions(ReadableConfig tableOptions) {
        JsonOptions.validateDecodingFormatOptions(tableOptions);
    }
    /**
     * Validator for debezium encoding format. In addition to the common JSON checks,
     * rejects 'schema-include' = true because serialization cannot emit the schema.
     */
    public static void validateEncodingFormatOptions(ReadableConfig tableOptions) {
        JsonOptions.validateEncodingFormatOptions(tableOptions);
        // validator for {@link SCHEMA_INCLUDE}
        if (tableOptions.get(SCHEMA_INCLUDE)) {
            throw new ValidationException(
                    String.format(
                            "Debezium JSON serialization doesn't support '%s.%s' option been set to true.",
                            IDENTIFIER, SCHEMA_INCLUDE.key()));
        }
    }
}
| rmetzger/flink | flink-formats/flink-json/src/main/java/org/apache/flink/formats/json/debezium/DebeziumJsonOptions.java | Java | apache-2.0 | 3,339 |
# Spec for the NestedContainer artifact: installing a cask whose payload is
# itself a container should leave the inner app extracted in the staged path.
describe Hbc::Artifact::NestedContainer, :cask do
  describe "install" do
    it "extracts the specified paths as containers" do
      # Stage the cask without running its artifacts so the nested
      # container is still packed when the artifact under test runs.
      cask = Hbc::CaskLoader.load_from_file(TEST_FIXTURE_DIR/"cask/Casks/nested-app.rb").tap do |c|
        InstallHelper.install_without_artifacts(c)
      end
      # shutup suppresses the artifact's console output during the test.
      shutup do
        Hbc::Artifact::NestedContainer.new(cask).install_phase
      end
      expect(cask.staged_path.join("MyNestedApp.app")).to be_a_directory
    end
  end
end
| ehsanfar/ofspy_Lite | bin/homebrew/Library/Homebrew/test/cask/artifact/nested_container_spec.rb | Ruby | apache-2.0 | 472 |
/*
* Copyright 2017 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jbpm.test.persistence.scripts.quartzmockentities;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.Id;
import javax.persistence.IdClass;
/**
 * Mock JPA entity mapping Quartz's QRTZ_PAUSED_TRIGGER_GRPS table, used by the
 * persistence-script tests. The composite primary key (scheduler name +
 * trigger group) is modelled via {@link QrtzPausedTriggersId}.
 */
@Entity(name = "QRTZ_PAUSED_TRIGGER_GRPS")
@IdClass(QrtzPausedTriggersId.class)
public class QrtzPausedTriggerGrps {

    /** Scheduler instance name; part of the composite primary key. */
    @Id
    @Column(name = "SCHED_NAME")
    private String schedulerName;

    /** Paused trigger group name; part of the composite primary key. */
    @Id
    @Column(name = "TRIGGER_GROUP")
    private String triggerGroup;

    /** @return the scheduler instance name */
    public String getSchedulerName() {
        return schedulerName;
    }

    /** @return the paused trigger group name */
    public String getTriggerGroup() {
        return triggerGroup;
    }

    /**
     * Fluent setter for the scheduler instance name.
     *
     * @param schedulerName the scheduler instance name
     * @return this entity, for call chaining
     */
    public QrtzPausedTriggerGrps schedulerName(final String schedulerName) {
        this.schedulerName = schedulerName;
        return this;
    }

    /**
     * Fluent setter for the paused trigger group name.
     *
     * @param triggerGroup the trigger group name
     * @return this entity, for call chaining
     */
    public QrtzPausedTriggerGrps triggerGroup(final String triggerGroup) {
        this.triggerGroup = triggerGroup;
        return this;
    }
}
| mbiarnes/jbpm | jbpm-test-util/src/main/java/org/jbpm/test/persistence/scripts/quartzmockentities/QrtzPausedTriggerGrps.java | Java | apache-2.0 | 1,381 |
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package predicates
import (
"errors"
"fmt"
"os"
"regexp"
"strconv"
"k8s.io/klog"
"k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
corelisters "k8s.io/client-go/listers/core/v1"
storagelisters "k8s.io/client-go/listers/storage/v1"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
"k8s.io/kubernetes/pkg/features"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
"k8s.io/kubernetes/pkg/scheduler/volumebinder"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
const (
	// MatchInterPodAffinityPred defines the name of predicate MatchInterPodAffinity.
	MatchInterPodAffinityPred = "MatchInterPodAffinity"
	// CheckVolumeBindingPred defines the name of predicate CheckVolumeBinding.
	CheckVolumeBindingPred = "CheckVolumeBinding"
	// CheckNodeConditionPred defines the name of predicate CheckNodeCondition.
	CheckNodeConditionPred = "CheckNodeCondition"
	// GeneralPred defines the name of predicate GeneralPredicates.
	GeneralPred = "GeneralPredicates"
	// HostNamePred defines the name of predicate HostName.
	HostNamePred = "HostName"
	// PodFitsHostPortsPred defines the name of predicate PodFitsHostPorts.
	PodFitsHostPortsPred = "PodFitsHostPorts"
	// MatchNodeSelectorPred defines the name of predicate MatchNodeSelector.
	MatchNodeSelectorPred = "MatchNodeSelector"
	// PodFitsResourcesPred defines the name of predicate PodFitsResources.
	PodFitsResourcesPred = "PodFitsResources"
	// NoDiskConflictPred defines the name of predicate NoDiskConflict.
	NoDiskConflictPred = "NoDiskConflict"
	// PodToleratesNodeTaintsPred defines the name of predicate PodToleratesNodeTaints.
	PodToleratesNodeTaintsPred = "PodToleratesNodeTaints"
	// CheckNodeUnschedulablePred defines the name of predicate CheckNodeUnschedulablePredicate.
	CheckNodeUnschedulablePred = "CheckNodeUnschedulable"
	// PodToleratesNodeNoExecuteTaintsPred defines the name of predicate PodToleratesNodeNoExecuteTaints.
	PodToleratesNodeNoExecuteTaintsPred = "PodToleratesNodeNoExecuteTaints"
	// CheckNodeLabelPresencePred defines the name of predicate CheckNodeLabelPresence.
	CheckNodeLabelPresencePred = "CheckNodeLabelPresence"
	// CheckServiceAffinityPred defines the name of predicate checkServiceAffinity.
	CheckServiceAffinityPred = "CheckServiceAffinity"
	// MaxEBSVolumeCountPred defines the name of predicate MaxEBSVolumeCount.
	MaxEBSVolumeCountPred = "MaxEBSVolumeCount"
	// MaxGCEPDVolumeCountPred defines the name of predicate MaxGCEPDVolumeCount.
	MaxGCEPDVolumeCountPred = "MaxGCEPDVolumeCount"
	// MaxAzureDiskVolumeCountPred defines the name of predicate MaxAzureDiskVolumeCount.
	MaxAzureDiskVolumeCountPred = "MaxAzureDiskVolumeCount"
	// MaxCSIVolumeCountPred defines the predicate that decides how many CSI volumes should be attached
	// NOTE(review): unlike every other entry here, the string value carries a
	// "Pred" suffix; presumably kept for compatibility with existing scheduler
	// policy configs — confirm before renaming.
	MaxCSIVolumeCountPred = "MaxCSIVolumeCountPred"
	// NoVolumeZoneConflictPred defines the name of predicate NoVolumeZoneConflict.
	NoVolumeZoneConflictPred = "NoVolumeZoneConflict"
	// CheckNodeMemoryPressurePred defines the name of predicate CheckNodeMemoryPressure.
	CheckNodeMemoryPressurePred = "CheckNodeMemoryPressure"
	// CheckNodeDiskPressurePred defines the name of predicate CheckNodeDiskPressure.
	CheckNodeDiskPressurePred = "CheckNodeDiskPressure"
	// CheckNodePIDPressurePred defines the name of predicate CheckNodePIDPressure.
	CheckNodePIDPressurePred = "CheckNodePIDPressure"

	// DefaultMaxGCEPDVolumes defines the maximum number of PD Volumes for GCE
	// GCE instances can have up to 16 PD volumes attached.
	DefaultMaxGCEPDVolumes = 16
	// DefaultMaxAzureDiskVolumes defines the maximum number of PD Volumes for Azure
	// Larger Azure VMs can actually have much more disks attached.
	// TODO We should determine the max based on VM size
	DefaultMaxAzureDiskVolumes = 16

	// KubeMaxPDVols defines the maximum number of PD Volumes per kubelet
	// (environment variable name read by getMaxVolLimitFromEnv).
	KubeMaxPDVols = "KUBE_MAX_PD_VOLS"

	// EBSVolumeFilterType defines the filter name for EBSVolumeFilter.
	EBSVolumeFilterType = "EBS"
	// GCEPDVolumeFilterType defines the filter name for GCEPDVolumeFilter.
	GCEPDVolumeFilterType = "GCE"
	// AzureDiskVolumeFilterType defines the filter name for AzureDiskVolumeFilter.
	AzureDiskVolumeFilterType = "AzureDisk"
)
// IMPORTANT NOTE for predicate developers:
// We are using cached predicate result for pods belonging to the same equivalence class.
// So when updating an existing predicate, you should consider whether your change will introduce new
// dependency to attributes of any API object like Pod, Node, Service etc.
// If yes, you are expected to invalidate the cached predicate result for related API object change.
// For example:
// https://github.com/kubernetes/kubernetes/blob/36a218e/plugin/pkg/scheduler/factory/factory.go#L422
// IMPORTANT NOTE: this list contains the ordering of the predicates, if you develop a new predicate
// it is mandatory to add its name to this list.
// Otherwise it won't be processed, see generic_scheduler#podFitsOnNode().
// The order is based on the restrictiveness & complexity of predicates.
// Design doc: https://github.com/kubernetes/community/blob/master/contributors/design-proposals/scheduling/predicates-ordering.md
var (
	// predicatesOrdering lists every known predicate name in evaluation order;
	// new predicates MUST be appended here or they are never run (see the note
	// above). The slice is read by Ordering and replaced wholesale by
	// SetPredicatesOrdering.
	predicatesOrdering = []string{CheckNodeConditionPred, CheckNodeUnschedulablePred,
		GeneralPred, HostNamePred, PodFitsHostPortsPred,
		MatchNodeSelectorPred, PodFitsResourcesPred, NoDiskConflictPred,
		PodToleratesNodeTaintsPred, PodToleratesNodeNoExecuteTaintsPred, CheckNodeLabelPresencePred,
		CheckServiceAffinityPred, MaxEBSVolumeCountPred, MaxGCEPDVolumeCountPred, MaxCSIVolumeCountPred,
		MaxAzureDiskVolumeCountPred, CheckVolumeBindingPred, NoVolumeZoneConflictPred,
		CheckNodeMemoryPressurePred, CheckNodePIDPressurePred, CheckNodeDiskPressurePred, MatchInterPodAffinityPred}
)
// NodeInfo interface represents anything that can get node object from node ID.
type NodeInfo interface {
	GetNodeInfo(nodeID string) (*v1.Node, error)
}

// PersistentVolumeInfo interface represents anything that can get persistent volume object by PV ID.
type PersistentVolumeInfo interface {
	GetPersistentVolumeInfo(pvID string) (*v1.PersistentVolume, error)
}

// CachedPersistentVolumeInfo implements PersistentVolumeInfo by embedding a
// PersistentVolumeLister and delegating lookups to it.
type CachedPersistentVolumeInfo struct {
	corelisters.PersistentVolumeLister
}
// Ordering returns the ordering of predicates.
// Note: the package-level slice itself is returned (not a copy), so callers
// share its backing array.
func Ordering() []string {
	return predicatesOrdering
}

// SetPredicatesOrdering sets the ordering of predicates.
// NOTE(review): the package-level slice is replaced without synchronization;
// presumably only called during scheduler setup — confirm before calling
// concurrently with Ordering.
func SetPredicatesOrdering(names []string) {
	predicatesOrdering = names
}
// GetPersistentVolumeInfo returns a persistent volume object by PV ID.
// Delegates to the embedded lister's Get.
func (c *CachedPersistentVolumeInfo) GetPersistentVolumeInfo(pvID string) (*v1.PersistentVolume, error) {
	return c.Get(pvID)
}
// PersistentVolumeClaimInfo interface represents anything that can get a PVC object in
// specified namespace with specified name.
type PersistentVolumeClaimInfo interface {
	GetPersistentVolumeClaimInfo(namespace string, name string) (*v1.PersistentVolumeClaim, error)
}

// CachedPersistentVolumeClaimInfo implements PersistentVolumeClaimInfo by
// embedding a PersistentVolumeClaimLister and delegating lookups to it.
type CachedPersistentVolumeClaimInfo struct {
	corelisters.PersistentVolumeClaimLister
}

// GetPersistentVolumeClaimInfo fetches the claim in specified namespace with specified name
// via the embedded lister's namespaced Get.
func (c *CachedPersistentVolumeClaimInfo) GetPersistentVolumeClaimInfo(namespace string, name string) (*v1.PersistentVolumeClaim, error) {
	return c.PersistentVolumeClaims(namespace).Get(name)
}
// CachedNodeInfo implements NodeInfo
type CachedNodeInfo struct {
	corelisters.NodeLister
}

// GetNodeInfo returns cached data for the node 'id'.
func (c *CachedNodeInfo) GetNodeInfo(id string) (*v1.Node, error) {
	node, err := c.Get(id)

	// A NotFound error is returned unwrapped so callers can still detect it via
	// apierrors.IsNotFound; the wrapping below would hide its type.
	if apierrors.IsNotFound(err) {
		return nil, err
	}

	// Any other lister failure is wrapped with context about which node lookup failed.
	if err != nil {
		return nil, fmt.Errorf("error retrieving node '%v' from cache: %v", id, err)
	}

	return node, nil
}
// StorageClassInfo interface represents anything that can get a storage class object by class name.
type StorageClassInfo interface {
	GetStorageClassInfo(className string) (*storagev1.StorageClass, error)
}

// CachedStorageClassInfo implements StorageClassInfo by embedding a
// StorageClassLister and delegating lookups to it.
type CachedStorageClassInfo struct {
	storagelisters.StorageClassLister
}

// GetStorageClassInfo get StorageClass by class name.
func (c *CachedStorageClassInfo) GetStorageClassInfo(className string) (*storagev1.StorageClass, error) {
	return c.Get(className)
}
// isVolumeConflict reports whether volume clashes with any volume already
// declared by pod. Only GCE PD, AWS EBS, ISCSI and RBD volumes can conflict.
func isVolumeConflict(volume v1.Volume, pod *v1.Pod) bool {
	// Fast path: volumes of any other type never conflict.
	if volume.GCEPersistentDisk == nil && volume.AWSElasticBlockStore == nil && volume.RBD == nil && volume.ISCSI == nil {
		return false
	}

	for _, existing := range pod.Spec.Volumes {
		// Same GCE disk mounted by multiple pods conflicts unless every mount is read-only.
		if gce, egce := volume.GCEPersistentDisk, existing.GCEPersistentDisk; gce != nil && egce != nil {
			if gce.PDName == egce.PDName && !(gce.ReadOnly && egce.ReadOnly) {
				return true
			}
		}

		// AWS EBS forbids two pods mounting the same volume ID at all.
		if ebs, eebs := volume.AWSElasticBlockStore, existing.AWSElasticBlockStore; ebs != nil && eebs != nil {
			if ebs.VolumeID == eebs.VolumeID {
				return true
			}
		}

		// Two ISCSI volumes are the same if they share an IQN. As iscsi volumes
		// are of type RWO or ROX, only one RW mount is permitted, so matching
		// volumes conflict unless both mounts are read-only.
		if iscsi, eiscsi := volume.ISCSI, existing.ISCSI; iscsi != nil && eiscsi != nil {
			if iscsi.IQN == eiscsi.IQN && !(iscsi.ReadOnly && eiscsi.ReadOnly) {
				return true
			}
		}

		// Two RBD images are the same if they share a Ceph monitor, live in the
		// same RADOS pool, and have the same image name. Only one read-write
		// mount is permitted per image, so matching images conflict unless both
		// mounts are read-only.
		if rbd, erbd := volume.RBD, existing.RBD; rbd != nil && erbd != nil {
			if haveOverlap(rbd.CephMonitors, erbd.CephMonitors) &&
				rbd.RBDPool == erbd.RBDPool &&
				rbd.RBDImage == erbd.RBDImage &&
				!(rbd.ReadOnly && erbd.ReadOnly) {
				return true
			}
		}
	}

	return false
}
// NoDiskConflict evaluates if a pod can fit due to the volumes it requests, and those that
// are already mounted. If there is already a volume mounted on that node, another pod that uses the same volume
// can't be scheduled there.
// This is GCE, Amazon EBS, and Ceph RBD specific for now:
// - GCE PD allows multiple mounts as long as they're all read-only
// - AWS EBS forbids any two pods mounting the same volume ID
// - Ceph RBD forbids if any two pods share at least same monitor, and match pool and image.
// - ISCSI forbids if any two pods share at least same IQN, LUN and Target
// TODO: migrate this into some per-volume specific code?
func NoDiskConflict(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	// Compare every volume requested by the candidate pod against every volume
	// of every pod already on the node; the first conflict fails the predicate.
	for _, v := range pod.Spec.Volumes {
		for _, ev := range nodeInfo.Pods() {
			if isVolumeConflict(v, ev) {
				return false, []algorithm.PredicateFailureReason{ErrDiskConflict}, nil
			}
		}
	}

	return true, nil, nil
}
// MaxPDVolumeCountChecker contains information to check the max number of volumes for a predicate.
type MaxPDVolumeCountChecker struct {
	filter VolumeFilter
	// volumeLimitKey is the allocatable-resource key consulted when the
	// AttachVolumeLimit feature gate is enabled (see predicate()).
	volumeLimitKey v1.ResourceName
	// maxVolumeFunc computes the fallback per-node attach limit.
	maxVolumeFunc func(node *v1.Node) int
	pvInfo        PersistentVolumeInfo
	pvcInfo       PersistentVolumeClaimInfo

	// The string below is generated randomly during the struct's initialization.
	// It is used to prefix volumeID generated inside the predicate() method to
	// avoid conflicts with any real volume.
	randomVolumeIDPrefix string
}

// VolumeFilter contains information on how to filter PD Volumes when checking PD Volume caps
type VolumeFilter struct {
	// FilterVolume extracts an ID from a pod-inline volume, reporting whether
	// the volume is relevant to this filter.
	FilterVolume func(vol *v1.Volume) (id string, relevant bool)
	// FilterPersistentVolume does the same for a bound PersistentVolume.
	FilterPersistentVolume func(pv *v1.PersistentVolume) (id string, relevant bool)
}
// NewMaxPDVolumeCountPredicate creates a predicate which evaluates whether a pod can fit based on the
// number of volumes which match a filter that it requests, and those that are already present.
//
// The predicate looks for both volumes used directly, as well as PVC volumes that are backed by relevant volume
// types, counts the number of unique volumes, and rejects the new pod if it would place the total count over
// the maximum.
//
// filterName must be one of EBSVolumeFilterType, GCEPDVolumeFilterType or
// AzureDiskVolumeFilterType; anything else aborts the process via klog.Fatalf.
func NewMaxPDVolumeCountPredicate(
	filterName string, pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo) algorithm.FitPredicate {
	var filter VolumeFilter
	var volumeLimitKey v1.ResourceName

	switch filterName {

	case EBSVolumeFilterType:
		filter = EBSVolumeFilter
		volumeLimitKey = v1.ResourceName(volumeutil.EBSVolumeLimitKey)
	case GCEPDVolumeFilterType:
		filter = GCEPDVolumeFilter
		volumeLimitKey = v1.ResourceName(volumeutil.GCEVolumeLimitKey)
	case AzureDiskVolumeFilterType:
		filter = AzureDiskVolumeFilter
		volumeLimitKey = v1.ResourceName(volumeutil.AzureVolumeLimitKey)
	default:
		// klog.Fatalf terminates the process, so the nil return below is never
		// reached in practice; it only satisfies the compiler.
		klog.Fatalf("Wrong filterName, Only Support %v %v %v ", EBSVolumeFilterType,
			GCEPDVolumeFilterType, AzureDiskVolumeFilterType)
		return nil

	}
	c := &MaxPDVolumeCountChecker{
		filter:               filter,
		volumeLimitKey:       volumeLimitKey,
		maxVolumeFunc:        getMaxVolumeFunc(filterName),
		pvInfo:               pvInfo,
		pvcInfo:              pvcInfo,
		randomVolumeIDPrefix: rand.String(32),
	}

	return c.predicate
}
// getMaxVolumeFunc returns a function that computes the per-node attachable
// volume limit for the given filter type. The KUBE_MAX_PD_VOLS environment
// variable, when set to a positive value, overrides every per-cloud default.
// Unknown filter types yield -1.
func getMaxVolumeFunc(filterName string) func(node *v1.Node) int {
	return func(node *v1.Node) int {
		maxVolumesFromEnv := getMaxVolLimitFromEnv()
		if maxVolumesFromEnv > 0 {
			return maxVolumesFromEnv
		}

		// Direct map lookup replaces the previous scan over all labels; a
		// missing key yields the empty string, exactly as before.
		nodeInstanceType := node.ObjectMeta.Labels[kubeletapis.LabelInstanceType]

		switch filterName {
		case EBSVolumeFilterType:
			return getMaxEBSVolume(nodeInstanceType)
		case GCEPDVolumeFilterType:
			return DefaultMaxGCEPDVolumes
		case AzureDiskVolumeFilterType:
			return DefaultMaxAzureDiskVolumes
		default:
			return -1
		}
	}
}
// getMaxEBSVolume returns the EBS attach limit for the given instance type:
// the Nitro limit when the type matches the Nitro regex, otherwise the classic
// default. NOTE(review): the regex is recompiled on every call and a regex
// error is silently discarded, falling through to the default.
func getMaxEBSVolume(nodeInstanceType string) int {
	if ok, _ := regexp.MatchString(volumeutil.EBSNitroLimitRegex, nodeInstanceType); ok {
		return volumeutil.DefaultMaxEBSNitroVolumeLimit
	}
	return volumeutil.DefaultMaxEBSVolumes
}
// getMaxVolLimitFromEnv checks the max PD volumes environment variable, otherwise returning a default value
// (-1 when the variable is unset, unparsable, or non-positive).
func getMaxVolLimitFromEnv() int {
	rawMaxVols := os.Getenv(KubeMaxPDVols)
	if rawMaxVols == "" {
		return -1
	}

	parsedMaxVols, err := strconv.Atoi(rawMaxVols)
	switch {
	case err != nil:
		klog.Errorf("Unable to parse maximum PD volumes value, using default: %v", err)
	case parsedMaxVols <= 0:
		klog.Errorf("Maximum PD volumes must be a positive value, using default ")
	default:
		return parsedMaxVols
	}

	return -1
}
// filterVolumes records, in filteredVolumes, a unique ID for every volume in
// volumes that is relevant to this checker's filter. Inline volumes are
// identified via FilterVolume; PVC-backed volumes are resolved through the
// PVC and PV lookups and identified via FilterPersistentVolume. Lookup
// failures conservatively count the volume against the limit (under a
// synthetic ID) rather than skipping it.
func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace string, filteredVolumes map[string]bool) error {
	for i := range volumes {
		vol := &volumes[i]
		if id, ok := c.filter.FilterVolume(vol); ok {
			filteredVolumes[id] = true
		} else if vol.PersistentVolumeClaim != nil {
			pvcName := vol.PersistentVolumeClaim.ClaimName
			if pvcName == "" {
				return fmt.Errorf("PersistentVolumeClaim had no name")
			}

			// Until we know real ID of the volume use namespace/pvcName as substitute
			// with a random prefix (calculated and stored inside 'c' during initialization)
			// to avoid conflicts with existing volume IDs.
			pvID := fmt.Sprintf("%s-%s/%s", c.randomVolumeIDPrefix, namespace, pvcName)

			pvc, err := c.pvcInfo.GetPersistentVolumeClaimInfo(namespace, pvcName)
			if err != nil || pvc == nil {
				// if the PVC is not found, log the error and count the PV towards the PV limit
				klog.V(4).Infof("Unable to look up PVC info for %s/%s, assuming PVC matches predicate when counting limits: %v", namespace, pvcName, err)
				filteredVolumes[pvID] = true
				continue
			}

			pvName := pvc.Spec.VolumeName
			if pvName == "" {
				// PVC is not bound. It was either deleted and created again or
				// it was forcefully unbound by admin. The pod can still use the
				// original PV where it was bound to -> log the error and count
				// the PV towards the PV limit
				klog.V(4).Infof("PVC %s/%s is not bound, assuming PVC matches predicate when counting limits", namespace, pvcName)
				filteredVolumes[pvID] = true
				continue
			}

			pv, err := c.pvInfo.GetPersistentVolumeInfo(pvName)
			if err != nil || pv == nil {
				// if the PV is not found, log the error
				// and count the PV towards the PV limit
				klog.V(4).Infof("Unable to look up PV info for %s/%s/%s, assuming PV matches predicate when counting limits: %v", namespace, pvcName, pvName, err)
				filteredVolumes[pvID] = true
				continue
			}

			if id, ok := c.filter.FilterPersistentVolume(pv); ok {
				filteredVolumes[id] = true
			}
		}
	}

	return nil
}
// predicate counts the unique filter-relevant volumes the pod would add to the
// node and fails with ErrMaxVolumeCountExceeded when existing plus new volumes
// would exceed the node's attach limit.
func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	// If a pod doesn't have any volume attached to it, the predicate will always be true.
	// Thus we make a fast path for it, to avoid unnecessary computations in this case.
	if len(pod.Spec.Volumes) == 0 {
		return true, nil, nil
	}

	newVolumes := make(map[string]bool)
	if err := c.filterVolumes(pod.Spec.Volumes, pod.Namespace, newVolumes); err != nil {
		return false, nil, err
	}

	// quick return
	if len(newVolumes) == 0 {
		return true, nil, nil
	}

	// count unique volumes
	existingVolumes := make(map[string]bool)
	for _, existingPod := range nodeInfo.Pods() {
		if err := c.filterVolumes(existingPod.Spec.Volumes, existingPod.Namespace, existingVolumes); err != nil {
			return false, nil, err
		}
	}
	numExistingVolumes := len(existingVolumes)

	// filter out already-mounted volumes
	for k := range existingVolumes {
		if _, ok := newVolumes[k]; ok {
			delete(newVolumes, k)
		}
	}
	numNewVolumes := len(newVolumes)

	maxAttachLimit := c.maxVolumeFunc(nodeInfo.Node())

	// When the feature gate is on, a limit published in the node's allocatable
	// resources overrides the static default computed above.
	if utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
		volumeLimits := nodeInfo.VolumeLimits()
		if maxAttachLimitFromAllocatable, ok := volumeLimits[c.volumeLimitKey]; ok {
			maxAttachLimit = int(maxAttachLimitFromAllocatable)
		}
	}

	if numExistingVolumes+numNewVolumes > maxAttachLimit {
		// violates MaxEBSVolumeCount or MaxGCEPDVolumeCount
		return false, []algorithm.PredicateFailureReason{ErrMaxVolumeCountExceeded}, nil
	}
	// Record headroom for the balanced-volume priority when that gate is on.
	if nodeInfo != nil && nodeInfo.TransientInfo != nil && utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) {
		nodeInfo.TransientInfo.TransientLock.Lock()
		defer nodeInfo.TransientInfo.TransientLock.Unlock()
		nodeInfo.TransientInfo.TransNodeInfo.AllocatableVolumesCount = maxAttachLimit - numExistingVolumes
		nodeInfo.TransientInfo.TransNodeInfo.RequestedVolumes = numNewVolumes
	}
	return true, nil, nil
}
// EBSVolumeFilter is a VolumeFilter for filtering AWS ElasticBlockStore Volumes
// (identified by their EBS volume ID).
var EBSVolumeFilter = VolumeFilter{
	FilterVolume: func(vol *v1.Volume) (string, bool) {
		if vol.AWSElasticBlockStore != nil {
			return vol.AWSElasticBlockStore.VolumeID, true
		}
		return "", false
	},

	FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) {
		if pv.Spec.AWSElasticBlockStore != nil {
			return pv.Spec.AWSElasticBlockStore.VolumeID, true
		}
		return "", false
	},
}

// GCEPDVolumeFilter is a VolumeFilter for filtering GCE PersistentDisk Volumes
// (identified by their PD name).
var GCEPDVolumeFilter = VolumeFilter{
	FilterVolume: func(vol *v1.Volume) (string, bool) {
		if vol.GCEPersistentDisk != nil {
			return vol.GCEPersistentDisk.PDName, true
		}
		return "", false
	},

	FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) {
		if pv.Spec.GCEPersistentDisk != nil {
			return pv.Spec.GCEPersistentDisk.PDName, true
		}
		return "", false
	},
}

// AzureDiskVolumeFilter is a VolumeFilter for filtering Azure Disk Volumes
// (identified by their disk name).
var AzureDiskVolumeFilter = VolumeFilter{
	FilterVolume: func(vol *v1.Volume) (string, bool) {
		if vol.AzureDisk != nil {
			return vol.AzureDisk.DiskName, true
		}
		return "", false
	},

	FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) {
		if pv.Spec.AzureDisk != nil {
			return pv.Spec.AzureDisk.DiskName, true
		}
		return "", false
	},
}
// VolumeZoneChecker contains information to check the volume zone for a predicate.
// It bundles the PV, PVC and StorageClass lookups used by the
// NoVolumeZoneConflict predicate.
type VolumeZoneChecker struct {
	pvInfo    PersistentVolumeInfo
	pvcInfo   PersistentVolumeClaimInfo
	classInfo StorageClassInfo
}
// NewVolumeZonePredicate evaluates if a pod can fit due to the volumes it requests, given
// that some volumes may have zone scheduling constraints.  The requirement is that any
// volume zone-labels must match the equivalent zone-labels on the node.  It is OK for
// the node to have more zone-label constraints (for example, a hypothetical replicated
// volume might allow region-wide access)
//
// Currently this is only supported with PersistentVolumeClaims, and looks to the labels
// only on the bound PersistentVolume.
//
// Working with volumes declared inline in the pod specification (i.e. not
// using a PersistentVolume) is likely to be harder, as it would require
// determining the zone of a volume during scheduling, and that is likely to
// require calling out to the cloud provider.  It seems that we are moving away
// from inline volume declarations anyway.
func NewVolumeZonePredicate(pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo, classInfo StorageClassInfo) algorithm.FitPredicate {
	c := &VolumeZoneChecker{
		pvInfo:    pvInfo,
		pvcInfo:   pvcInfo,
		classInfo: classInfo,
	}
	// Return the bound method value; every invocation shares the checker's lookups.
	return c.predicate
}
// predicate evaluates the NoVolumeZoneConflict constraint: every zone/region
// label on a PersistentVolume bound to one of the pod's claims must be
// compatible with the corresponding label on the candidate node.
func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	// If a pod doesn't have any volume attached to it, the predicate will always be true.
	// Thus we make a fast path for it, to avoid unnecessary computations in this case.
	if len(pod.Spec.Volumes) == 0 {
		return true, nil, nil
	}

	node := nodeInfo.Node()
	if node == nil {
		return false, nil, fmt.Errorf("node not found")
	}

	// Collect only the node's zone/region labels.
	nodeConstraints := make(map[string]string)
	for k, v := range node.ObjectMeta.Labels {
		if k != kubeletapis.LabelZoneFailureDomain && k != kubeletapis.LabelZoneRegion {
			continue
		}
		nodeConstraints[k] = v
	}

	if len(nodeConstraints) == 0 {
		// The node has no zone constraints, so we're OK to schedule.
		// In practice, when using zones, all nodes must be labeled with zone labels.
		// We want to fast-path this case though.
		return true, nil, nil
	}

	namespace := pod.Namespace
	manifest := &(pod.Spec)
	for i := range manifest.Volumes {
		volume := &manifest.Volumes[i]
		if volume.PersistentVolumeClaim != nil {
			pvcName := volume.PersistentVolumeClaim.ClaimName
			if pvcName == "" {
				return false, nil, fmt.Errorf("PersistentVolumeClaim had no name")
			}
			pvc, err := c.pvcInfo.GetPersistentVolumeClaimInfo(namespace, pvcName)
			if err != nil {
				return false, nil, err
			}

			if pvc == nil {
				return false, nil, fmt.Errorf("PersistentVolumeClaim was not found: %q", pvcName)
			}

			pvName := pvc.Spec.VolumeName
			if pvName == "" {
				// Unbound claim: with VolumeScheduling enabled, a claim whose
				// StorageClass waits for first consumer is handled by the
				// volume-binding predicate instead, so skip it here.
				if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
					scName := v1helper.GetPersistentVolumeClaimClass(pvc)
					if len(scName) > 0 {
						class, _ := c.classInfo.GetStorageClassInfo(scName)
						if class != nil {
							if class.VolumeBindingMode == nil {
								return false, nil, fmt.Errorf("VolumeBindingMode not set for StorageClass %q", scName)
							}
							if *class.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer {
								// Skip unbound volumes
								continue
							}
						}
					}
				}
				return false, nil, fmt.Errorf("PersistentVolumeClaim is not bound: %q", pvcName)
			}

			pv, err := c.pvInfo.GetPersistentVolumeInfo(pvName)
			if err != nil {
				return false, nil, err
			}

			if pv == nil {
				return false, nil, fmt.Errorf("PersistentVolume not found: %q", pvName)
			}

			for k, v := range pv.ObjectMeta.Labels {
				if k != kubeletapis.LabelZoneFailureDomain && k != kubeletapis.LabelZoneRegion {
					continue
				}
				// Plain map index (was `nodeV, _ :=`, a redundant blank on the
				// comma-ok form); a missing label yields "" which simply won't
				// appear in the volume's zone set below.
				nodeV := nodeConstraints[k]
				volumeVSet, err := volumeutil.LabelZonesToSet(v)
				if err != nil {
					klog.Warningf("Failed to parse label for %q: %q. Ignoring the label. err=%v. ", k, v, err)
					continue
				}

				if !volumeVSet.Has(nodeV) {
					klog.V(10).Infof("Won't schedule pod %q onto node %q due to volume %q (mismatch on %q)", pod.Name, node.Name, pvName, k)
					return false, []algorithm.PredicateFailureReason{ErrVolumeZoneConflict}, nil
				}
			}
		}
	}

	return true, nil, nil
}
// GetResourceRequest returns a *schedulernodeinfo.Resource that covers the largest
// width in each resource dimension. Because init-containers run sequentially, we collect
// the max in each dimension iteratively. In contrast, we sum the resource vectors for
// regular containers since they run simultaneously.
//
// Example:
//
// Pod:
// InitContainers
// IC1:
// CPU: 2
// Memory: 1G
// IC2:
// CPU: 2
// Memory: 3G
// Containers
// C1:
// CPU: 2
// Memory: 1G
// C2:
// CPU: 1
// Memory: 1G
//
// Result: CPU: 3, Memory: 3G
func GetResourceRequest(pod *v1.Pod) *schedulernodeinfo.Resource {
	result := &schedulernodeinfo.Resource{}
	// Regular containers run simultaneously, so their requests are summed.
	for _, container := range pod.Spec.Containers {
		result.Add(container.Resources.Requests)
	}

	// take max_resource(sum_pod, any_init_container)
	// Init containers run sequentially, so only the per-dimension max matters.
	for _, container := range pod.Spec.InitContainers {
		result.SetMaxResource(container.Resources.Requests)
	}

	return result
}
// podName formats a pod as "namespace/name" for log messages.
func podName(pod *v1.Pod) string {
	return fmt.Sprintf("%s/%s", pod.Namespace, pod.Name)
}
// PodFitsResources checks if a node has sufficient resources, such as cpu, memory, gpu, opaque int resources etc to run a pod.
// First return value indicates whether a node has sufficient resources to run a pod while the second return value indicates the
// predicate failure reasons if the node has insufficient resources to run the pod.
// All failure reasons are accumulated (not short-circuited) so callers can see
// every insufficient dimension at once.
func PodFitsResources(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	node := nodeInfo.Node()
	if node == nil {
		return false, nil, fmt.Errorf("node not found")
	}

	var predicateFails []algorithm.PredicateFailureReason
	// Pod count is checked even when the pod requests no resources.
	allowedPodNumber := nodeInfo.AllowedPodNumber()
	if len(nodeInfo.Pods())+1 > allowedPodNumber {
		predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourcePods, 1, int64(len(nodeInfo.Pods())), int64(allowedPodNumber)))
	}

	// No extended resources should be ignored by default.
	ignoredExtendedResources := sets.NewString()

	var podRequest *schedulernodeinfo.Resource
	if predicateMeta, ok := meta.(*predicateMetadata); ok {
		podRequest = predicateMeta.podRequest
		if predicateMeta.ignoredExtendedResources != nil {
			ignoredExtendedResources = predicateMeta.ignoredExtendedResources
		}
	} else {
		// We couldn't parse metadata - fallback to computing it.
		podRequest = GetResourceRequest(pod)
	}
	// Fast path: a pod requesting nothing can only fail the pod-count check above.
	if podRequest.MilliCPU == 0 &&
		podRequest.Memory == 0 &&
		podRequest.EphemeralStorage == 0 &&
		len(podRequest.ScalarResources) == 0 {
		return len(predicateFails) == 0, predicateFails, nil
	}

	allocatable := nodeInfo.AllocatableResource()
	if allocatable.MilliCPU < podRequest.MilliCPU+nodeInfo.RequestedResource().MilliCPU {
		predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceCPU, podRequest.MilliCPU, nodeInfo.RequestedResource().MilliCPU, allocatable.MilliCPU))
	}
	if allocatable.Memory < podRequest.Memory+nodeInfo.RequestedResource().Memory {
		predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceMemory, podRequest.Memory, nodeInfo.RequestedResource().Memory, allocatable.Memory))
	}
	if allocatable.EphemeralStorage < podRequest.EphemeralStorage+nodeInfo.RequestedResource().EphemeralStorage {
		predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceEphemeralStorage, podRequest.EphemeralStorage, nodeInfo.RequestedResource().EphemeralStorage, allocatable.EphemeralStorage))
	}

	for rName, rQuant := range podRequest.ScalarResources {
		if v1helper.IsExtendedResourceName(rName) {
			// If this resource is one of the extended resources that should be
			// ignored, we will skip checking it.
			if ignoredExtendedResources.Has(string(rName)) {
				continue
			}
		}
		if allocatable.ScalarResources[rName] < rQuant+nodeInfo.RequestedResource().ScalarResources[rName] {
			predicateFails = append(predicateFails, NewInsufficientResourceError(rName, podRequest.ScalarResources[rName], nodeInfo.RequestedResource().ScalarResources[rName], allocatable.ScalarResources[rName]))
		}
	}

	if klog.V(10) {
		if len(predicateFails) == 0 {
			// We explicitly don't do klog.V(10).Infof() to avoid computing all the parameters if this is
			// not logged. There is visible performance gain from it.
			klog.Infof("Schedule Pod %+v on Node %+v is allowed, Node is running only %v out of %v Pods.",
				podName(pod), node.Name, len(nodeInfo.Pods()), allowedPodNumber)
		}
	}
	return len(predicateFails) == 0, predicateFails, nil
}
// nodeMatchesNodeSelectorTerms checks if a node's labels satisfy a list of node selector terms,
// terms are ORed, and an empty list of terms will match nothing.
func nodeMatchesNodeSelectorTerms(node *v1.Node, nodeSelectorTerms []v1.NodeSelectorTerm) bool {
	// Build the field set from the registered selector keys so terms may match
	// on node fields as well as labels.
	nodeFields := map[string]string{}
	for k, f := range algorithm.NodeFieldSelectorKeys {
		nodeFields[k] = f(node)
	}
	return v1helper.MatchNodeSelectorTerms(nodeSelectorTerms, labels.Set(node.Labels), fields.Set(nodeFields))
}
// podMatchesNodeSelectorAndAffinityTerms checks whether the pod is schedulable onto nodes according to
// the requirements in both NodeAffinity and nodeSelector.
func podMatchesNodeSelectorAndAffinityTerms(pod *v1.Pod, node *v1.Node) bool {
	// Check if node.Labels match pod.Spec.NodeSelector.
	// nodeSelector is a hard requirement: a mismatch rejects immediately.
	if len(pod.Spec.NodeSelector) > 0 {
		selector := labels.SelectorFromSet(pod.Spec.NodeSelector)
		if !selector.Matches(labels.Set(node.Labels)) {
			return false
		}
	}

	// 1. nil NodeSelector matches all nodes (i.e. does not filter out any nodes)
	// 2. nil []NodeSelectorTerm (equivalent to non-nil empty NodeSelector) matches no nodes
	// 3. zero-length non-nil []NodeSelectorTerm matches no nodes also, just for simplicity
	// 4. nil []NodeSelectorRequirement (equivalent to non-nil empty NodeSelectorTerm) matches no nodes
	// 5. zero-length non-nil []NodeSelectorRequirement matches no nodes also, just for simplicity
	// 6. non-nil empty NodeSelectorRequirement is not allowed
	nodeAffinityMatches := true
	affinity := pod.Spec.Affinity
	if affinity != nil && affinity.NodeAffinity != nil {
		nodeAffinity := affinity.NodeAffinity
		// if no required NodeAffinity requirements, will do no-op, means select all nodes.
		// TODO: Replace next line with subsequent commented-out line when implement RequiredDuringSchedulingRequiredDuringExecution.
		if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
			// if nodeAffinity.RequiredDuringSchedulingRequiredDuringExecution == nil && nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
			return true
		}

		// Match node selector for requiredDuringSchedulingRequiredDuringExecution.
		// TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution.
		// if nodeAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil {
		//	nodeSelectorTerms := nodeAffinity.RequiredDuringSchedulingRequiredDuringExecution.NodeSelectorTerms
		//	klog.V(10).Infof("Match for RequiredDuringSchedulingRequiredDuringExecution node selector terms %+v", nodeSelectorTerms)
		//	nodeAffinityMatches = nodeMatchesNodeSelectorTerms(node, nodeSelectorTerms)
		// }

		// Match node selector for requiredDuringSchedulingIgnoredDuringExecution.
		if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
			nodeSelectorTerms := nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
			klog.V(10).Infof("Match for RequiredDuringSchedulingIgnoredDuringExecution node selector terms %+v", nodeSelectorTerms)
			nodeAffinityMatches = nodeAffinityMatches && nodeMatchesNodeSelectorTerms(node, nodeSelectorTerms)
		}

	}
	// True when the pod declared no affinity at all (selector already passed above).
	return nodeAffinityMatches
}
// PodMatchNodeSelector checks if a pod node selector matches the node label.
func PodMatchNodeSelector(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	node := nodeInfo.Node()
	if node == nil {
		return false, nil, fmt.Errorf("node not found")
	}
	// Fail fast when the node does not satisfy the pod's selector/affinity requirements.
	if !podMatchesNodeSelectorAndAffinityTerms(pod, node) {
		return false, []algorithm.PredicateFailureReason{ErrNodeSelectorNotMatch}, nil
	}
	return true, nil, nil
}
// PodFitsHost checks if a pod spec node name matches the current node.
func PodFitsHost(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	// A pod without an explicit node name fits any host.
	if pod.Spec.NodeName == "" {
		return true, nil, nil
	}
	node := nodeInfo.Node()
	if node == nil {
		return false, nil, fmt.Errorf("node not found")
	}
	if pod.Spec.NodeName != node.Name {
		return false, []algorithm.PredicateFailureReason{ErrPodNotMatchHostName}, nil
	}
	return true, nil, nil
}
// NodeLabelChecker contains information to check node labels for a predicate.
type NodeLabelChecker struct {
	// labels is the set of label keys whose presence (or absence) is checked on the node.
	labels []string
	// presence selects the policy: true requires every key in labels to exist on the node,
	// false requires every key to be absent.
	presence bool
}
// NewNodeLabelPredicate creates a predicate which evaluates whether a pod can fit based on the
// node labels which match a filter that it requests.
func NewNodeLabelPredicate(labels []string, presence bool) algorithm.FitPredicate {
	checker := &NodeLabelChecker{labels: labels, presence: presence}
	return checker.CheckNodeLabelPresence
}
// CheckNodeLabelPresence checks whether all of the specified labels exists on a node or not, regardless of their value
// If "presence" is false, then returns false if any of the requested labels matches any of the node's labels,
// otherwise returns true.
// If "presence" is true, then returns false if any of the requested labels does not match any of the node's labels,
// otherwise returns true.
//
// Consider the cases where the nodes are placed in regions/zones/racks and these are identified by labels
// In some cases, it is required that only nodes that are part of ANY of the defined regions/zones/racks be selected
//
// Alternately, eliminating nodes that have a certain label, regardless of value, is also useful
// A node may have a label with "retiring" as key and the date as the value
// and it may be desirable to avoid scheduling new pods on this node
func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	node := nodeInfo.Node()
	if node == nil {
		return false, nil, fmt.Errorf("node not found")
	}
	nodeLabels := labels.Set(node.Labels)
	for _, label := range n.labels {
		// The predicate is violated whenever the actual presence of the label
		// disagrees with the required presence policy.
		if nodeLabels.Has(label) != n.presence {
			return false, []algorithm.PredicateFailureReason{ErrNodeLabelPresenceViolated}, nil
		}
	}
	return true, nil, nil
}
// ServiceAffinity defines a struct used for create service affinity predicates.
type ServiceAffinity struct {
	// podLister lists pods, used to find peers of the incoming pod's service.
	podLister algorithm.PodLister
	// serviceLister resolves which services select a given pod.
	serviceLister algorithm.ServiceLister
	// nodeInfo resolves node objects by name (for introspecting peer pods' nodes).
	nodeInfo NodeInfo
	// labels is the set of node label keys that must be homogeneous across a service.
	labels []string
}
// serviceAffinityMetadataProducer should be run once by the scheduler before looping through the Predicate. It is a helper function that
// only should be referenced by NewServiceAffinityPredicate.
// It populates pm with (a) the services that select pm.pod and (b) the same-namespace
// pods carrying pm.pod's labels; both are consumed later by checkServiceAffinity.
func (s *ServiceAffinity) serviceAffinityMetadataProducer(pm *predicateMetadata) {
	if pm.pod == nil {
		klog.Errorf("Cannot precompute service affinity, a pod is required to calculate service affinity.")
		return
	}
	pm.serviceAffinityInUse = true
	var errSvc, errList error
	// Store services which match the pod.
	pm.serviceAffinityMatchingPodServices, errSvc = s.serviceLister.GetPodServices(pm.pod)
	// List all pods (cluster-wide) whose labels match the pod's own labels.
	selector := CreateSelectorFromLabels(pm.pod.Labels)
	allMatches, errList := s.podLister.List(selector)

	// In the future maybe we will return them as part of the function.
	// NOTE: errors are logged but deliberately not propagated — the caller proceeds
	// with whatever (possibly partial) data was fetched.
	if errSvc != nil || errList != nil {
		klog.Errorf("Some Error were found while precomputing svc affinity: \nservices:%v , \npods:%v", errSvc, errList)
	}
	// consider only the pods that belong to the same namespace
	pm.serviceAffinityMatchingPodList = FilterPodsByNamespace(allMatches, pm.pod.Namespace)
}
// NewServiceAffinityPredicate creates a ServiceAffinity.
// It returns both the fit predicate and the metadata producer that precomputes
// the service/pod lists the predicate consumes.
func NewServiceAffinityPredicate(podLister algorithm.PodLister, serviceLister algorithm.ServiceLister, nodeInfo NodeInfo, labels []string) (algorithm.FitPredicate, PredicateMetadataProducer) {
	sa := &ServiceAffinity{
		podLister:     podLister,
		serviceLister: serviceLister,
		nodeInfo:      nodeInfo,
		labels:        labels,
	}
	return sa.checkServiceAffinity, sa.serviceAffinityMetadataProducer
}
// checkServiceAffinity is a predicate which matches nodes in such a way to force that
// ServiceAffinity.labels are homogenous for pods that are scheduled to a node.
// (i.e. it returns true IFF this pod can be added to this node such that all other pods in
// the same service are running on nodes with the exact same ServiceAffinity.label values).
//
// For example:
// If the first pod of a service was scheduled to a node with label "region=foo",
// all the other subsequent pods belong to the same service will be schedule on
// nodes with the same "region=foo" label.
//
// Details:
//
// If (the svc affinity labels are not a subset of pod's label selectors )
// The pod has all information necessary to check affinity, the pod's label selector is sufficient to calculate
// the match.
// Otherwise:
// Create an "implicit selector" which guarantees pods will land on nodes with similar values
// for the affinity labels.
//
// To do this, we "reverse engineer" a selector by introspecting existing pods running under the same service+namespace.
// These backfilled labels in the selector "L" are defined like so:
// - L is a label that the ServiceAffinity object needs as a matching constraints.
// - L is not defined in the pod itself already.
// - and SOME pod, from a service, in the same namespace, ALREADY scheduled onto a node, has a matching value.
//
// WARNING: This Predicate is NOT guaranteed to work if some of the predicateMetadata data isn't precomputed...
// For that reason it is not exported, i.e. it is highly coupled to the implementation of the FitPredicate construction.
func (s *ServiceAffinity) checkServiceAffinity(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	var services []*v1.Service
	var pods []*v1.Pod
	if pm, ok := meta.(*predicateMetadata); ok && (pm.serviceAffinityMatchingPodList != nil || pm.serviceAffinityMatchingPodServices != nil) {
		services = pm.serviceAffinityMatchingPodServices
		pods = pm.serviceAffinityMatchingPodList
	} else {
		// Make the predicate resilient in case metadata is missing.
		// NOTE: pm here is the if-statement-scoped variable declared above; it is
		// re-assigned to a fresh metadata object and populated on the fly.
		pm = &predicateMetadata{pod: pod}
		s.serviceAffinityMetadataProducer(pm)
		pods, services = pm.serviceAffinityMatchingPodList, pm.serviceAffinityMatchingPodServices
	}
	// Drop peers that are (or were) on this very node so only other nodes' pods inform the labels.
	filteredPods := nodeInfo.FilterOutPods(pods)
	node := nodeInfo.Node()
	if node == nil {
		return false, nil, fmt.Errorf("node not found")
	}
	// check if the pod being scheduled has the affinity labels specified in its NodeSelector
	affinityLabels := FindLabelsInSet(s.labels, labels.Set(pod.Spec.NodeSelector))
	// Step 1: If we don't have all constraints, introspect nodes to find the missing constraints.
	if len(s.labels) > len(affinityLabels) {
		if len(services) > 0 {
			if len(filteredPods) > 0 {
				// Backfill from the node of the first already-scheduled peer pod.
				nodeWithAffinityLabels, err := s.nodeInfo.GetNodeInfo(filteredPods[0].Spec.NodeName)
				if err != nil {
					return false, nil, err
				}
				AddUnsetLabelsToMap(affinityLabels, s.labels, labels.Set(nodeWithAffinityLabels.Labels))
			}
		}
	}
	// Step 2: Finally complete the affinity predicate based on whatever set of predicates we were able to find.
	if CreateSelectorFromLabels(affinityLabels).Matches(labels.Set(node.Labels)) {
		return true, nil, nil
	}
	return false, []algorithm.PredicateFailureReason{ErrServiceAffinityViolated}, nil
}
// PodFitsHostPorts checks if a node has free ports for the requested pod ports.
func PodFitsHostPorts(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	var wantPorts []*v1.ContainerPort
	if predicateMeta, ok := meta.(*predicateMetadata); ok {
		// Fast path: ports were precomputed by the metadata producer.
		wantPorts = predicateMeta.podPorts
	} else {
		// We couldn't parse metadata - fallback to computing it.
		wantPorts = schedutil.GetContainerPorts(pod)
	}
	if len(wantPorts) == 0 {
		return true, nil, nil
	}

	// Reject the node if any requested host port collides with one already in use.
	if portsConflict(nodeInfo.UsedPorts(), wantPorts) {
		return false, []algorithm.PredicateFailureReason{ErrPodNotFitsHostPorts}, nil
	}
	return true, nil, nil
}
// haveOverlap reports whether the two string slices share at least one common
// element. Runs in O(len(a1)+len(a2)) time using a set built from the first slice.
func haveOverlap(a1, a2 []string) bool {
	seen := make(map[string]bool, len(a1))
	for _, v := range a1 {
		seen[v] = true
	}
	for _, v := range a2 {
		if seen[v] {
			return true
		}
	}
	return false
}
// GeneralPredicates checks whether noncriticalPredicates and EssentialPredicates pass. noncriticalPredicates are the predicates
// that only non-critical pods need and EssentialPredicates are the predicates that all pods, including critical pods, need.
// Failure reasons from both groups are accumulated; any error aborts immediately.
func GeneralPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	var failures []algorithm.PredicateFailureReason
	checks := []func(*v1.Pod, algorithm.PredicateMetadata, *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error){
		noncriticalPredicates,
		EssentialPredicates,
	}
	for _, check := range checks {
		fit, reasons, err := check(pod, meta, nodeInfo)
		if err != nil {
			return false, failures, err
		}
		if !fit {
			failures = append(failures, reasons...)
		}
	}
	return len(failures) == 0, failures, nil
}
// noncriticalPredicates are the predicates that only non-critical pods need.
// Currently this is just the resource-fit check.
func noncriticalPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	fit, reasons, err := PodFitsResources(pod, meta, nodeInfo)
	if err != nil {
		return false, nil, err
	}
	if fit {
		return true, nil, nil
	}
	// Accumulate failure reasons into a fresh slice so callers own the result.
	var failures []algorithm.PredicateFailureReason
	failures = append(failures, reasons...)
	return len(failures) == 0, failures, nil
}
// EssentialPredicates are the predicates that all pods, including critical pods, need.
// Failure reasons from every check are accumulated; any error aborts immediately.
func EssentialPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	var failures []algorithm.PredicateFailureReason
	// TODO: PodFitsHostPorts is essential for now, but kubelet should ideally
	// preempt pods to free up host ports too
	checks := []func(*v1.Pod, algorithm.PredicateMetadata, *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error){
		PodFitsHost,
		PodFitsHostPorts,
		PodMatchNodeSelector,
	}
	for _, check := range checks {
		fit, reasons, err := check(pod, meta, nodeInfo)
		if err != nil {
			return false, failures, err
		}
		if !fit {
			failures = append(failures, reasons...)
		}
	}
	return len(failures) == 0, failures, nil
}
// PodAffinityChecker contains information to check pod affinity.
type PodAffinityChecker struct {
	// info resolves node objects by name (used to locate the nodes of existing pods).
	info NodeInfo
	// podLister lists pods for the slow path when predicate metadata is unavailable.
	podLister algorithm.PodLister
}
// NewPodAffinityPredicate creates a PodAffinityChecker.
func NewPodAffinityPredicate(info NodeInfo, podLister algorithm.PodLister) algorithm.FitPredicate {
	checker := &PodAffinityChecker{info: info, podLister: podLister}
	return checker.InterPodAffinityMatches
}
// InterPodAffinityMatches checks if a pod can be scheduled on the specified node with pod affinity/anti-affinity configuration.
// First return value indicates whether a pod can be scheduled on the specified node while the second return value indicates the
// predicate failure reasons if the pod cannot be scheduled on the specified node.
func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	node := nodeInfo.Node()
	if node == nil {
		return false, nil, fmt.Errorf("node not found")
	}
	// Check that scheduling the pod here would not break any anti-affinity term of
	// pods already running on this node.
	// NOTE: previously the error result was bound to a variable named `error`,
	// shadowing the predeclared error type; renamed to `err` for clarity.
	if failedPredicate, err := c.satisfiesExistingPodsAntiAffinity(pod, meta, nodeInfo); failedPredicate != nil {
		failedPredicates := []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, failedPredicate}
		return false, failedPredicates, err
	}

	// Now check if <pod> requirements will be satisfied on this node.
	affinity := pod.Spec.Affinity
	if affinity == nil || (affinity.PodAffinity == nil && affinity.PodAntiAffinity == nil) {
		return true, nil, nil
	}
	if failedPredicate, err := c.satisfiesPodsAffinityAntiAffinity(pod, meta, nodeInfo, affinity); failedPredicate != nil {
		failedPredicates := []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, failedPredicate}
		return false, failedPredicates, err
	}

	if klog.V(10) {
		// We explicitly don't do klog.V(10).Infof() to avoid computing all the parameters if this is
		// not logged. There is visible performance gain from it.
		klog.Infof("Schedule Pod %+v on Node %+v is allowed, pod (anti)affinity constraints satisfied",
			podName(pod), node.Name)
	}
	return true, nil, nil
}
// podMatchesPodAffinityTerms checks if the "targetPod" matches the given "terms"
// of the "pod" on the given "nodeInfo".Node(). It returns three values: 1) whether
// targetPod matches all the terms and their topologies, 2) whether targetPod
// matches all the terms label selector and namespaces (AKA term properties),
// 3) any error.
func (c *PodAffinityChecker) podMatchesPodAffinityTerms(pod, targetPod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo, terms []v1.PodAffinityTerm) (bool, bool, error) {
	if len(terms) == 0 {
		return false, false, fmt.Errorf("terms array is empty")
	}
	// First gate: targetPod must match the namespace+selector of every term.
	props, err := getAffinityTermProperties(pod, terms)
	if err != nil {
		return false, false, err
	}
	if !podMatchesAllAffinityTermProperties(targetPod, props) {
		return false, false, nil
	}
	// Namespace and selector of the terms have matched. Now we check topology of the terms.
	targetPodNode, err := c.info.GetNodeInfo(targetPod.Spec.NodeName)
	if err != nil {
		return false, false, err
	}
	for _, term := range terms {
		if len(term.TopologyKey) == 0 {
			return false, false, fmt.Errorf("empty topologyKey is not allowed except for PreferredDuringScheduling pod anti-affinity")
		}
		// Topology mismatch: properties matched (second return true) but full match fails.
		if !priorityutil.NodesHaveSameTopologyKey(nodeInfo.Node(), targetPodNode, term.TopologyKey) {
			return false, true, nil
		}
	}
	return true, true, nil
}
// GetPodAffinityTerms gets pod affinity terms by a pod affinity object.
// Returns nil when podAffinity is nil or it declares no required terms.
func GetPodAffinityTerms(podAffinity *v1.PodAffinity) (terms []v1.PodAffinityTerm) {
	if podAffinity == nil {
		return nil
	}
	if required := podAffinity.RequiredDuringSchedulingIgnoredDuringExecution; len(required) != 0 {
		terms = required
	}
	// TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution.
	//if len(podAffinity.RequiredDuringSchedulingRequiredDuringExecution) != 0 {
	//	terms = append(terms, podAffinity.RequiredDuringSchedulingRequiredDuringExecution...)
	//}
	return terms
}
// GetPodAntiAffinityTerms gets pod affinity terms by a pod anti-affinity.
// Returns nil when podAntiAffinity is nil or it declares no required terms.
func GetPodAntiAffinityTerms(podAntiAffinity *v1.PodAntiAffinity) (terms []v1.PodAffinityTerm) {
	if podAntiAffinity == nil {
		return nil
	}
	if required := podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution; len(required) != 0 {
		terms = required
	}
	// TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution.
	//if len(podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution) != 0 {
	//	terms = append(terms, podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution...)
	//}
	return terms
}
// getMatchingAntiAffinityTopologyPairsOfPod calculates the following for "existingPod" on given node:
// (1) Whether it has PodAntiAffinity
// (2) Whether ANY AffinityTerm matches the incoming pod
// Returns the topology pairs (key/value of the node label named by each matching term)
// mapped to existingPod, or nil when existingPod has no anti-affinity at all.
func getMatchingAntiAffinityTopologyPairsOfPod(newPod *v1.Pod, existingPod *v1.Pod, node *v1.Node) (*topologyPairsMaps, error) {
	affinity := existingPod.Spec.Affinity
	if affinity == nil || affinity.PodAntiAffinity == nil {
		return nil, nil
	}

	topologyMaps := newTopologyPairsMaps()
	for _, term := range GetPodAntiAffinityTerms(affinity.PodAntiAffinity) {
		namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(existingPod, &term)
		selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
		if err != nil {
			return nil, err
		}
		if priorityutil.PodMatchesTermsNamespaceAndSelector(newPod, namespaces, selector) {
			// Only record the pair when the node actually carries the topology key.
			if topologyValue, ok := node.Labels[term.TopologyKey]; ok {
				pair := topologyPair{key: term.TopologyKey, value: topologyValue}
				topologyMaps.addTopologyPair(pair, existingPod)
			}
		}
	}
	return topologyMaps, nil
}
// getMatchingAntiAffinityTopologyPairsOfPods aggregates, over all existingPods, the
// topology pairs whose anti-affinity terms match the incoming pod. Pods whose node
// can no longer be found are skipped; any other lookup error aborts.
func (c *PodAffinityChecker) getMatchingAntiAffinityTopologyPairsOfPods(pod *v1.Pod, existingPods []*v1.Pod) (*topologyPairsMaps, error) {
	topologyMaps := newTopologyPairsMaps()

	for _, existingPod := range existingPods {
		existingPodNode, err := c.info.GetNodeInfo(existingPod.Spec.NodeName)
		if err != nil {
			if apierrors.IsNotFound(err) {
				// Node may have been deleted; best effort — ignore this pod.
				klog.Errorf("Node not found, %v", existingPod.Spec.NodeName)
				continue
			}
			return nil, err
		}
		existingPodTopologyMaps, err := getMatchingAntiAffinityTopologyPairsOfPod(pod, existingPod, existingPodNode)
		if err != nil {
			return nil, err
		}
		topologyMaps.appendMaps(existingPodTopologyMaps)
	}
	return topologyMaps, nil
}
// satisfiesExistingPodsAntiAffinity checks if scheduling the pod onto this node would
// break any anti-affinity terms indicated by the existing pods.
// Returns a non-nil failure reason when the pod cannot be placed here; the error
// (if any) carries the underlying cause.
func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (algorithm.PredicateFailureReason, error) {
	node := nodeInfo.Node()
	if node == nil {
		return ErrExistingPodsAntiAffinityRulesNotMatch, fmt.Errorf("Node is nil")
	}
	var topologyMaps *topologyPairsMaps
	if predicateMeta, ok := meta.(*predicateMetadata); ok {
		// Fast path: use the precomputed anti-affinity topology map.
		topologyMaps = predicateMeta.topologyPairsAntiAffinityPodsMap
	} else {
		// Filter out pods whose nodeName is equal to nodeInfo.node.Name, but are not
		// present in nodeInfo. Pods on other nodes pass the filter.
		filteredPods, err := c.podLister.FilteredList(nodeInfo.Filter, labels.Everything())
		if err != nil {
			errMessage := fmt.Sprintf("Failed to get all pods, %+v", err)
			klog.Error(errMessage)
			return ErrExistingPodsAntiAffinityRulesNotMatch, errors.New(errMessage)
		}
		if topologyMaps, err = c.getMatchingAntiAffinityTopologyPairsOfPods(pod, filteredPods); err != nil {
			errMessage := fmt.Sprintf("Failed to get all terms that pod %+v matches, err: %+v", podName(pod), err)
			klog.Error(errMessage)
			return ErrExistingPodsAntiAffinityRulesNotMatch, errors.New(errMessage)
		}
	}

	// Iterate over topology pairs to get any of the pods being affected by
	// the scheduled pod anti-affinity terms
	// (any node label that forms a recorded topology pair means some existing pod's
	// anti-affinity would be violated by placing the pod here).
	for topologyKey, topologyValue := range node.Labels {
		if topologyMaps.topologyPairToPods[topologyPair{key: topologyKey, value: topologyValue}] != nil {
			klog.V(10).Infof("Cannot schedule pod %+v onto node %v", podName(pod), node.Name)
			return ErrExistingPodsAntiAffinityRulesNotMatch, nil
		}
	}
	if klog.V(10) {
		// We explicitly don't do klog.V(10).Infof() to avoid computing all the parameters if this is
		// not logged. There is visible performance gain from it.
		klog.Infof("Schedule Pod %+v on Node %+v is allowed, existing pods anti-affinity terms satisfied.",
			podName(pod), node.Name)
	}
	return nil, nil
}
// nodeMatchesAllTopologyTerms checks whether "nodeInfo" matches
// topology of all the "terms" for the given "pod".
func (c *PodAffinityChecker) nodeMatchesAllTopologyTerms(pod *v1.Pod, topologyPairs *topologyPairsMaps, nodeInfo *schedulernodeinfo.NodeInfo, terms []v1.PodAffinityTerm) bool {
	node := nodeInfo.Node()
	for _, term := range terms {
		topologyValue, hasKey := node.Labels[term.TopologyKey]
		if !hasKey {
			// Node lacks the topology key entirely — term cannot match.
			return false
		}
		pair := topologyPair{key: term.TopologyKey, value: topologyValue}
		if _, matched := topologyPairs.topologyPairToPods[pair]; !matched {
			return false
		}
	}
	return true
}
// nodeMatchesAnyTopologyTerm checks whether "nodeInfo" matches
// topology of any "term" for the given "pod".
func (c *PodAffinityChecker) nodeMatchesAnyTopologyTerm(pod *v1.Pod, topologyPairs *topologyPairsMaps, nodeInfo *schedulernodeinfo.NodeInfo, terms []v1.PodAffinityTerm) bool {
	node := nodeInfo.Node()
	for _, term := range terms {
		topologyValue, hasKey := node.Labels[term.TopologyKey]
		if !hasKey {
			// Node lacks the topology key — this term cannot match; try the next one.
			continue
		}
		pair := topologyPair{key: term.TopologyKey, value: topologyValue}
		if _, matched := topologyPairs.topologyPairToPods[pair]; matched {
			return true
		}
	}
	return false
}
// satisfiesPodsAffinityAntiAffinity checks if scheduling the pod onto this node would
// break any (anti-)affinity term of this pod.
// Returns a non-nil failure reason when placement is not allowed; the error (if any)
// carries the underlying cause.
func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
	meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo,
	affinity *v1.Affinity) (algorithm.PredicateFailureReason, error) {
	node := nodeInfo.Node()
	if node == nil {
		return ErrPodAffinityRulesNotMatch, fmt.Errorf("Node is nil")
	}
	if predicateMeta, ok := meta.(*predicateMetadata); ok {
		// Fast path: consult the precomputed topology-pair maps.
		// Check all affinity terms.
		topologyPairsPotentialAffinityPods := predicateMeta.topologyPairsPotentialAffinityPods
		if affinityTerms := GetPodAffinityTerms(affinity.PodAffinity); len(affinityTerms) > 0 {
			matchExists := c.nodeMatchesAllTopologyTerms(pod, topologyPairsPotentialAffinityPods, nodeInfo, affinityTerms)
			if !matchExists {
				// This pod may the first pod in a series that have affinity to themselves. In order
				// to not leave such pods in pending state forever, we check that if no other pod
				// in the cluster matches the namespace and selector of this pod and the pod matches
				// its own terms, then we allow the pod to pass the affinity check.
				if !(len(topologyPairsPotentialAffinityPods.topologyPairToPods) == 0 && targetPodMatchesAffinityOfPod(pod, pod)) {
					klog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAffinity",
						podName(pod), node.Name)
					return ErrPodAffinityRulesNotMatch, nil
				}
			}
		}

		// Check all anti-affinity terms.
		topologyPairsPotentialAntiAffinityPods := predicateMeta.topologyPairsPotentialAntiAffinityPods
		if antiAffinityTerms := GetPodAntiAffinityTerms(affinity.PodAntiAffinity); len(antiAffinityTerms) > 0 {
			matchExists := c.nodeMatchesAnyTopologyTerm(pod, topologyPairsPotentialAntiAffinityPods, nodeInfo, antiAffinityTerms)
			if matchExists {
				klog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAntiAffinity",
					podName(pod), node.Name)
				return ErrPodAntiAffinityRulesNotMatch, nil
			}
		}
	} else { // We don't have precomputed metadata. We have to follow a slow path to check affinity terms.
		filteredPods, err := c.podLister.FilteredList(nodeInfo.Filter, labels.Everything())
		if err != nil {
			return ErrPodAffinityRulesNotMatch, err
		}

		affinityTerms := GetPodAffinityTerms(affinity.PodAffinity)
		antiAffinityTerms := GetPodAntiAffinityTerms(affinity.PodAntiAffinity)
		// matchFound: some existing pod satisfies the affinity terms (incl. topology).
		// termsSelectorMatchFound: some existing pod matched namespace+selector only.
		matchFound, termsSelectorMatchFound := false, false
		for _, targetPod := range filteredPods {
			// Check all affinity terms.
			if !matchFound && len(affinityTerms) > 0 {
				affTermsMatch, termsSelectorMatch, err := c.podMatchesPodAffinityTerms(pod, targetPod, nodeInfo, affinityTerms)
				if err != nil {
					errMessage := fmt.Sprintf("Cannot schedule pod %+v onto node %v, because of PodAffinity, err: %v", podName(pod), node.Name, err)
					klog.Error(errMessage)
					return ErrPodAffinityRulesNotMatch, errors.New(errMessage)
				}
				if termsSelectorMatch {
					termsSelectorMatchFound = true
				}
				if affTermsMatch {
					matchFound = true
				}
			}

			// Check all anti-affinity terms.
			if len(antiAffinityTerms) > 0 {
				antiAffTermsMatch, _, err := c.podMatchesPodAffinityTerms(pod, targetPod, nodeInfo, antiAffinityTerms)
				if err != nil || antiAffTermsMatch {
					klog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAntiAffinityTerm, err: %v",
						podName(pod), node.Name, err)
					return ErrPodAntiAffinityRulesNotMatch, nil
				}
			}
		}

		if !matchFound && len(affinityTerms) > 0 {
			// We have not been able to find any matches for the pod's affinity terms.
			// This pod may be the first pod in a series that have affinity to themselves. In order
			// to not leave such pods in pending state forever, we check that if no other pod
			// in the cluster matches the namespace and selector of this pod and the pod matches
			// its own terms, then we allow the pod to pass the affinity check.
			if termsSelectorMatchFound {
				klog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAffinity",
					podName(pod), node.Name)
				return ErrPodAffinityRulesNotMatch, nil
			}
			// Check if pod matches its own affinity properties (namespace and label selector).
			if !targetPodMatchesAffinityOfPod(pod, pod) {
				klog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAffinity",
					podName(pod), node.Name)
				return ErrPodAffinityRulesNotMatch, nil
			}
		}
	}

	if klog.V(10) {
		// We explicitly don't do klog.V(10).Infof() to avoid computing all the parameters if this is
		// not logged. There is visible performance gain from it.
		klog.Infof("Schedule Pod %+v on Node %+v is allowed, pod affinity/anti-affinity constraints satisfied.",
			podName(pod), node.Name)
	}
	return nil, nil
}
// CheckNodeUnschedulablePredicate checks if a pod can be scheduled on a node with Unschedulable spec.
func CheckNodeUnschedulablePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	if nodeInfo == nil || nodeInfo.Node() == nil {
		return false, []algorithm.PredicateFailureReason{ErrNodeUnknownCondition}, nil
	}

	// TODO (k82cn): deprecates `node.Spec.Unschedulable` in 1.13.
	if !nodeInfo.Node().Spec.Unschedulable {
		return true, nil, nil
	}

	// If pod tolerate unschedulable taint, it's also tolerate `node.Spec.Unschedulable`.
	unschedulableTaint := v1.Taint{
		Key:    schedulerapi.TaintNodeUnschedulable,
		Effect: v1.TaintEffectNoSchedule,
	}
	if v1helper.TolerationsTolerateTaint(pod.Spec.Tolerations, &unschedulableTaint) {
		return true, nil, nil
	}
	return false, []algorithm.PredicateFailureReason{ErrNodeUnschedulable}, nil
}
// PodToleratesNodeTaints checks if a pod tolerations can tolerate the node taints.
func PodToleratesNodeTaints(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	if nodeInfo == nil || nodeInfo.Node() == nil {
		return false, []algorithm.PredicateFailureReason{ErrNodeUnknownCondition}, nil
	}

	// PodToleratesNodeTaints is only interested in NoSchedule and NoExecute taints.
	schedulingTaintsOnly := func(t *v1.Taint) bool {
		switch t.Effect {
		case v1.TaintEffectNoSchedule, v1.TaintEffectNoExecute:
			return true
		}
		return false
	}
	return podToleratesNodeTaints(pod, nodeInfo, schedulingTaintsOnly)
}
// PodToleratesNodeNoExecuteTaints checks if a pod tolerations can tolerate the node's NoExecute taints.
func PodToleratesNodeNoExecuteTaints(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	// Only NoExecute taints are considered here.
	noExecuteOnly := func(t *v1.Taint) bool { return t.Effect == v1.TaintEffectNoExecute }
	return podToleratesNodeTaints(pod, nodeInfo, noExecuteOnly)
}
// podToleratesNodeTaints is the shared implementation behind the taint-toleration
// predicates; filter selects which taint effects are considered.
func podToleratesNodeTaints(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo, filter func(t *v1.Taint) bool) (bool, []algorithm.PredicateFailureReason, error) {
	taints, err := nodeInfo.Taints()
	if err != nil {
		return false, nil, err
	}

	if !v1helper.TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, taints, filter) {
		return false, []algorithm.PredicateFailureReason{ErrTaintsTolerationsNotMatch}, nil
	}
	return true, nil, nil
}
// isPodBestEffort checks if pod is scheduled with best-effort QoS
// (i.e. its computed QoS class is PodQOSBestEffort).
func isPodBestEffort(pod *v1.Pod) bool {
	return v1qos.GetPodQOS(pod) == v1.PodQOSBestEffort
}
// CheckNodeMemoryPressurePredicate checks if a pod can be scheduled on a node
// reporting memory pressure condition. Only BestEffort pods are rejected on such nodes.
func CheckNodeMemoryPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	bestEffort := false
	if predicateMeta, ok := meta.(*predicateMetadata); ok {
		bestEffort = predicateMeta.podBestEffort
	} else {
		// Metadata unavailable — recompute the QoS class directly.
		bestEffort = isPodBestEffort(pod)
	}

	// Reject only when the pod is BestEffort AND the node reports memory pressure.
	if bestEffort && nodeInfo.MemoryPressureCondition() == v1.ConditionTrue {
		return false, []algorithm.PredicateFailureReason{ErrNodeUnderMemoryPressure}, nil
	}
	return true, nil, nil
}
// CheckNodeDiskPressurePredicate checks if a pod can be scheduled on a node
// reporting disk pressure condition.
func CheckNodeDiskPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	// Any node not reporting disk pressure is acceptable.
	if nodeInfo.DiskPressureCondition() != v1.ConditionTrue {
		return true, nil, nil
	}
	return false, []algorithm.PredicateFailureReason{ErrNodeUnderDiskPressure}, nil
}
// CheckNodePIDPressurePredicate checks if a pod can be scheduled on a node
// reporting pid pressure condition.
func CheckNodePIDPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	// Any node not reporting PID pressure is acceptable.
	if nodeInfo.PIDPressureCondition() != v1.ConditionTrue {
		return true, nil, nil
	}
	return false, []algorithm.PredicateFailureReason{ErrNodeUnderPIDPressure}, nil
}
// CheckNodeConditionPredicate checks if a pod can be scheduled on a node reporting out of disk,
// network unavailable and not ready condition. Only node conditions are accounted in this predicate.
func CheckNodeConditionPredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	reasons := []algorithm.PredicateFailureReason{}
	if nodeInfo == nil || nodeInfo.Node() == nil {
		return false, []algorithm.PredicateFailureReason{ErrNodeUnknownCondition}, nil
	}

	node := nodeInfo.Node()
	for _, cond := range node.Status.Conditions {
		// We consider the node for scheduling only when its:
		// - NodeReady condition status is ConditionTrue,
		// - NodeOutOfDisk condition status is ConditionFalse,
		// - NodeNetworkUnavailable condition status is ConditionFalse.
		switch {
		case cond.Type == v1.NodeReady && cond.Status != v1.ConditionTrue:
			reasons = append(reasons, ErrNodeNotReady)
		case cond.Type == v1.NodeOutOfDisk && cond.Status != v1.ConditionFalse:
			reasons = append(reasons, ErrNodeOutOfDisk)
		case cond.Type == v1.NodeNetworkUnavailable && cond.Status != v1.ConditionFalse:
			reasons = append(reasons, ErrNodeNetworkUnavailable)
		}
	}

	if node.Spec.Unschedulable {
		reasons = append(reasons, ErrNodeUnschedulable)
	}
	return len(reasons) == 0, reasons, nil
}
// VolumeBindingChecker contains information to check a volume binding.
type VolumeBindingChecker struct {
	// binder performs the actual PV/PVC matching for a (pod, node) pair.
	binder *volumebinder.VolumeBinder
}
// NewVolumeBindingPredicate evaluates if a pod can fit due to the volumes it requests,
// for both bound and unbound PVCs.
//
// A bound PVC is satisfied when its PV's node affinity matches the given node.
// An unbound PVC is satisfied when some available PV exists that meets the PVC's
// requirements and whose node affinity matches the given node.
//
// The returned predicate succeeds only when every bound PVC has a node-compatible
// PV and every unbound PVC can be matched with an available, node-compatible PV.
func NewVolumeBindingPredicate(binder *volumebinder.VolumeBinder) algorithm.FitPredicate {
	checker := &VolumeBindingChecker{binder: binder}
	return checker.predicate
}
// predicate implements the volume-binding fit check; see NewVolumeBindingPredicate.
func (c *VolumeBindingChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	// The whole check is gated on the VolumeScheduling feature; without it the
	// predicate is a no-op that admits every node.
	if !utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
		return true, nil, nil
	}
	node := nodeInfo.Node()
	if node == nil {
		return false, nil, fmt.Errorf("node not found")
	}
	unboundSatisfied, boundSatisfied, err := c.binder.Binder.FindPodVolumes(pod, node)
	if err != nil {
		return false, nil, err
	}
	// Report both bound- and unbound-PVC conflicts together when both apply.
	failReasons := []algorithm.PredicateFailureReason{}
	if !boundSatisfied {
		klog.V(5).Infof("Bound PVs not satisfied for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
		failReasons = append(failReasons, ErrVolumeNodeConflict)
	}
	if !unboundSatisfied {
		klog.V(5).Infof("Couldn't find matching PVs for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
		failReasons = append(failReasons, ErrVolumeBindConflict)
	}
	if len(failReasons) != 0 {
		return false, failReasons, nil
	}
	// All volumes bound or matching PVs found for all unbound PVCs
	klog.V(5).Infof("All PVCs found matches for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
	return true, nil, nil
}
| juanvallejo/kubernetes | pkg/scheduler/algorithm/predicates/predicates.go | GO | apache-2.0 | 68,284 |
package main
import (
"encoding/json"
"fmt"
"net/http"
"os"
"strings"
"time"
)
// dockerHubTagMeta is the subset of the Docker Hub v2 tag endpoint response
// that this tool consumes.
type dockerHubTagMeta struct {
	// we don't care what's in these -- we just need to be able to count them
	Images []interface{} `json:"images"`
	// LastUpdated is the timestamp string as returned by the API; parsed
	// lazily by lastUpdatedTime().
	LastUpdated string `json:"last_updated"`
}
// lastUpdatedTime parses LastUpdated as RFC3339Nano; an empty or malformed
// value yields the zero time.Time.
func (meta dockerHubTagMeta) lastUpdatedTime() time.Time {
	if t, err := time.Parse(time.RFC3339Nano, meta.LastUpdated); err == nil {
		return t
	}
	return time.Time{}
}
// fetchDockerHubTagMeta retrieves tag metadata for repoTag from the Docker Hub
// v2 API. All failures (network error, non-2xx response, malformed JSON) are
// swallowed and reported as a zero-valued dockerHubTagMeta, keeping the
// callers' best-effort behavior.
func fetchDockerHubTagMeta(repoTag string) dockerHubTagMeta {
	var meta dockerHubTagMeta
	// latestizeRepoTag ensures a ":tag" suffix, so SplitN always yields two parts.
	repoTag = latestizeRepoTag(repoTag)
	parts := strings.SplitN(repoTag, ":", 2)
	repo, tag := parts[0], parts[1]
	resp, err := http.Get(fmt.Sprintf("https://hub.docker.com/v2/repositories/%s/tags/%s/", repo, tag))
	if err != nil {
		return meta
	}
	defer resp.Body.Close()
	// Fix: previously a non-2xx response (404, 429, ...) had its error body fed
	// straight to the JSON decoder; treat it as "no metadata" instead.
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return meta
	}
	if err := json.NewDecoder(resp.Body).Decode(&meta); err != nil {
		return meta
	}
	return meta
}
// dockerCreated returns the creation timestamp of a local image as reported by
// `docker inspect`. Any failure is logged to stderr and mapped to time.Now()
// so callers always receive a usable timestamp.
func dockerCreated(image string) time.Time {
	raw, err := dockerInspect("{{.Created}}", image)
	if err != nil {
		fmt.Fprintf(os.Stderr, "warning: error while fetching creation time of %q: %v\n", image, err)
		return time.Now()
	}
	trimmed := strings.TrimSpace(raw)
	t, parseErr := time.Parse(time.RFC3339Nano, trimmed)
	if parseErr != nil {
		fmt.Fprintf(os.Stderr, "warning: error while parsing creation time of %q (%q): %v\n", image, trimmed, parseErr)
		return time.Now()
	}
	return t
}
| chorrell/official-images | bashbrew/go/src/bashbrew/hub.go | GO | apache-2.0 | 1,396 |
using OfficeDevPnP.PowerShell.Commands.Base.PipeBinds;
using Microsoft.SharePoint.Client;
using System.Management.Automation;
using System;
using OfficeDevPnP.Core.Entities;
using Microsoft.SharePoint.Client.Taxonomy;
using System.Collections.Generic;
using OfficeDevPnP.PowerShell.CmdletHelpAttributes;
using System.Linq;
namespace OfficeDevPnP.PowerShell.Commands
{
[Cmdlet(VerbsCommon.Set, "SPODefaultColumnValues")]
[CmdletHelp("Sets default column values for a document library", DetailedDescription="Sets default column values for a document library, per folder, or for the root folder if the folder parameter has not been specified. Supports both text and taxonomy fields.")]
[CmdletExample(Code = "PS:> Set-SPODefaultColumnValues -List Documents -Field TaxKeyword -Value \"Company|Locations|Stockholm\"", SortOrder = 1, Remarks = "Sets a default value for the enterprise keywords field on a library to a term called \"Stockholm\", located in the \"Locations\" term set, which is part of the \"Company\" term group")]
[CmdletExample(Code = "PS:> Set-SPODefaultColumnValues -List Documents -Field MyTextField -Value \"DefaultValue\"", SortOrder = 2, Remarks = "Sets a default value for the MyTextField text field on a library to a value of \"DefaultValue\"")]
public class SetDefaultColumnValues : SPOWebCmdlet
{
    [Parameter(Mandatory = false, ValueFromPipeline = true, Position = 0, HelpMessage = "The ID, Name or Url of the list.")]
    public ListPipeBind List;

    [Parameter(Mandatory = true, HelpMessage="The internal name, id or a reference to a field")]
    public FieldPipeBind Field;

    [Parameter(Mandatory = true, HelpMessage="A list of values. In case of a text field the values will be concatenated, separated by a semi-column. In case of a taxonomy field multiple values will added")]
    public string[] Value;

    [Parameter(Mandatory = false, HelpMessage="A library relative folder path, if not specified it will set the default column values on the root folder of the library ('/')")]
    public string Folder = "/";

    protected override void ExecuteCmdlet()
    {
        List list = null;
        if (List != null)
        {
            list = SelectedWeb.GetList(List);
        }
        if (list == null)
        {
            // Same silent no-op behavior as before when the list cannot be resolved.
            return;
        }
        if (list.BaseTemplate != (int)ListTemplateType.DocumentLibrary)
        {
            WriteWarning("List is not a document library");
            return;
        }

        Field field = ResolveField(list);
        if (field == null)
        {
            return;
        }

        IDefaultColumnValue defaultColumnValue = field.TypeAsString == "Text"
            ? BuildTextValue(field)
            : BuildTaxonomyValue(field);

        // Bug fix: previously a null entry (unsupported field type, or no term
        // resolvable from the supplied paths) was passed straight into
        // SetDefaultColumnValues; guard and warn instead.
        if (defaultColumnValue == null)
        {
            WriteWarning(string.Format("Could not build a default column value for field '{0}'. Only text fields and taxonomy fields with resolvable terms are supported.", field.InternalName));
            return;
        }
        list.SetDefaultColumnValues(new List<IDefaultColumnValue>() { defaultColumnValue });
    }

    // Resolves the target field from the -Field pipe bind (object, id or name),
    // ensuring InternalName and TypeAsString are loaded; null when unresolvable.
    private Field ResolveField(List list)
    {
        Field field = null;
        if (Field.Field != null)
        {
            field = Field.Field;
            if (!field.IsPropertyAvailable("TypeAsString"))
            {
                ClientContext.Load(field, f => f.TypeAsString);
            }
            if (!field.IsPropertyAvailable("InternalName"))
            {
                ClientContext.Load(field, f => f.InternalName);
            }
            ClientContext.Load(field);
            ClientContext.ExecuteQueryRetry();
        }
        else if (Field.Id != Guid.Empty)
        {
            field = list.Fields.GetById(Field.Id);
            ClientContext.Load(field, f => f.InternalName, f => f.TypeAsString);
            ClientContext.ExecuteQueryRetry();
        }
        else if (!string.IsNullOrEmpty(Field.Name))
        {
            field = list.Fields.GetByInternalNameOrTitle(Field.Name);
            ClientContext.Load(field, f => f.InternalName, f => f.TypeAsString);
            ClientContext.ExecuteQueryRetry();
        }
        return field;
    }

    // Text fields: all supplied values are concatenated with a semicolon.
    private IDefaultColumnValue BuildTextValue(Field field)
    {
        var values = string.Join(";", Value);
        return new DefaultColumnTextValue()
        {
            FieldInternalName = field.InternalName,
            FolderRelativePath = Folder,
            Text = values
        };
    }

    // Taxonomy fields: each value is a "Group|TermSet|Term" path resolved via
    // the site's term store; returns null when no term could be resolved.
    private IDefaultColumnValue BuildTaxonomyValue(Field field)
    {
        List<Term> terms = new List<Term>();
        foreach (var termString in Value)
        {
            var term = ClientContext.Site.GetTaxonomyItemByPath(termString);
            if (term != null)
            {
                terms.Add(term as Term);
            }
        }
        if (!terms.Any())
        {
            return null;
        }
        var defaultColumnValue = new DefaultColumnTermValue()
        {
            FieldInternalName = field.InternalName,
            FolderRelativePath = Folder,
        };
        terms.ForEach(t => defaultColumnValue.Terms.Add(t));
        return defaultColumnValue;
    }
}
}
| JonathanHuss/PnP | Solutions/PowerShell.Commands/Commands/Lists/SetDefaultColumnValues.cs | C# | apache-2.0 | 5,810 |
<?php
/**
* This file is part of the Nette Framework (https://nette.org)
* Copyright (c) 2004 David Grudl (https://davidgrudl.com)
*/
namespace Nette\Application;
use Nette;
/**
* Default presenter loader.
*/
class PresenterFactory implements IPresenterFactory
{
	use Nette\SmartObject;

	/**
	 * @var array[] of module => splited mask; each mask is a triple
	 *      [namespace prefix, per-module segment (with '*' placeholder),
	 *       presenter segment (with '*' placeholder)]
	 */
	private $mapping = [
		'*' => ['', '*Module\\', '*Presenter'],
		'Nette' => ['NetteModule\\', '*\\', '*Presenter'],
	];

	/** @var array cache of presenter name => fully-qualified class name */
	private $cache = [];

	/** @var callable instantiates a presenter class (class name => IPresenter) */
	private $factory;

	/**
	 * @param callable function (string $class): IPresenter
	 */
	public function __construct(callable $factory = NULL)
	{
		// Default factory simply news up the class with no constructor arguments.
		$this->factory = $factory ?: function ($class) { return new $class; };
	}

	/**
	 * Creates new presenter instance.
	 * @param string presenter name
	 * @return IPresenter
	 */
	public function createPresenter($name)
	{
		return call_user_func($this->factory, $this->getPresenterClass($name));
	}

	/**
	 * Generates and checks presenter class name.
	 * Note: $name is by-reference and may be canonicalized to the class's
	 * actual casing (with a E_USER_WARNING on mismatch).
	 * @param string presenter name
	 * @return string class name
	 * @throws InvalidPresenterException
	 */
	public function getPresenterClass(&$name)
	{
		// Serve repeated lookups from the per-instance cache.
		if (isset($this->cache[$name])) {
			return $this->cache[$name];
		}

		// Presenter names are colon-separated alphanumeric segments, e.g. 'Admin:Dashboard'.
		if (!is_string($name) || !Nette\Utils\Strings::match($name, '#^[a-zA-Z\x7f-\xff][a-zA-Z0-9\x7f-\xff:]*\z#')) {
			throw new InvalidPresenterException("Presenter name must be alphanumeric string, '$name' is invalid.");
		}

		$class = $this->formatPresenterClass($name);

		if (!class_exists($class)) {
			throw new InvalidPresenterException("Cannot load presenter '$name', class '$class' was not found.");
		}

		// Reflection yields the class name with its declared (canonical) casing.
		$reflection = new \ReflectionClass($class);
		$class = $reflection->getName();

		if (!$reflection->implementsInterface(IPresenter::class)) {
			throw new InvalidPresenterException("Cannot load presenter '$name', class '$class' is not Nette\\Application\\IPresenter implementor.");
		} elseif ($reflection->isAbstract()) {
			throw new InvalidPresenterException("Cannot load presenter '$name', class '$class' is abstract.");
		}

		$this->cache[$name] = $class;

		// Warn (and canonicalize the by-reference $name) when the caller used
		// different casing than the actual class declares.
		if ($name !== ($realName = $this->unformatPresenterClass($class))) {
			trigger_error("Case mismatch on presenter name '$name', correct name is '$realName'.", E_USER_WARNING);
			$name = $realName;
		}

		return $class;
	}

	/**
	 * Sets mapping as pairs [module => mask]
	 * A string mask like 'App\*Module\*Presenter' is split into the internal
	 * triple form; an array mask must already have exactly three parts.
	 * @return static
	 */
	public function setMapping(array $mapping)
	{
		foreach ($mapping as $module => $mask) {
			if (is_string($mask)) {
				// Split the mask into prefix / module segment / presenter segment.
				if (!preg_match('#^\\\\?([\w\\\\]*\\\\)?(\w*\*\w*?\\\\)?([\w\\\\]*\*\w*)\z#', $mask, $m)) {
					throw new Nette\InvalidStateException("Invalid mapping mask '$mask'.");
				}
				$this->mapping[$module] = [$m[1], $m[2] ?: '*Module\\', $m[3]];
			} elseif (is_array($mask) && count($mask) === 3) {
				$this->mapping[$module] = [$mask[0] ? $mask[0] . '\\' : '', $mask[1] . '\\', $mask[2]];
			} else {
				throw new Nette\InvalidStateException("Invalid mapping mask for module $module.");
			}
		}
		return $this;
	}

	/**
	 * Formats presenter class name from its name.
	 * Intermediate segments use the module mask; the last segment uses the
	 * presenter mask.
	 * @param string
	 * @return string
	 * @internal
	 */
	public function formatPresenterClass($presenter)
	{
		$parts = explode(':', $presenter);
		// A module-specific mapping applies only when the name has a module
		// segment AND that module is registered; otherwise the '*' default is used.
		$mapping = isset($parts[1], $this->mapping[$parts[0]])
			? $this->mapping[array_shift($parts)]
			: $this->mapping['*'];
		while ($part = array_shift($parts)) {
			$mapping[0] .= str_replace('*', $part, $mapping[$parts ? 1 : 2]);
		}
		return $mapping[0];
	}

	/**
	 * Formats presenter name from class name (inverse of formatPresenterClass).
	 * @param string
	 * @return string|NULL  NULL when the class matches no registered mapping
	 * @internal
	 */
	public function unformatPresenterClass($class)
	{
		foreach ($this->mapping as $module => $mapping) {
			// Turn the mask triple into a case-insensitive regex capturing
			// the module and presenter name parts.
			$mapping = str_replace(['\\', '*'], ['\\\\', '(\w+)'], $mapping);
			if (preg_match("#^\\\\?$mapping[0]((?:$mapping[1])*)$mapping[2]\\z#i", $class, $matches)) {
				return ($module === '*' ? '' : $module . ':')
					. preg_replace("#$mapping[1]#iA", '$1:', $matches[1]) . $matches[3];
			}
		}
		return NULL;
	}
}
| MasaharuKomuro/kitakupics | vendor/nette/application/src/Application/PresenterFactory.php | PHP | apache-2.0 | 4,074 |
/**
* Copyright 2010-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws.cognito.sync.devauth.client;
/**
 * Immutable value object holding the outcome of the GetToken call of the
 * sample Cognito developer authentication: either an HTTP-level failure, or a
 * successful response carrying the identity id, identity pool id and token.
 */
public class GetTokenResponse extends Response {

    private final String identityId;
    private final String identityPoolId;
    private final String token;

    /**
     * Builds a failure response; only the HTTP status code and message are
     * retained, all token-related fields stay null.
     */
    public GetTokenResponse(final int responseCode, final String responseMessage) {
        super(responseCode, responseMessage);
        identityId = null;
        identityPoolId = null;
        token = null;
    }

    /**
     * Builds a success response (HTTP 200) carrying the issued identity
     * information.
     */
    public GetTokenResponse(final String identityId,
            final String identityPoolId, final String token) {
        super(200, null);
        this.identityId = identityId;
        this.identityPoolId = identityPoolId;
        this.token = token;
    }

    /** @return the Cognito identity id, or null for a failure response */
    public String getIdentityId() {
        return identityId;
    }

    /** @return the identity pool id, or null for a failure response */
    public String getIdentityPoolId() {
        return identityPoolId;
    }

    /** @return the token, or null for a failure response */
    public String getToken() {
        return token;
    }
}
| lyzxsc/aws-sdk-android-samples | CognitoSyncDemo/src/com/amazonaws/cognito/sync/devauth/client/GetTokenResponse.java | Java | apache-2.0 | 1,640 |
/*
* © Copyright IBM Corp. 2012
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
/*
* Author: Maire Kehoe (mkehoe@ie.ibm.com)
* Date: 20 Mar 2012
* SkipFileContent.java
*/
package com.ibm.xsp.test.framework.setup;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import com.ibm.commons.util.StringUtil;
import com.ibm.xsp.test.framework.AbstractXspTest;
import com.ibm.xsp.test.framework.XspTestUtil;
/**
*
* @author Maire Kehoe (mkehoe@ie.ibm.com)
*/
public class SkipFileContent {
    /** Shared empty result for {@link #getUncheckedSkips()}. */
    private static final Object[][] EMPTY_UNCHECKED_ARR = new Object[0][];
    /** Sentinel instance meaning "no skip file loaded"; also the initial value of {@link #staticSkips}. */
    private static SkipFileContent EMPTY_SKIP_FILE = new SkipFileContent();
    private static SkipFileContent staticSkips = EMPTY_SKIP_FILE;
    public static SkipFileContent getStaticSkips() {
        return staticSkips;
    }
    /**
     * Installs the process-wide skip-file content. May only be called once;
     * a second call throws because test setup has already occurred.
     */
    public static void setStaticSkips(SkipFileContent staticSkips) {
        if( SkipFileContent.staticSkips != EMPTY_SKIP_FILE ){
            // in setup, but setup has already occurred.
            throw new IllegalArgumentException("The staticSkips have already been initialized.");
        }
        SkipFileContent.staticSkips = staticSkips;
    }
//    public static void clearStaticSkips(){
//        SkipFileContent.staticSkips = EMPTY_SKIP_FILE;
//    }
    /**
     * Returns the given per-test skips merged with any skips registered in the
     * static skip file for (testClassName, testMethodName). A null input is
     * treated as an empty array; the input array is never mutated.
     */
    public static String[] concatSkips(String[] skips, String testClassName, String testMethodName){
        if( null == skips ){
            skips = StringUtil.EMPTY_STRING_ARRAY;
        }
        SkipFileContent content = getStaticSkips();
        if( EMPTY_SKIP_FILE == content ){
            return skips;
        }
        String[] staticSkips = content.getSkips(testClassName, testMethodName);
        if( null == staticSkips ){
            return skips;
        }
        return XspTestUtil.concat(skips, staticSkips);
    }
    /** Convenience overload taking the test instance instead of its class name. */
    public static String[] concatSkips(String[] skips, AbstractXspTest testClassName, String testMethodName){
        return concatSkips(skips, testClassName.getClass().getName(), testMethodName);
    }
    // Map of "testClassName methodName" key (see toKey) to the registered skips;
    // lazily created by addSkips.
    private Map<String, String[]> skips;
    // Keys from {@link #skips} that have been looked up at least once; used to
    // report stale skip-file entries via getAllUncheckedSkips().
    private List<String> checked;
    public SkipFileContent() {
        super();
    }
    /**
     * Returns the skips registered for (testClassName, methodName), or null if
     * none. Side effect: a successful lookup marks the entry as "checked".
     */
    public String[] getSkips(String testClassName, String methodName){
        if( null == skips ){
            return null;
        }
        String key = toKey(testClassName, methodName);
        String[] foundSkips = skips.get(key);
        if( null != foundSkips ){
            // add to checked list
            if( null == checked ){
                checked = new ArrayList<String>(skips.size());
            }
            checked.add(key);
        }
        return foundSkips;
    }
    /**
     * Registers the skip list for (testClassName, methodName).
     * @throws IllegalArgumentException if that pair was already registered
     */
    public void addSkips(String testClassName, String methodName, String[] fails) {
        if( null == skips ){
            skips = new HashMap<String, String[]>();
        }
        String key = toKey(testClassName, methodName);
        if( skips.containsKey(key) ){
            throw new IllegalArgumentException("Skips already registered "
                    +"with testClassName=" + testClassName + ", methodName="
                    + methodName);
        }
        skips.put(key, fails);
    }
    /**
     * Builds the map key: class name and method name joined by a single space
     * (relied upon by getAllUncheckedSkips to split the key back apart).
     */
    private String toKey(String testClassName, String methodName) {
        return testClassName+" "+methodName;
    }
    /** Static counterpart of {@link #getAllUncheckedSkips()} on the installed skip file. */
    public static Object[][] getUncheckedSkips(){
        SkipFileContent content = getStaticSkips();
        if( EMPTY_SKIP_FILE == content ){
            return EMPTY_UNCHECKED_ARR;
        }
        return content.getAllUncheckedSkips();
    }
    /**
     * Returns all registered entries that were never looked up via getSkips,
     * each as {testClassName, methodName, skips}; used to flag obsolete
     * skip-file rows.
     */
    private Object[][] getAllUncheckedSkips() {
        List<Object[]> unchecked = null;
        if( null != skips ){
            for (Entry<String,String[]> entry : skips.entrySet()) {
                if( null == checked || ! checked.contains(entry.getKey()) ){
                    if( null == unchecked ){
                        unchecked = new ArrayList<Object[]>();
                    }
                    String key = entry.getKey();
                    // Split the "className methodName" key at the first space (see toKey).
                    int separatorIndex = key.indexOf(' ');
                    String testClassName = key.substring(0, separatorIndex);
                    String methodName = key.substring(separatorIndex+1);
                    String[] skips = entry.getValue();
                    unchecked.add(new Object[]{testClassName, methodName, skips});
                }
            }
        }
        if( null == unchecked){
            return EMPTY_UNCHECKED_ARR;
        }
        return unchecked.toArray(new Object[unchecked.size()][]);
    }
}
| iharkhukhrakou/XPagesExtensionLibrary | extlib/lwp/openntf/test/eclipse/plugins/com.ibm.xsp.test.framework/src/com/ibm/xsp/test/framework/setup/SkipFileContent.java | Java | apache-2.0 | 5,361 |
/*
* Copyright 2000-2009 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.debugger.settings;
import com.intellij.debugger.DebuggerBundle;
import com.intellij.debugger.ui.JavaDebuggerSupport;
import com.intellij.openapi.options.Configurable;
import com.intellij.openapi.options.SearchableConfigurable;
import com.intellij.openapi.project.Project;
import com.intellij.ui.classFilter.ClassFilterEditor;
import org.jetbrains.annotations.NotNull;
import javax.swing.*;
import java.awt.*;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
/**
 * Settings page for debugger stepping behavior: which methods the debugger
 * steps over (synthetic methods, constructors, class loaders, simple getters)
 * and the class-pattern filters applied while stepping.
 */
public class DebuggerSteppingConfigurable implements SearchableConfigurable, Configurable.NoScroll {
  private JCheckBox myCbStepInfoFiltersEnabled;
  private JCheckBox myCbSkipSyntheticMethods;
  private JCheckBox myCbSkipConstructors;
  private JCheckBox myCbSkipClassLoaders;
  private ClassFilterEditor mySteppingFilterEditor;
  private JCheckBox myCbSkipSimpleGetters;
  private Project myProject;

  // Loads the persisted DebuggerSettings into the UI controls.
  // NOTE(review): assumes createComponent() has already run (framework calls
  // reset only after the component is created) -- the fields are null before that.
  public void reset() {
    final DebuggerSettings settings = DebuggerSettings.getInstance();
    myCbSkipSimpleGetters.setSelected(settings.SKIP_GETTERS);
    myCbSkipSyntheticMethods.setSelected(settings.SKIP_SYNTHETIC_METHODS);
    myCbSkipConstructors.setSelected(settings.SKIP_CONSTRUCTORS);
    myCbSkipClassLoaders.setSelected(settings.SKIP_CLASSLOADERS);
    myCbStepInfoFiltersEnabled.setSelected(settings.TRACING_FILTERS_ENABLED);
    mySteppingFilterEditor.setFilters(settings.getSteppingFilters());
    // The filter table is only editable when filtering is enabled.
    mySteppingFilterEditor.setEnabled(settings.TRACING_FILTERS_ENABLED);
  }

  // Writes the UI state back into the persistent singleton.
  public void apply() {
    getSettingsTo(DebuggerSettings.getInstance());
  }

  // Copies the current UI state into the given settings object; used both for
  // apply() and for the isModified() comparison.
  private void getSettingsTo(DebuggerSettings settings) {
    settings.SKIP_GETTERS = myCbSkipSimpleGetters.isSelected();
    settings.SKIP_SYNTHETIC_METHODS = myCbSkipSyntheticMethods.isSelected();
    settings.SKIP_CONSTRUCTORS = myCbSkipConstructors.isSelected();
    settings.SKIP_CLASSLOADERS = myCbSkipClassLoaders.isSelected();
    settings.TRACING_FILTERS_ENABLED = myCbStepInfoFiltersEnabled.isSelected();
    // Commit any in-progress cell edit before reading the filter list.
    mySteppingFilterEditor.stopEditing();
    settings.setSteppingFilters(mySteppingFilterEditor.getFilters());
  }

  // Modified = (persisted settings) != (a clone with the UI state applied).
  public boolean isModified() {
    final DebuggerSettings currentSettings = DebuggerSettings.getInstance();
    final DebuggerSettings debuggerSettings = currentSettings.clone();
    getSettingsTo(debuggerSettings);
    return !debuggerSettings.equals(currentSettings);
  }

  public String getDisplayName() {
    return DebuggerBundle.message("debugger.stepping.configurable.display.name");
  }

  @NotNull
  public String getHelpTopic() {
    return "reference.idesettings.debugger.stepping";
  }

  // The searchable-configurable id doubles as the help topic id.
  @NotNull
  public String getId() {
    return getHelpTopic();
  }

  public Runnable enableSearch(String option) {
    return null;
  }

  // Builds the settings panel: five checkboxes stacked vertically plus the
  // class-filter editor taking the remaining vertical space.
  public JComponent createComponent() {
    final JPanel panel = new JPanel(new GridBagLayout());
    myProject = JavaDebuggerSupport.getCurrentProject();
    myCbSkipSyntheticMethods = new JCheckBox(DebuggerBundle.message("label.debugger.general.configurable.skip.synthetic.methods"));
    myCbSkipConstructors = new JCheckBox(DebuggerBundle.message("label.debugger.general.configurable.skip.constructors"));
    myCbSkipClassLoaders = new JCheckBox(DebuggerBundle.message("label.debugger.general.configurable.skip.classloaders"));
    myCbSkipSimpleGetters = new JCheckBox(DebuggerBundle.message("label.debugger.general.configurable.skip.simple.getters"));
    myCbStepInfoFiltersEnabled = new JCheckBox(DebuggerBundle.message("label.debugger.general.configurable.step.filters.list.header"));
    panel.add(myCbSkipSyntheticMethods, new GridBagConstraints(0, GridBagConstraints.RELATIVE, 1, 1, 1.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.NONE, new Insets(0, 0, 0, 0),0, 0));
    panel.add(myCbSkipConstructors, new GridBagConstraints(0, GridBagConstraints.RELATIVE, 1, 1, 1.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.NONE, new Insets(0, 0, 0, 0),0, 0));
    panel.add(myCbSkipClassLoaders, new GridBagConstraints(0, GridBagConstraints.RELATIVE, 1, 1, 1.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.NONE, new Insets(0, 0, 0, 0),0, 0));
    panel.add(myCbSkipSimpleGetters, new GridBagConstraints(0, GridBagConstraints.RELATIVE, 1, 1, 1.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.NONE, new Insets(0, 0, 0, 0),0, 0));
    panel.add(myCbStepInfoFiltersEnabled, new GridBagConstraints(0, GridBagConstraints.RELATIVE, 1, 1, 1.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.NONE, new Insets(8, 0, 0, 0),0, 0));

    mySteppingFilterEditor = new ClassFilterEditor(myProject, null, "reference.viewBreakpoints.classFilters.newPattern");
    panel.add(mySteppingFilterEditor, new GridBagConstraints(0, GridBagConstraints.RELATIVE, 1, 1, 1.0, 1.0, GridBagConstraints.CENTER, GridBagConstraints.BOTH, new Insets(0, 5, 0, 0),0, 0));

    // Keep the filter editor's enabled state in sync with the master checkbox.
    myCbStepInfoFiltersEnabled.addActionListener(new ActionListener() {
      public void actionPerformed(ActionEvent e) {
        mySteppingFilterEditor.setEnabled(myCbStepInfoFiltersEnabled.isSelected());
      }
    });
    return panel;
  }

  // Release UI references so the panel (and project) can be garbage collected.
  public void disposeUIResources() {
    mySteppingFilterEditor = null;
    myProject = null;
  }
}
| romankagan/DDBWorkbench | java/debugger/impl/src/com/intellij/debugger/settings/DebuggerSteppingConfigurable.java | Java | apache-2.0 | 5,766 |
// Copyright 2000-2020 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.ui.components;
import com.intellij.ide.ui.AntialiasingType;
import com.intellij.openapi.util.NlsContexts;
import com.intellij.openapi.util.text.StringUtil;
import com.intellij.ui.*;
import com.intellij.util.ui.GraphicsUtil;
import com.intellij.util.ui.HTMLEditorKitBuilder;
import com.intellij.util.ui.JBFont;
import com.intellij.util.ui.UIUtil;
import com.intellij.util.ui.components.JBComponent;
import org.intellij.lang.annotations.JdkConstants;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import javax.swing.*;
import javax.swing.border.Border;
import javax.swing.border.EmptyBorder;
import javax.swing.event.HyperlinkListener;
import javax.swing.text.BadLocationException;
import javax.swing.text.DefaultCaret;
import javax.swing.text.EditorKit;
import javax.swing.text.html.HTMLEditorKit;
import javax.swing.text.html.StyleSheet;
import java.awt.*;
import java.awt.event.FocusAdapter;
import java.awt.event.FocusEvent;
import java.util.Collections;
public class JBLabel extends JLabel implements AnchorableComponent, JBComponent<JBLabel> {
private UIUtil.ComponentStyle myComponentStyle = UIUtil.ComponentStyle.REGULAR;
private UIUtil.FontColor myFontColor = UIUtil.FontColor.NORMAL;
private JComponent myAnchor;
private JEditorPane myEditorPane;
private JLabel myIconLabel;
private boolean myMultiline;
private boolean myAllowAutoWrapping = false;
  /** Creates a label with no text and no icon, using the regular style. */
  public JBLabel() {
  }

  /** Creates an empty label with the given size/style preset applied. */
  public JBLabel(@NotNull UIUtil.ComponentStyle componentStyle) {
    setComponentStyle(componentStyle);
  }

  /** Creates an icon-only label. */
  public JBLabel(@Nullable Icon image) {
    super(image);
  }

  /** Creates a text-only label. */
  public JBLabel(@NotNull @NlsContexts.Label String text) {
    super(text);
  }

  /** Creates a text label with the given size/style preset applied. */
  public JBLabel(@NotNull @NlsContexts.Label String text, @NotNull UIUtil.ComponentStyle componentStyle) {
    super(text);
    setComponentStyle(componentStyle);
  }

  /** Creates a text label with the given size/style preset and font color preset applied. */
  public JBLabel(@NotNull @NlsContexts.Label String text, @NotNull UIUtil.ComponentStyle componentStyle, @NotNull UIUtil.FontColor fontColor) {
    super(text);
    setComponentStyle(componentStyle);
    setFontColor(fontColor);
  }

  /** Creates a text label with the given horizontal alignment (a {@code SwingConstants} value). */
  public JBLabel(@NotNull @NlsContexts.Label String text, @JdkConstants.HorizontalAlignment int horizontalAlignment) {
    super(text, horizontalAlignment);
  }

  /** Creates an icon-only label with the given horizontal alignment. */
  public JBLabel(@Nullable Icon image, @JdkConstants.HorizontalAlignment int horizontalAlignment) {
    super(image, horizontalAlignment);
  }

  /** Creates a label with text, icon and horizontal alignment. */
  public JBLabel(@NotNull @NlsContexts.Label String text, @Nullable Icon icon, @JdkConstants.HorizontalAlignment int horizontalAlignment) {
    super(text, icon, horizontalAlignment);
  }
  /** Applies the given style preset to this label (delegates to {@code UIUtil.applyStyle}). */
  public void setComponentStyle(@NotNull UIUtil.ComponentStyle componentStyle) {
    myComponentStyle = componentStyle;
    UIUtil.applyStyle(componentStyle, this);
  }

  /** @return the style preset last applied via {@link #setComponentStyle} */
  public UIUtil.ComponentStyle getComponentStyle() {
    return myComponentStyle;
  }

  /** @return the font color preset, or null after an explicit {@link #setForeground(Color)} call */
  public UIUtil.FontColor getFontColor() {
    return myFontColor;
  }

  /** Switches the label to a predefined font color; cleared by {@link #setForeground(Color)}. */
  public void setFontColor(@NotNull UIUtil.FontColor fontColor) {
    myFontColor = fontColor;
  }
  @Override
  public Color getForeground() {
    // A disabled label always uses the shared disabled color, regardless of
    // any explicitly set foreground or font-color preset.
    if (!isEnabled()) {
      return UIUtil.getLabelDisabledForeground();
    }
    // An active FontColor preset wins over the color stored in the superclass;
    // setForeground() clears the preset.
    if (myFontColor != null) {
      return UIUtil.getLabelFontColor(myFontColor);
    }
    return super.getForeground();
  }
  @Override
  public void setForeground(Color fg) {
    // An explicit color invalidates the FontColor preset (see getForeground()).
    myFontColor = null;
    super.setForeground(fg);
    if (myEditorPane != null) {
      // Copyable mode: the editor pane renders the text, so restyle it too.
      updateEditorPaneStyle();
    }
  }

  @Override
  public void setEnabled(boolean enabled) {
    super.setEnabled(enabled);
    if (myEditorPane != null) {
      // Copyable mode: mirror the enabled state into the embedded editor pane.
      myEditorPane.setEnabled(enabled);
    }
  }
  @Override
  public void setAnchor(@Nullable JComponent anchor) {
    // AnchorableComponent contract: when an anchor is set, this label reports
    // the anchor's sizes (see the size getters) so anchored components line up.
    myAnchor = anchor;
  }

  @Override
  public JComponent getAnchor() {
    return myAnchor;
  }
  @Override
  public Dimension getPreferredSize() {
    // Anchor (if any and not self) dictates the size; in copyable mode the
    // size comes from the BorderLayout managing icon label + editor pane.
    if (myAnchor != null && myAnchor != this) return myAnchor.getPreferredSize();
    if (myEditorPane != null) return getLayout().preferredLayoutSize(this);
    return super.getPreferredSize();
  }

  @Override
  public Dimension getMinimumSize() {
    // Same delegation order as getPreferredSize().
    if (myAnchor != null && myAnchor != this) return myAnchor.getMinimumSize();
    if (myEditorPane != null) return getLayout().minimumLayoutSize(this);
    return super.getMinimumSize();
  }

  @Override
  public Dimension getMaximumSize() {
    // Same delegation order as getPreferredSize().
    if (myAnchor != null && myAnchor != this) return myAnchor.getMaximumSize();
    if (myEditorPane != null) {
      return getLayout().maximumLayoutSize(this);
    }
    return super.getMaximumSize();
  }

  @Override
  public BorderLayout getLayout() {
    // In copyable mode updateLayout() always installs a BorderLayout, so this
    // narrowing cast is safe there.
    return (BorderLayout)super.getLayout();
  }
  @Override
  protected void paintComponent(Graphics g) {
    // In copyable mode the child components (icon label + editor pane) paint
    // the content; painting the JLabel text as well would render it twice.
    if (myEditorPane == null) {
      super.paintComponent(g);
    }
  }
  @Override
  public void setText(@NlsContexts.Label String text) {
    super.setText(text);
    if (myEditorPane != null) {
      // Copyable mode: push the new text into the editor pane and recompute
      // derived state (style, multiline flag, vertical text alignment).
      myEditorPane.setText(getText());
      updateEditorPaneStyle();
      checkMultiline();
      updateTextAlignment();
    }
  }

  @Override
  public void setIcon(Icon icon) {
    super.setIcon(icon);
    if (myIconLabel != null) {
      // Copyable mode: the icon is rendered by a dedicated child label.
      myIconLabel.setIcon(icon);
      updateLayout();
      updateTextAlignment();
    }
  }
  /**
   * Sets the icon together with its alignment inside the icon sub-label.
   * The alignment only takes effect in copyable mode; otherwise this behaves
   * like {@link #setIcon(Icon)}.
   */
  public void setIconWithAlignment(Icon icon, int horizontalAlignment, int verticalAlignment) {
    super.setIcon(icon);
    if (myIconLabel != null) {
      myIconLabel.setIcon(icon);
      myIconLabel.setHorizontalAlignment(horizontalAlignment);
      myIconLabel.setVerticalAlignment(verticalAlignment);
      updateLayout();
      updateTextAlignment();
    }
  }

  @Override
  public void setFocusable(boolean focusable) {
    super.setFocusable(focusable);
    if (myEditorPane != null) {
      // Copyable mode: the editor pane is the actual focus target.
      myEditorPane.setFocusable(focusable);
    }
  }
  // Recomputes myMultiline, which the copyable editor pane uses to decide
  // whether an ellipsis may be painted (multiline text is never ellipsized).
  // NOTE(review): this searches the tag-stripped text for System.lineSeparator();
  // on platforms where the separator is "\r\n", text containing a bare "\n"
  // would not be detected as multiline -- confirm whether that is intended.
  private void checkMultiline() {
    String text = getText();
    myMultiline = text != null && StringUtil.removeHtmlTags(text).contains(System.lineSeparator());
  }
  @Override
  public void setFont(Font font) {
    super.setFont(font);
    if (myEditorPane != null) {
      // Copyable mode: restyle the editor pane and realign the text for the new font metrics.
      updateEditorPaneStyle();
      updateTextAlignment();
    }
  }

  @Override
  public void setIconTextGap(int iconTextGap) {
    super.setIconTextGap(iconTextGap);
    if (myEditorPane != null) {
      // The gap is modelled as the BorderLayout hgap, so the layout must be rebuilt.
      updateLayout();
    }
  }

  @Override
  public void setBounds(int x, int y, int width, int height) {
    super.setBounds(x, y, width, height);
    if (myEditorPane != null) {
      // Vertical text alignment depends on the current component height.
      updateTextAlignment();
    }
  }

  @Override
  public void setVerticalTextPosition(int textPosition) {
    super.setVerticalTextPosition(textPosition);
    if (myEditorPane != null) {
      updateTextAlignment();
    }
  }

  @Override
  public void setHorizontalTextPosition(int textPosition) {
    super.setHorizontalTextPosition(textPosition);
    if (myEditorPane != null) {
      // The text position determines on which side of the text the icon goes.
      updateLayout();
    }
  }
  // Copyable mode only: arranges the icon sub-label and the editor pane in a
  // BorderLayout, using the icon-text gap as horizontal spacing.
  private void updateLayout() {
    setLayout(new BorderLayout(getIcon() == null ? 0 : getIconTextGap(), 0));
    int position = getHorizontalTextPosition();
    // Default: icon on the leading side for the current component orientation;
    // LEADING (in LTR), TRAILING (in RTL) and LEFT put the icon on the EAST side instead.
    String iconConstraint = getComponentOrientation().isLeftToRight() ? BorderLayout.WEST : BorderLayout.EAST;
    if (getComponentOrientation().isLeftToRight() && position == SwingConstants.LEADING) iconConstraint = BorderLayout.EAST;
    if (!getComponentOrientation().isLeftToRight() && position == SwingConstants.TRAILING) iconConstraint = BorderLayout.EAST;
    if (position == SwingConstants.LEFT) iconConstraint = BorderLayout.EAST;
    add(myIconLabel, iconConstraint);
    add(myEditorPane, BorderLayout.CENTER);
  }
  @Override
  public void updateUI() {
    super.updateUI();
    if (myEditorPane != null) {
      // init inner components again (if any) to provide proper colors when LAF is being changed:
      // toggling copyable off and on rebuilds the editor pane with the new LAF defaults.
      setCopyable(false);
      setCopyable(true);
    }
    GraphicsUtil.setAntialiasingType(this, AntialiasingType.getAAHintForSwingComponent());
  }
  /**
   * This listener will be used in 'copyable' mode when a link is updated (clicked, entered, etc.).
   * Subclasses may override to intercept hyperlink events; the default opens
   * links in the browser via {@code BrowserHyperlinkListener}.
   */
  @NotNull
  protected HyperlinkListener createHyperlinkListener() {
    return BrowserHyperlinkListener.INSTANCE;
  }
  /**
   * In 'copyable' mode JBLabel has the same appearance but user can select text with mouse and copy it to clipboard with standard shortcut.
   * By default JBLabel is NOT copyable.
   * Also 'copyable' label supports web hyperlinks (e.g. opens browser on click).
   * <p>
   * Implementation: copyable mode embeds a transparent, read-only HTML
   * {@code JEditorPane} (plus a separate icon label) as child components;
   * turning the mode off removes them again.
   *
   * @return 'this' (the same instance)
   */
  public JBLabel setCopyable(boolean copyable) {
    // Only act when the requested mode differs from the current one
    // (myEditorPane != null <=> currently copyable).
    if (copyable ^ myEditorPane != null) {
      if (myEditorPane == null) {
        // Painted manually at the clipped edge when the text does not fit.
        final JLabel ellipsisLabel = new JBLabel("...");
        myIconLabel = new JLabel(getIcon());
        myEditorPane = new JEditorPane() {
          @Override
          public void paint(Graphics g) {
            Dimension size = getSize();
            // Single-line, non-wrapping text that overflows gets an "..." suffix.
            boolean paintEllipsis = getPreferredSize().width > size.width && !myMultiline && !myAllowAutoWrapping;
            if (!paintEllipsis) {
              super.paint(g);
            }
            else {
              Dimension ellipsisSize = ellipsisLabel.getPreferredSize();
              int endOffset = size.width - ellipsisSize.width;
              try {
                // do not paint half of the letter
                endOffset = modelToView(viewToModel(new Point(endOffset, getHeight() / 2)) - 1).x;
              }
              catch (BadLocationException ignore) {
              }
              // Clip the text to the ellipsis boundary, then draw "..." after it.
              Shape oldClip = g.getClip();
              g.clipRect(0, 0, endOffset, size.height);

              super.paint(g);
              g.setClip(oldClip);

              g.translate(endOffset, 0);
              ellipsisLabel.setSize(ellipsisSize);
              ellipsisLabel.paint(g);
              g.translate(-endOffset, 0);
            }
          }
        };
        myEditorPane.addFocusListener(new FocusAdapter() {
          @Override
          public void focusLost(FocusEvent e) {
            // Collapse any selection when focus leaves, so stale highlights do not linger.
            if (myEditorPane == null) return;
            int caretPosition = myEditorPane.getCaretPosition();
            myEditorPane.setSelectionStart(caretPosition);
            myEditorPane.setSelectionEnd(caretPosition);
          }
        });
        myEditorPane.setContentType("text/html");
        myEditorPane.setEditable(false);
        myEditorPane.setBackground(UIUtil.TRANSPARENT_COLOR);
        myEditorPane.setOpaque(false);
        myEditorPane.addHyperlinkListener(createHyperlinkListener());
        // Keep the off-hierarchy ellipsis label known to the UI machinery.
        ComponentUtil.putClientProperty(myEditorPane, UIUtil.NOT_IN_HIERARCHY_COMPONENTS, Collections.singleton(ellipsisLabel));

        myEditorPane.setEditorKit(HTMLEditorKitBuilder.simple());
        updateEditorPaneStyle();

        // Never auto-scroll on caret moves; the pane is a static label.
        if (myEditorPane.getCaret() instanceof DefaultCaret) {
          ((DefaultCaret)myEditorPane.getCaret()).setUpdatePolicy(DefaultCaret.NEVER_UPDATE);
        }
        myEditorPane.setText(getText());
        checkMultiline();
        myEditorPane.setCaretPosition(0);
        updateLayout();
        updateTextAlignment();
        // Remove label from tab order because selectable labels doesn't have visible selection state
        setFocusTraversalPolicyProvider(true);
        setFocusTraversalPolicy(new DisabledTraversalPolicy());
      }
      else {
        // Leaving copyable mode: drop the child components and fall back to plain JLabel painting.
        removeAll();
        myEditorPane = null;
        myIconLabel = null;
      }
    }
    return this;
  }
/**
 * Propagates the label's current font and foreground color into the inner editor pane,
 * including the HTML stylesheet so rendered HTML matches the label's appearance.
 */
private void updateEditorPaneStyle() {
  Font font = getFont();
  Color foreground = getForeground();
  myEditorPane.setFont(font);
  myEditorPane.setForeground(foreground);

  EditorKit kit = myEditorPane.getEditorKit();
  if (kit instanceof HTMLEditorKit) {
    // Mirror font/color/wrapping into CSS so HTML content renders like plain label text.
    String whiteSpace = myAllowAutoWrapping ? "normal" : "nowrap";
    String rule = "body, p {" +
                  "color:#" + ColorUtil.toHex(foreground) + ";" +
                  "font-family:" + font.getFamily() + ";" +
                  "font-size:" + font.getSize() + "pt;" +
                  "white-space:" + whiteSpace + ";}";
    StyleSheet css = ((HTMLEditorKit)kit).getStyleSheet();
    css.addRule(rule);
  }
}
/**
 * In 'copyable' mode auto-wrapping is disabled by default.
 * (In this case you have to markup your HTML with P or BR tags explicitly)
 *
 * @param allowAutoWrapping whether the copyable editor pane may wrap long lines
 * @return 'this' (the same instance), for call chaining
 */
public JBLabel setAllowAutoWrapping(boolean allowAutoWrapping) {
  myAllowAutoWrapping = allowAutoWrapping;
  return this;
}
/** @return whether auto-wrapping is enabled for the copyable editor pane */
public boolean isAllowAutoWrapping() {
  return myAllowAutoWrapping;
}
/**
 * Emulates vertical text alignment in 'copyable' mode by padding the editor pane
 * with a top border (CENTER centers the text, BOTTOM pushes it down; TOP needs nothing).
 */
private void updateTextAlignment() {
  if (myEditorPane == null) return;
  myEditorPane.setBorder(null); // drop any previously installed alignment border

  int verticalPosition = getVerticalTextPosition();
  if (verticalPosition == TOP) return;

  int preferredHeight = myEditorPane.getPreferredSize().height;
  int availableHeight = getHeight();
  if (availableHeight <= preferredHeight) return;

  int slack = availableHeight - preferredHeight;
  int topInset = verticalPosition == CENTER ? (slack + 1) / 2 : slack;
  // since the 'top' value is in real already-scaled pixels, should use swing's EmptyBorder
  //noinspection UseDPIAwareBorders
  myEditorPane.setBorder(new EmptyBorder(topInset, 0, 0, 0));
}
/** Fluent alias for {@link #setBorder}: installs the border and returns this label. */
@Override
public JBLabel withBorder(Border border) {
  setBorder(border);
  return this;
}
/** Fluent alias for {@link #setFont}: installs the font and returns this label. */
@Override
public JBLabel withFont(JBFont font) {
  setFont(font);
  return this;
}
/** Fluent helper: makes the label non-opaque (transparent background) and returns this label. */
@Override
public JBLabel andTransparent() {
  setOpaque(false);
  return this;
}
/** Fluent helper: makes the label opaque and returns this label. */
@Override
public JBLabel andOpaque() {
  setOpaque(true);
  return this;
}
} | smmribeiro/intellij-community | platform/platform-api/src/com/intellij/ui/components/JBLabel.java | Java | apache-2.0 | 13,575 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db.lifecycle;
import java.io.File;
import java.util.*;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.atomic.AtomicReference;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Function;
import com.google.common.base.Predicate;
import com.google.common.base.Predicates;
import com.google.common.collect.*;
import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.Directories;
import org.apache.cassandra.db.Memtable;
import org.apache.cassandra.db.commitlog.ReplayPosition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.compaction.OperationType;
import org.apache.cassandra.io.sstable.format.SSTableReader;
import org.apache.cassandra.io.util.FileUtils;
import org.apache.cassandra.metrics.StorageMetrics;
import org.apache.cassandra.notifications.*;
import org.apache.cassandra.utils.Pair;
import org.apache.cassandra.utils.Throwables;
import org.apache.cassandra.utils.concurrent.OpOrder;
import static com.google.common.base.Predicates.and;
import static com.google.common.collect.ImmutableSet.copyOf;
import static com.google.common.collect.Iterables.filter;
import static java.util.Collections.singleton;
import static org.apache.cassandra.db.lifecycle.Helpers.*;
import static org.apache.cassandra.db.lifecycle.View.permitCompacting;
import static org.apache.cassandra.db.lifecycle.View.updateCompacting;
import static org.apache.cassandra.db.lifecycle.View.updateLiveSet;
import static org.apache.cassandra.utils.Throwables.maybeFail;
import static org.apache.cassandra.utils.Throwables.merge;
import static org.apache.cassandra.utils.concurrent.Refs.release;
import static org.apache.cassandra.utils.concurrent.Refs.selfRefs;
/**
 * Tracks the lifecycle state of a single ColumnFamilyStore: its live/flushing memtables,
 * live sstables and the set currently being compacted. All state lives in an immutable
 * {@link View} that is swapped atomically via compare-and-set, and every change is fanned
 * out to the registered {@link INotificationConsumer} subscribers.
 */
public class Tracker
{
    private static final Logger logger = LoggerFactory.getLogger(Tracker.class);

    // COW list: subscriber registration is rare, notification iteration is frequent.
    public final Collection<INotificationConsumer> subscribers = new CopyOnWriteArrayList<>();
    // May be null for "dummy" trackers (see isDummy()) used for offline/validation purposes.
    public final ColumnFamilyStore cfstore;
    // The single mutable cell; all modifications go through apply(...) below.
    final AtomicReference<View> view;
    public final boolean loadsstables;

    public Tracker(ColumnFamilyStore cfstore, boolean loadsstables)
    {
        this.cfstore = cfstore;
        this.view = new AtomicReference<>();
        this.loadsstables = loadsstables;
        this.reset();
    }

    /** Convenience overload of {@link #tryModify(Iterable, OperationType)} for one sstable. */
    public LifecycleTransaction tryModify(SSTableReader sstable, OperationType operationType)
    {
        return tryModify(singleton(sstable), operationType);
    }

    /**
     * @return a Transaction over the provided sstables if we are able to mark the given @param sstables as compacted, before anyone else
     */
    public LifecycleTransaction tryModify(Iterable<SSTableReader> sstables, OperationType operationType)
    {
        if (Iterables.isEmpty(sstables))
            return new LifecycleTransaction(this, operationType, sstables);
        // apply() returns null if another compaction already claimed any of these sstables.
        if (null == apply(permitCompacting(sstables), updateCompacting(emptySet(), sstables)))
            return null;
        return new LifecycleTransaction(this, operationType, sstables);
    }

    // METHODS FOR ATOMICALLY MODIFYING THE VIEW

    /** Applies the function unconditionally; see {@link #apply(Predicate, Function)}. */
    Pair<View, View> apply(Function<View, View> function)
    {
        return apply(Predicates.<View>alwaysTrue(), function);
    }

    /** Variant that folds any failure into the supplied accumulator instead of throwing. */
    Throwable apply(Function<View, View> function, Throwable accumulate)
    {
        try
        {
            apply(function);
        }
        catch (Throwable t)
        {
            accumulate = merge(accumulate, t);
        }
        return accumulate;
    }

    /**
     * atomically tests permit against the view and applies function to it, if permit yields true, returning the original;
     * otherwise the method aborts, returning null
     */
    Pair<View, View> apply(Predicate<View> permit, Function<View, View> function)
    {
        // Classic CAS loop: recompute against the latest view until the swap succeeds.
        while (true)
        {
            View cur = view.get();
            if (!permit.apply(cur))
                return null;
            View updated = function.apply(cur);
            if (view.compareAndSet(cur, updated))
                return Pair.create(cur, updated);
        }
    }

    /**
     * Adjusts the load/disk-space metrics for sstables entering and leaving the tracker;
     * per-sstable failures are merged into the accumulator so all sizes are still processed.
     */
    Throwable updateSizeTracking(Iterable<SSTableReader> oldSSTables, Iterable<SSTableReader> newSSTables, Throwable accumulate)
    {
        if (isDummy())
            return accumulate;
        long add = 0;
        for (SSTableReader sstable : newSSTables)
        {
            if (logger.isTraceEnabled())
                logger.trace("adding {} to list of files tracked for {}.{}", sstable.descriptor, cfstore.keyspace.getName(), cfstore.name);
            try
            {
                add += sstable.bytesOnDisk();
            }
            catch (Throwable t)
            {
                accumulate = merge(accumulate, t);
            }
        }
        long subtract = 0;
        for (SSTableReader sstable : oldSSTables)
        {
            if (logger.isTraceEnabled())
                logger.trace("removing {} from list of files tracked for {}.{}", sstable.descriptor, cfstore.keyspace.getName(), cfstore.name);
            try
            {
                subtract += sstable.bytesOnDisk();
            }
            catch (Throwable t)
            {
                accumulate = merge(accumulate, t);
            }
        }
        StorageMetrics.load.inc(add - subtract);
        cfstore.metric.liveDiskSpaceUsed.inc(add - subtract);
        // we don't subtract from total until the sstable is deleted, see TransactionLogs.SSTableTidier
        cfstore.metric.totalDiskSpaceUsed.inc(add);
        return accumulate;
    }

    // SETUP / CLEANUP

    /** Registers sstables discovered at startup: no notifications or incremental backups. */
    public void addInitialSSTables(Iterable<SSTableReader> sstables)
    {
        if (!isDummy())
            setupOnline(sstables);
        apply(updateLiveSet(emptySet(), sstables));
        maybeFail(updateSizeTracking(emptySet(), sstables, null));
        // no notifications or backup necessary
    }

    /** Registers new sstables at runtime, triggering incremental backup and notifications. */
    public void addSSTables(Iterable<SSTableReader> sstables)
    {
        addInitialSSTables(sstables);
        maybeIncrementallyBackup(sstables);
        notifyAdded(sstables);
    }

    /** (Re)initializes the tracker, purging all references. */
    @VisibleForTesting
    public void reset()
    {
        view.set(new View(
                         !isDummy() ? ImmutableList.of(new Memtable(cfstore)) : Collections.<Memtable>emptyList(),
                         ImmutableList.<Memtable>of(),
                         Collections.<SSTableReader, SSTableReader>emptyMap(),
                         Collections.<SSTableReader, SSTableReader>emptyMap(),
                         SSTableIntervalTree.empty()));
    }

    /** Drops all sstables if the backing cfstore has been invalidated (e.g. table dropped). */
    public Throwable dropSSTablesIfInvalid(Throwable accumulate)
    {
        if (!isDummy() && !cfstore.isValid())
            accumulate = dropSSTables(accumulate);
        return accumulate;
    }

    public void dropSSTables()
    {
        maybeFail(dropSSTables(null));
    }

    public Throwable dropSSTables(Throwable accumulate)
    {
        return dropSSTables(Predicates.<SSTableReader>alwaysTrue(), OperationType.UNKNOWN, accumulate);
    }

    /**
     * removes all sstables that are not busy compacting.
     */
    public Throwable dropSSTables(final Predicate<SSTableReader> remove, OperationType operationType, Throwable accumulate)
    {
        try (LogTransaction txnLogs = new LogTransaction(operationType, this))
        {
            // Atomically remove every matching, non-compacting sstable from the live set.
            Pair<View, View> result = apply(view -> {
                Set<SSTableReader> toremove = copyOf(filter(view.sstables, and(remove, notIn(view.compacting))));
                return updateLiveSet(toremove, emptySet()).apply(view);
            });
            Set<SSTableReader> removed = Sets.difference(result.left.sstables, result.right.sstables);
            assert Iterables.all(removed, remove);
            // It is important that any method accepting/returning a Throwable never throws an exception, and does its best
            // to complete the instructions given to it
            List<LogTransaction.Obsoletion> obsoletions = new ArrayList<>();
            accumulate = prepareForObsoletion(removed, txnLogs, obsoletions, accumulate);
            try
            {
                txnLogs.finish();
                if (!removed.isEmpty())
                {
                    accumulate = markObsolete(obsoletions, accumulate);
                    accumulate = updateSizeTracking(removed, emptySet(), accumulate);
                    accumulate = release(selfRefs(removed), accumulate);
                    // notifySSTablesChanged -> LeveledManifest.promote doesn't like a no-op "promotion"
                    accumulate = notifySSTablesChanged(removed, Collections.<SSTableReader>emptySet(), txnLogs.type(), accumulate);
                }
            }
            catch (Throwable t)
            {
                accumulate = abortObsoletion(obsoletions, accumulate);
                accumulate = Throwables.merge(accumulate, t);
            }
        }
        catch (Throwable t)
        {
            accumulate = Throwables.merge(accumulate, t);
        }
        return accumulate;
    }

    /**
     * Removes every SSTable in the directory from the Tracker's view.
     * @param directory the unreadable directory, possibly with SSTables in it, but not necessarily.
     */
    public void removeUnreadableSSTables(final File directory)
    {
        maybeFail(dropSSTables(new Predicate<SSTableReader>()
        {
            public boolean apply(SSTableReader reader)
            {
                return reader.descriptor.directory.equals(directory);
            }
        }, OperationType.UNKNOWN, null));
    }

    // FLUSHING

    /**
     * get the Memtable that the ordered writeOp should be directed to
     */
    public Memtable getMemtableFor(OpOrder.Group opGroup, ReplayPosition replayPosition)
    {
        // since any new memtables appended to the list after we fetch it will be for operations started
        // after us, we can safely assume that we will always find the memtable that 'accepts' us;
        // if the barrier for any memtable is set whilst we are reading the list, it must accept us.
        // there may be multiple memtables in the list that would 'accept' us, however we only ever choose
        // the oldest such memtable, as accepts() only prevents us falling behind (i.e. ensures we don't
        // assign operations to a memtable that was retired/queued before we started)
        for (Memtable memtable : view.get().liveMemtables)
        {
            if (memtable.accepts(opGroup, replayPosition))
                return memtable;
        }
        throw new AssertionError(view.get().liveMemtables.toString());
    }

    /**
     * Switch the current memtable. This atomically appends a new memtable to the end of the list of active memtables,
     * returning the previously last memtable. It leaves the previous Memtable in the list of live memtables until
     * discarding(memtable) is called. These two methods must be synchronized/paired, i.e. m = switchMemtable
     * must be followed by discarding(m), they cannot be interleaved.
     *
     * @return the previously active memtable
     */
    public Memtable switchMemtable(boolean truncating)
    {
        Memtable newMemtable = new Memtable(cfstore);
        Pair<View, View> result = apply(View.switchMemtable(newMemtable));
        if (truncating)
            notifyRenewed(newMemtable);
        else
            notifySwitched(result.left.getCurrentMemtable());
        return result.left.getCurrentMemtable();
    }

    /** Moves the given memtable from the live list to the flushing list. */
    public void markFlushing(Memtable memtable)
    {
        apply(View.markFlushing(memtable));
    }

    /**
     * Completes a flush: replaces the flushing memtable with the sstables produced from it,
     * updating metrics and notifying subscribers of the new sstables.
     */
    public void replaceFlushed(Memtable memtable, Iterable<SSTableReader> sstables)
    {
        assert !isDummy();
        if (sstables == null || Iterables.isEmpty(sstables))
        {
            // sstable may be null if we flushed batchlog and nothing needed to be retained
            // if it's null, we don't care what state the cfstore is in, we just replace it and continue
            apply(View.replaceFlushed(memtable, null));
            return;
        }
        sstables.forEach(SSTableReader::setupOnline);
        // back up before creating a new Snapshot (which makes the new one eligible for compaction)
        maybeIncrementallyBackup(sstables);
        apply(View.replaceFlushed(memtable, sstables));
        Throwable fail;
        fail = updateSizeTracking(emptySet(), sstables, null);
        // TODO: if we're invalidated, should we notifyadded AND removed, or just skip both?
        fail = notifyAdded(sstables, fail);
        notifyDiscarded(memtable);
        if (!isDummy() && !cfstore.isValid())
            dropSSTables();
        maybeFail(fail);
    }

    // MISCELLANEOUS public utility calls

    public Set<SSTableReader> getCompacting()
    {
        return view.get().compacting;
    }

    public Iterable<SSTableReader> getUncompacting()
    {
        return view.get().sstables(SSTableSet.NONCOMPACTING);
    }

    /** @return the subset of candidates not currently marked compacting */
    public Iterable<SSTableReader> getUncompacting(Iterable<SSTableReader> candidates)
    {
        return view.get().getUncompacting(candidates);
    }

    /** Hard-links the given sstables into the backups directory, if incremental backups are on. */
    public void maybeIncrementallyBackup(final Iterable<SSTableReader> sstables)
    {
        if (!DatabaseDescriptor.isIncrementalBackupsEnabled())
            return;
        for (SSTableReader sstable : sstables)
        {
            File backupsDir = Directories.getBackupsDirectory(sstable.descriptor);
            sstable.createLinks(FileUtils.getCanonicalPath(backupsDir));
        }
    }

    // NOTIFICATION

    /** Notifies subscribers of a compaction-style change; failures are accumulated, not thrown. */
    Throwable notifySSTablesChanged(Collection<SSTableReader> removed, Collection<SSTableReader> added, OperationType compactionType, Throwable accumulate)
    {
        INotification notification = new SSTableListChangedNotification(added, removed, compactionType);
        for (INotificationConsumer subscriber : subscribers)
        {
            try
            {
                subscriber.handleNotification(notification, this);
            }
            catch (Throwable t)
            {
                accumulate = merge(accumulate, t);
            }
        }
        return accumulate;
    }

    /** Notifies subscribers of newly added sstables; failures are accumulated, not thrown. */
    Throwable notifyAdded(Iterable<SSTableReader> added, Throwable accumulate)
    {
        INotification notification = new SSTableAddedNotification(added);
        for (INotificationConsumer subscriber : subscribers)
        {
            try
            {
                subscriber.handleNotification(notification, this);
            }
            catch (Throwable t)
            {
                accumulate = merge(accumulate, t);
            }
        }
        return accumulate;
    }

    public void notifyAdded(Iterable<SSTableReader> added)
    {
        maybeFail(notifyAdded(added, null));
    }

    public void notifySSTableRepairedStatusChanged(Collection<SSTableReader> repairStatusesChanged)
    {
        INotification notification = new SSTableRepairStatusChanged(repairStatusesChanged);
        for (INotificationConsumer subscriber : subscribers)
            subscriber.handleNotification(notification, this);
    }

    public void notifyDeleting(SSTableReader deleting)
    {
        INotification notification = new SSTableDeletingNotification(deleting);
        for (INotificationConsumer subscriber : subscribers)
            subscriber.handleNotification(notification, this);
    }

    public void notifyTruncated(long truncatedAt)
    {
        INotification notification = new TruncationNotification(truncatedAt);
        for (INotificationConsumer subscriber : subscribers)
            subscriber.handleNotification(notification, this);
    }

    public void notifyRenewed(Memtable renewed)
    {
        notify(new MemtableRenewedNotification(renewed));
    }

    public void notifySwitched(Memtable previous)
    {
        notify(new MemtableSwitchedNotification(previous));
    }

    public void notifyDiscarded(Memtable discarded)
    {
        notify(new MemtableDiscardedNotification(discarded));
    }

    private void notify(INotification notification)
    {
        for (INotificationConsumer subscriber : subscribers)
            subscriber.handleNotification(notification, this);
    }

    /** @return true if this tracker has no backing ColumnFamilyStore (offline/validation use) */
    public boolean isDummy()
    {
        return cfstore == null;
    }

    public void subscribe(INotificationConsumer consumer)
    {
        subscribers.add(consumer);
    }

    public void unsubscribe(INotificationConsumer consumer)
    {
        subscribers.remove(consumer);
    }

    private static Set<SSTableReader> emptySet()
    {
        return Collections.emptySet();
    }

    public View getView()
    {
        return view.get();
    }
}
| RyanMagnusson/cassandra | src/java/org/apache/cassandra/db/lifecycle/Tracker.java | Java | apache-2.0 | 17,572 |
/*
* Copyright 2004-2009 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.store.jdbc.index;
/**
 * Exercises the shared index input/output tests with a fetch-on-open input and a
 * RAM-and-file output whose RAM/file threshold is forced low (100 bytes), so writes
 * above the threshold spill to file.
 *
 * @author kimchy
 */
public class FetchOnOpenInputRAMAndFileOutputBelowThresholdTests extends AbstractIndexInputOutputTests {

    protected void setUp() throws Exception {
        super.setUp();
        // Force a tiny threshold so test data exceeds it and the file-backed path is used.
        jdbcDirectory.getSettings().getDefaultFileEntrySettings().setLongSetting(
                RAMAndFileJdbcIndexOutput.INDEX_OUTPUT_THRESHOLD_SETTING, 100);
    }

    // Tells the shared test base which concrete input implementation to expect.
    protected Class indexInputClass() {
        return FetchOnOpenJdbcIndexInput.class;
    }

    // Tells the shared test base which concrete output implementation to expect.
    protected Class indexOutputClass() {
        return RAMAndFileJdbcIndexOutput.class;
    }
}
| baboune/compass | src/main/test/org/apache/lucene/store/jdbc/index/FetchOnOpenInputRAMAndFileOutputBelowThresholdTests.java | Java | apache-2.0 | 1,229 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.wicket.cdi.testapp;
import javax.enterprise.context.Conversation;
import javax.inject.Inject;
import org.apache.wicket.markup.html.WebPage;
import org.apache.wicket.markup.html.basic.Label;
import org.apache.wicket.markup.html.link.Link;
import org.apache.wicket.model.PropertyModel;
import org.apache.wicket.request.mapper.parameter.PageParameters;
/**
 * Test page that begins a CDI conversation on construction and exposes a
 * conversation-scoped counter plus navigation links used by the CDI integration tests.
 *
 * @author jsarman
 */
public class TestConversationPage extends WebPage
{
	private static final long serialVersionUID = 1L;

	@Inject
	Conversation conversation;
	@Inject
	TestConversationBean counter;

	public TestConversationPage()
	{
		this(new PageParameters());
	}

	public TestConversationPage(final PageParameters parameters)
	{
		super(parameters);
		// Promote the request-scoped conversation to long-running so state survives requests.
		conversation.begin();
		System.out.println("Opened Conversion with id = " + conversation.getId());

		add(new Label("count", new PropertyModel<String>(this, "counter.countStr")));

		add(new Link<Void>("increment")
		{
			private static final long serialVersionUID = 1L;

			@Override
			public void onClick()
			{
				counter.increment();
			}
		});
		add(new Link<Void>("next")
		{
			private static final long serialVersionUID = 1L;

			@Override
			public void onClick()
			{
				String pageType = parameters.get("pageType").toString("nonbookmarkable");
				// equalsIgnoreCase instead of equals(toLowerCase()): the latter is
				// locale-sensitive (e.g. Turkish dotless-i) and computed the lowercase twice.
				if ("bookmarkable".equalsIgnoreCase(pageType))
					setResponsePage(TestNonConversationalPage.class);
				else if ("hybrid".equalsIgnoreCase(pageType))
					setResponsePage(TestConversationPage.this);
				else
					setResponsePage(new TestNonConversationalPage());
			}
		});
	}
}
| dashorst/wicket | wicket-cdi-1.1/src/test/java/org/apache/wicket/cdi/testapp/TestConversationPage.java | Java | apache-2.0 | 2,394 |
/*
* Copyright (c) 2008-2021, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hazelcast.transaction.impl.xa;
import com.hazelcast.transaction.TransactionContext;
import com.hazelcast.transaction.impl.Transaction;
/**
 * Internal helper that extracts the underlying {@link Transaction} from an XA
 * transaction context. Utility class: not instantiable.
 */
public final class TransactionAccessor {

    private TransactionAccessor() {
    }

    /**
     * Returns the transaction backing the given context.
     *
     * @param ctx the transaction context; must be an {@link XATransactionContextImpl}
     * @return the underlying transaction
     * @throws IllegalArgumentException if the context is not an XA transaction context
     */
    public static Transaction getTransaction(TransactionContext ctx) {
        // Guard clause with a diagnostic message (the original threw without any context).
        if (!(ctx instanceof XATransactionContextImpl)) {
            throw new IllegalArgumentException("Expected XATransactionContextImpl but got: " + ctx);
        }
        return ((XATransactionContextImpl) ctx).getTransaction();
    }
}
| emre-aydin/hazelcast | hazelcast/src/main/java/com/hazelcast/transaction/impl/xa/TransactionAccessor.java | Java | apache-2.0 | 1,173 |
package action
import (
"errors"
boshas "github.com/cloudfoundry/bosh-agent/agent/applier/applyspec"
boshscript "github.com/cloudfoundry/bosh-agent/agent/script"
boshdrain "github.com/cloudfoundry/bosh-agent/agent/script/drain"
boshjobsuper "github.com/cloudfoundry/bosh-agent/jobsupervisor"
boshnotif "github.com/cloudfoundry/bosh-agent/notification"
bosherr "github.com/cloudfoundry/bosh-utils/errors"
boshlog "github.com/cloudfoundry/bosh-utils/logger"
)
// DrainAction implements the agent's 'drain' action: it runs each job's drain
// script before a job update or VM shutdown, and supports cancellation.
type DrainAction struct {
	jobScriptProvider boshscript.JobScriptProvider // builds per-job drain scripts
	notifier          boshnotif.Notifier           // used to announce shutdown (DrainTypeShutdown)
	specService       boshas.V1Service             // source of the currently applied spec
	jobSupervisor     boshjobsuper.JobSupervisor   // jobs are unmonitored before draining

	logTag string
	logger boshlog.Logger

	// cancelCh is buffered (size 1) so Cancel never blocks; a pending value
	// signals a running drain to cancel its script.
	cancelCh chan struct{}
}
// DrainType identifies why a drain is being performed.
type DrainType string

const (
	// DrainTypeUpdate precedes applying a new job spec.
	DrainTypeUpdate DrainType = "update"
	// DrainTypeStatus is a legacy Director-driven mode; no longer expected (see determineParams).
	DrainTypeStatus DrainType = "status"
	// DrainTypeShutdown precedes shutting the VM down.
	DrainTypeShutdown DrainType = "shutdown"
)
// NewDrain constructs a DrainAction wired to the given collaborators.
// The cancel channel is buffered so that Cancel never blocks the caller.
func NewDrain(
	notifier boshnotif.Notifier,
	specService boshas.V1Service,
	jobScriptProvider boshscript.JobScriptProvider,
	jobSupervisor boshjobsuper.JobSupervisor,
	logger boshlog.Logger,
) DrainAction {
	action := DrainAction{
		notifier:          notifier,
		specService:       specService,
		jobScriptProvider: jobScriptProvider,
		jobSupervisor:     jobSupervisor,

		logTag: "Drain Action",
		logger: logger,

		cancelCh: make(chan struct{}, 1),
	}
	return action
}
// IsAsynchronous reports that drain runs as an async task (it may take a long time).
func (a DrainAction) IsAsynchronous() bool {
	return true
}
// IsPersistent reports that drain is not resumed across agent restarts.
func (a DrainAction) IsPersistent() bool {
	return false
}
// Run unmonitors all jobs and executes every job's drain script in parallel,
// blocking until the scripts finish or a cancel request arrives. The int result
// is always 0 (dynamic drain is handled inside the scripts themselves).
func (a DrainAction) Run(drainType DrainType, newSpecs ...boshas.V1ApplySpec) (int, error) {
	currentSpec, err := a.specService.Get()
	if err != nil {
		return 0, bosherr.WrapError(err, "Getting current spec")
	}

	// Build the old-spec/new-spec parameters handed to each drain script.
	params, err := a.determineParams(drainType, currentSpec, newSpecs)
	if err != nil {
		return 0, err
	}

	a.logger.Debug(a.logTag, "Unmonitoring")

	// Stop the supervisor from restarting jobs while they are being drained.
	err = a.jobSupervisor.Unmonitor()
	if err != nil {
		return 0, bosherr.WrapError(err, "Unmonitoring services")
	}

	var scripts []boshscript.Script

	for _, job := range currentSpec.Jobs() {
		script := a.jobScriptProvider.NewDrainScript(job.BundleName(), params)
		scripts = append(scripts, script)
	}

	script := a.jobScriptProvider.NewParallelScript("drain", scripts)

	// Run the scripts in a goroutine so we can race completion against cancellation.
	// resultsCh is buffered so the goroutine never leaks if we return via cancel.
	resultsCh := make(chan error, 1)
	go func() { resultsCh <- script.Run() }()

	select {
	case result := <-resultsCh:
		a.logger.Debug(a.logTag, "Got a result")
		return 0, result
	case <-a.cancelCh:
		a.logger.Debug(a.logTag, "Got a cancel request")
		return 0, script.Cancel()
	}
}
// determineParams translates the drain type (plus optional new spec) into the
// parameters passed to drain scripts; for shutdown it also notifies interested
// parties (e.g. the health monitor) before draining begins.
func (a DrainAction) determineParams(drainType DrainType, currentSpec boshas.V1ApplySpec, newSpecs []boshas.V1ApplySpec) (boshdrain.ScriptParams, error) {
	var newSpec *boshas.V1ApplySpec
	var params boshdrain.ScriptParams

	// At most one new spec is meaningful; extras are ignored.
	if len(newSpecs) > 0 {
		newSpec = &newSpecs[0]
	}

	switch drainType {
	case DrainTypeStatus:
		// Status was used in the past when dynamic drain was implemented in the Director.
		// Now that we implement it in the agent, we should never get a call for this type.
		return params, bosherr.Error("Unexpected call with drain type 'status'")

	case DrainTypeUpdate:
		if newSpec == nil {
			return params, bosherr.Error("Drain update requires new spec")
		}

		params = boshdrain.NewUpdateParams(currentSpec, *newSpec)

	case DrainTypeShutdown:
		err := a.notifier.NotifyShutdown()
		if err != nil {
			return params, bosherr.WrapError(err, "Notifying shutdown")
		}

		params = boshdrain.NewShutdownParams(currentSpec, newSpec)
	}

	return params, nil
}
// Resume is not supported: drain is not a persistent action (see IsPersistent).
func (a DrainAction) Resume() (interface{}, error) {
	return nil, errors.New("not supported")
}
// Cancel requests cancellation of a running drain. The send is non-blocking:
// the buffered channel already holds a pending cancel if the default branch fires,
// so repeated calls are safe and never block.
func (a DrainAction) Cancel() error {
	a.logger.Debug(a.logTag, "Cancelling drain action")
	select {
	case a.cancelCh <- struct{}{}:
	default:
	}
	return nil
}
| cloudfoundry/bosh-init | vendor/github.com/cloudfoundry/bosh-agent/agent/action/drain.go | GO | apache-2.0 | 3,726 |
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from textwrap import dedent
from pants.backend.python.tasks2.gather_sources import GatherSources
from pants.backend.python.tasks2.python_binary_create import PythonBinaryCreate
from pants.backend.python.tasks2.select_interpreter import SelectInterpreter
from pants.base.run_info import RunInfo
from pants_test.backend.python.tasks.python_task_test_base import PythonTaskTestBase
class PythonBinaryCreateTest(PythonTaskTestBase):
  """Verifies that PythonBinaryCreate builds a pex for a python_binary target
  and registers it both as a 'deployable_archives' product and as a copy in dist/.
  """

  @classmethod
  def task_type(cls):
    return PythonBinaryCreate

  def setUp(self):
    super(PythonBinaryCreateTest, self).setUp()

    # A library with one entry point, and a binary target depending on it.
    self.library = self.create_python_library('src/python/lib', 'lib', {'lib.py': dedent("""
    import os


    def main():
      os.getcwd()
    """)})

    self.binary = self.create_python_binary('src/python/bin', 'bin', 'lib.lib:main',
                                            dependencies=['//src/python/lib'])

    # The easiest way to create products required by the PythonBinaryCreate task is to
    # execute the relevant tasks.
    si_task_type = self.synthesize_task_subtype(SelectInterpreter, 'si_scope')
    gs_task_type = self.synthesize_task_subtype(GatherSources, 'gs_scope')

    self.task_context = self.context(for_task_types=[si_task_type, gs_task_type],
                                     target_roots=[self.binary])
    self.run_info_dir = os.path.join(self.pants_workdir, self.options_scope, 'test/info')
    self.task_context.run_tracker.run_info = RunInfo(self.run_info_dir)

    # Run interpreter selection and source gathering to populate upstream products.
    si_task_type(self.task_context, os.path.join(self.pants_workdir, 'si')).execute()
    gs_task_type(self.task_context, os.path.join(self.pants_workdir, 'gs')).execute()

    self.test_task = self.create_task(self.task_context)
    self.dist_root = os.path.join(self.build_root, 'dist')

  def _check_products(self, bin_name):
    # The product mapping should associate the binary target with exactly one pex file.
    pex_name = '{}.pex'.format(bin_name)
    products = self.task_context.products.get('deployable_archives')
    self.assertIsNotNone(products)
    product_data = products.get(self.binary)
    product_basedir = product_data.keys()[0]
    self.assertEquals(product_data[product_basedir], [pex_name])

    # Check pex copy.
    pex_copy = os.path.join(self.dist_root, pex_name)
    self.assertTrue(os.path.isfile(pex_copy))

  def test_deployable_archive_products(self):
    self.test_task.execute()
    self._check_products('bin')
| 15Dkatz/pants | tests/python/pants_test/backend/python/tasks2/test_python_binary_create.py | Python | apache-2.0 | 2,644 |
package org.traccar.protocol;
import org.junit.Test;
import org.traccar.ProtocolTest;
/**
 * Decoder tests for the CGuard text protocol: NV lines carry positions, BC lines carry
 * device attributes, IDRO/VERSION/PING are protocol housekeeping and yield no message.
 * The fixture strings are real device frames and must not be altered.
 */
public class CguardProtocolDecoderTest extends ProtocolTest {

    @Test
    public void testDecode() throws Exception {

        CguardProtocolDecoder decoder = new CguardProtocolDecoder(new CguardProtocol());

        // Device identification frame: consumed but produces no position/attributes.
        verifyNothing(decoder, text(
                "IDRO:354868050655283"));

        verifyPosition(decoder, text(
                "NV:161007 122043:55.812730:37.733689:3.62:NAN:244.05:143.4"));

        verifyPosition(decoder, text(
                "NV:161007 122044:55.812732:37.733670:3.97:NAN:260.95:143.9"));

        verifyAttributes(decoder, text(
                "BC:161007 122044:CSQ1:77:NSQ1:18:BAT1:100"));

        verifyPosition(decoder, text(
                "NV:160711 044023:54.342907:48.582590:0:NAN:0:110.1"));

        // Western hemisphere: negative longitude must be parsed correctly.
        verifyPosition(decoder, text(
                "NV:160711 044023:54.342907:-148.582590:0:NAN:0:110.1"));

        verifyAttributes(decoder, text(
                "BC:160711 044023:CSQ1:48:NSQ1:7:NSQ2:1:BAT1:98:PWR1:11.7:CLG1:NAN"));

        verifyAttributes(decoder, text(
                "BC:160711 044524:CSQ1:61:NSQ1:18:BAT1:98:PWR1:11.7:CLG1:NAN"));

        verifyNothing(decoder, text(
                "VERSION:3.3"));

        verifyPosition(decoder, text(
                "NV:160420 101902:55.799425:37.674033:0.94:NAN:213.59:156.6"));

        verifyAttributes(decoder, text(
                "BC:160628 081024:CSQ1:32:NSQ1:10:BAT1:100"));

        verifyAttributes(decoder, text(
                "BC:160628 081033:NSQ2:0"));

        verifyPosition(decoder, text(
                "NV:160630 151537:55.799913:37.674267:0.7:NAN:10.21:174.9"));

        verifyAttributes(decoder, text(
                "BC:160630 153316:BAT1:76"));

        verifyAttributes(decoder, text(
                "BC:160630 153543:NSQ2:0"));

        // Keep-alive frame: consumed but produces no message.
        verifyNothing(decoder, text(
                "PING"));

    }

}
| duke2906/traccar | test/org/traccar/protocol/CguardProtocolDecoderTest.java | Java | apache-2.0 | 1,939 |
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
class DistCogdoCraneMoneyBagAI(DistributedObjectAI):
    """AI-side distributed object for a cogdominium crane money bag.

    Both distributed-message handlers below are currently stubs (no behavior
    implemented on the AI side).
    """
    notify = DirectNotifyGlobal.directNotify.newCategory("DistCogdoCraneMoneyBagAI")

    def setIndex(self, todo0):
        # Stub: index assignment from the distributed field is not handled yet.
        pass

    def requestInitial(self):
        # Stub: client request for initial state is not handled yet.
        pass
| silly-wacky-3-town-toon/SOURCE-COD | toontown/cogdominium/DistCogdoCraneMoneyBagAI.py | Python | apache-2.0 | 351 |
#
# Lexical analyzer for JSON
# Copyright (C) 2003,2005 Rafael R. Sevilla <dido@imperium.ph>
# This file is part of JSON for Ruby
#
# JSON for Ruby is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1 of
# the License, or (at your option) any later version.
#
# JSON for Ruby is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with JSON for Ruby; if not, write to the Free
# Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
# 02111-1307 USA.
#
# Author:: Rafael R. Sevilla (mailto:dido@imperium.ph)
# Some bugs fixed by Adam Kramer (mailto:adam@the-kramers.net)
# Copyright:: Copyright (c) 2003,2005 Rafael R. Sevilla
# License:: GNU Lesser General Public License
#
require 'json/objects'
require 'cgi'
module JSON
  VERSION ||= '1.1.2'

  # Tokenizer for JSON text. A Lexer wraps a source string and hands out
  # characters, strings and complete JSON values on demand; the container
  # classes in json/objects (Hash#from_json, Array#from_json) call back into
  # it to parse nested structures.
  class Lexer
    # This method will initialize the lexer to contain a string.
    # =====Parameters
    # +s+:: the string to initialize the lexer object with
    def initialize(s)
      @index = 0
      @source = s
    end

    # Backs up the lexer status one character.
    def back
      @index -= 1 if @index > 0
    end

    # Returns true while there are unread characters left.
    def more?
      return(@index < @source.length)
    end

    # Consumes the next character; returns "\0" once the input is exhausted.
    def nextchar
      c = self.more?() ? @source[@index, 1] : "\0"
      @index += 1
      return(c)
    end

    # Consumes the next character and checks that it matches a specified
    # character; raises a RuntimeError otherwise.
    def nextmatch(char)
      n = self.nextchar
      raise "Expected '#{char}' and instead saw '#{n}'." if (n != char)
      return(n)
    end

    # Reads the next n characters from the string in the lexer.
    # =====Parameters
    # +n+:: the number of characters to read from the lexer
    def nextchars(n)
      raise "substring bounds error" if (@index + n > @source.length)
      i = @index
      @index += n
      return(@source[i, n])
    end

    # Returns the next significant character, skipping whitespace and
    # JavaScript-style // and /* */ comments.
    def nextclean
      while true
        c = self.nextchar()
        if (c == '/')
          case self.nextchar()
          when '/'
            # Line comment: consume to end of line (or end of input).
            c = self.nextchar()
            while c != "\n" && c != "\r" && c != "\0"
              c = self.nextchar()
            end
          when '*'
            # Block comment: consume through the closing */.
            while true
              c = self.nextchar()
              raise "unclosed comment" if (c == "\0")
              if (c == '*')
                break if (self.nextchar() == '/')
                self.back()
              end
            end
          else
            # A lone slash is significant; put back the look-ahead character.
            self.back()
            return '/'
          end
        elsif c == "\0" || c[0] > " "[0]
          # End of input or anything above the space character is significant.
          return(c)
        end
      end
    end

    # Given a Unicode code point, return a string giving its UTF-8
    # representation based on RFC 2279.
    #
    # BUG FIXES: the continuation-byte mask for the final byte was
    # 0b0011111 (5 bits) in the 3-byte and longer encodings, silently
    # dropping bit 5, and the 5- and 6-byte leader prefixes were wrong
    # (0b11110000/0b11111000 instead of 0b11111000/0b11111100).
    def utf8str(code)
      if (code & ~(0x7f)) == 0
        # UCS-4 range 0x00000000 - 0x0000007F
        return(code.chr)
      end
      buf = ""
      if (code & ~(0x7ff)) == 0
        # UCS-4 range 0x00000080 - 0x000007FF
        buf << (0b11000000 | (code >> 6)).chr
        buf << (0b10000000 | (code & 0b00111111)).chr
        return(buf)
      end
      if (code & ~(0x000ffff)) == 0
        # UCS-4 range 0x00000800 - 0x0000FFFF
        buf << (0b11100000 | (code >> 12)).chr
        buf << (0b10000000 | ((code >> 6) & 0b00111111)).chr
        buf << (0b10000000 | (code & 0b00111111)).chr
        return(buf)
      end
      # Not used -- JSON only has UCS-2, but for the sake of completeness.
      if (code & ~(0x1FFFFF)) == 0
        # UCS-4 range 0x00010000 - 0x001FFFFF
        buf << (0b11110000 | (code >> 18)).chr
        buf << (0b10000000 | ((code >> 12) & 0b00111111)).chr
        buf << (0b10000000 | ((code >> 6) & 0b00111111)).chr
        buf << (0b10000000 | (code & 0b00111111)).chr
        return(buf)
      end
      if (code & ~(0x03FFFFFF)) == 0
        # UCS-4 range 0x00200000 - 0x03FFFFFF (5-byte leader is 111110xx)
        buf << (0b11111000 | (code >> 24)).chr
        buf << (0b10000000 | ((code >> 18) & 0b00111111)).chr
        buf << (0b10000000 | ((code >> 12) & 0b00111111)).chr
        buf << (0b10000000 | ((code >> 6) & 0b00111111)).chr
        buf << (0b10000000 | (code & 0b00111111)).chr
        return(buf)
      end
      # UCS-4 range 0x04000000 - 0x7FFFFFFF (6-byte leader is 1111110x)
      buf << (0b11111100 | (code >> 30)).chr
      buf << (0b10000000 | ((code >> 24) & 0b00111111)).chr
      buf << (0b10000000 | ((code >> 18) & 0b00111111)).chr
      buf << (0b10000000 | ((code >> 12) & 0b00111111)).chr
      buf << (0b10000000 | ((code >> 6) & 0b00111111)).chr
      buf << (0b10000000 | (code & 0b00111111)).chr
      return(buf)
    end

    # Reads the next string, given a quote character (usually ' or ").
    # The opening quote must already have been consumed.
    # =====Parameters
    # +quot+:: the matching quote character that terminates the string
    def nextstring(quot)
      buf = ""
      while true
        c = self.nextchar()
        case c
        when /\0|\n|\r/
          # BUG FIX: the regex was /\0|\n\r/, which only matched a literal
          # "\n\r" pair, so bare newlines/carriage returns (and thus most
          # unterminated strings) slipped through undetected.
          raise "Unterminated string"
        when "\\"
          chr = self.nextchar()
          case chr
          when 'b' then buf << "\b"
          when 't' then buf << "\t"
          when 'n' then buf << "\n"
          when 'f' then buf << "\f"
          when 'r' then buf << "\r"
          when 'u'
            # \uXXXX escape: four hex digits, emitted as UTF-8 bytes.
            buf << utf8str(Integer("0x" + self.nextchars(4)))
          else
            # Any other escaped character stands for itself.
            buf << chr
          end
        else
          return(buf) if (c == quot)
          buf << c
        end
      end
    end

    # Reads characters up to (but not including) the first one that matches
    # +regex+, end of line, or end of input; the trailing newline is chomped.
    def nextto(regex)
      buf = ""
      while (true)
        c = self.nextchar()
        # BUG FIX: the terminators were written single-quoted ('\0', '\n',
        # '\r'), i.e. two-character backslash sequences that never equal the
        # real control characters nextchar returns.
        if !(regex =~ c).nil? || c == "\0" || c == "\n" || c == "\r"
          self.back() if (c != "\0")
          return(buf.chomp())
        end
        buf += c
      end
    end

    # Reads the next value from the string. This can return either a
    # string, an Integer, a Float, true/false/nil, a JSON array, or a
    # JSON object.
    def nextvalue
      c = self.nextclean
      case c
      when /\"|\'/
        return(self.nextstring(c))
      when '{'
        self.back()
        return(Hash.new.from_json(self))
      when '['
        self.back()
        return(Array.new.from_json(self))
      else
        # Accumulate a bare token up to the next delimiter / end of input.
        buf = ""
        while ((c =~ /"| |:|,|\]|\}|\/|\0/).nil?)
          buf += c
          c = self.nextchar()
        end
        self.back()
        s = buf.chomp
        case s
        when "true"
          return(true)
        when "false"
          return(false)
        when "null"
          return(nil)
        when /\A[0-9.+-]/
          # BUG FIX: the regex was /^[0-9]|\.|-|\+/, whose unanchored
          # alternatives matched '.', '-' or '+' anywhere in the token, so
          # words like "a.b" were wrongly handed to Integer()/Float().
          if s =~ /[.]/ then
            return Float(s)
          else
            return Integer(s)
          end
        end
        # An empty token (immediate delimiter) is treated as nil.
        return(s == "" ? nil : s)
      end
    end

    # Skip to the next instance of the character specified; the lexer is left
    # positioned just before it. If the character is not found, the position
    # is restored and "\0" is returned.
    # =====Parameters
    # +to+:: Character to skip to
    def skipto(to)
      start = @index
      loop {
        c = self.nextchar()
        # BUG FIX: the end-of-input sentinel was written as '\0'
        # (single-quoted), which never equals the real NUL nextchar returns,
        # so a missing target character caused an infinite loop.
        if (c == "\0")
          @index = start
          return(c)
        end
        if (c == to)
          self.back
          return(c)
        end
      }
    end

    # CGI-unescapes the remaining source in place.
    def unescape
      @source = CGI::unescape(@source)
    end

    # Skip past the next instance of the string specified.
    # =====Parameters
    # +to+:: the character (or substring) to skip past
    def skippast(to)
      @index = @source.index(to, @index)
      @index = (@index.nil?) ? @source.length : @index + to.length
    end

    # Yields each successive value until nextvalue returns nil or false.
    def each
      while (n = nextvalue)
        yield(n)
      end
    end
  end
end
| bizo/aws-tools | emr/elastic-mapreduce-ruby-20131216/json/lexer.rb | Ruby | apache-2.0 | 7,352 |
/*-
* * Copyright 2017 Skymind, Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*/
package org.deeplearning4j.util;
import java.util.Random;
/**
* Created by Alex on 24/01/2017.
*/
public class RandomUtils {
/**
* Randomly shuffle the specified integer array using a Fisher-Yates shuffle algorithm
* @param toShuffle Array to shuffle
* @param random RNG to use for shuffling
*/
public static void shuffleInPlace(int[] toShuffle, Random random) {
//Fisher-Yates shuffle: https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle
for (int i = 0; i < toShuffle.length - 1; i++) {
int j = i + random.nextInt(toShuffle.length - i);
int temp = toShuffle[i];
toShuffle[i] = toShuffle[j];
toShuffle[j] = temp;
}
}
}
| shuodata/deeplearning4j | deeplearning4j-core/src/test/java/org/deeplearning4j/util/RandomUtils.java | Java | apache-2.0 | 1,403 |
package com.vaadin.tests.components.customcomponent;
import com.vaadin.tests.components.TestBase;
import com.vaadin.ui.Button.ClickEvent;
import com.vaadin.ui.Button.ClickListener;
import com.vaadin.ui.CustomComponent;
import com.vaadin.ui.NativeButton;
public class CustomComponentSizeUpdate extends TestBase {

    @Override
    protected void setup() {
        final NativeButton shrinkButton = new NativeButton(
                "100%x100% button. Click to reduce CustomComponent size");
        shrinkButton.setSizeFull();

        final CustomComponent wrapper = new CustomComponent(shrinkButton);
        wrapper.setWidth("500px");
        wrapper.setHeight("500px");

        // Every click shrinks the wrapper by 20px in both dimensions; the
        // full-size button inside should follow the wrapper's new size.
        shrinkButton.addClickListener(new ClickListener() {
            @Override
            public void buttonClick(ClickEvent event) {
                wrapper.setWidth((wrapper.getWidth() - 20) + "px");
                wrapper.setHeight((wrapper.getHeight() - 20) + "px");
            }
        });

        addComponent(wrapper);
    }

    @Override
    protected String getDescription() {
        return "Click the button to reduce the size of the parent. The button should be resized to fit the parent.";
    }

    @Override
    protected Integer getTicketNumber() {
        return 3705;
    }
}
| jdahlstrom/vaadin.react | uitest/src/main/java/com/vaadin/tests/components/customcomponent/CustomComponentSizeUpdate.java | Java | apache-2.0 | 1,199 |
public interface I {
    /** Single abstract method of this interface; implementors define the behavior. */
    void foo();
}
/*
-----------------------------------------------------------------------------
This source file is part of OGRE
(Object-oriented Graphics Rendering Engine)
For the latest info, see http://www.ogre3d.org/
Copyright (c) 2000-2013 Torus Knot Software Ltd
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
-----------------------------------------------------------------------------
*/
/*
-----------------------------------------------------------------------------
Filename: WindowEmbedding.cpp
Description: Stuff your windows full of OGRE
-----------------------------------------------------------------------------
*/
#include "Ogre.h"
using namespace Ogre;
void setupResources(void); // Just a prototype
void setupResources(void)
{
// Load resource paths from config file
ConfigFile cf;
cf.load("resources.cfg");
// Go through all sections & settings in the file
ConfigFile::SectionIterator seci = cf.getSectionIterator();
String secName, typeName, archName;
while (seci.hasMoreElements())
{
secName = seci.peekNextKey();
ConfigFile::SettingsMultiMap *settings = seci.getNext();
ConfigFile::SettingsMultiMap::iterator i;
for (i = settings->begin(); i != settings->end(); ++i)
{
typeName = i->first;
archName = i->second;
ResourceGroupManager::getSingleton().addResourceLocation(
archName, typeName, secName);
}
}
}
//---------------------------------------------------------------------
// Windows Test
//---------------------------------------------------------------------
#if OGRE_PLATFORM == OGRE_PLATFORM_WIN32
#include "windows.h"
// Shared state between the window procedure and the message loop.
RenderWindow* renderWindow = 0; // created in EmbeddedMain; null until then
bool winActive = false;         // set from WM_ACTIVATE: window currently active?
bool winSizing = false;         // true between WM_ENTERSIZEMOVE and WM_EXITSIZEMOVE
// Window procedure for the embedding host window. Keeps the global
// renderWindow in sync with activation, move/size and paint messages so
// OGRE renders correctly inside an externally created HWND.
LRESULT CALLBACK TestWndProc( HWND hWnd, UINT uMsg, WPARAM wParam, LPARAM lParam )
{
    if (uMsg == WM_CREATE)
    {
        return 0;
    }

    // Until EmbeddedMain has created the render window there is nothing
    // to keep in sync; fall through to default handling.
    if (!renderWindow)
        return DefWindowProc(hWnd, uMsg, wParam, lParam);

    switch( uMsg )
    {
    case WM_ACTIVATE:
        // LOWORD(wParam) is the activation state.
        winActive = (LOWORD(wParam) != WA_INACTIVE);
        break;
    case WM_ENTERSIZEMOVE:
        // User started a move/size drag; defer resizes until it ends.
        winSizing = true;
        break;
    case WM_EXITSIZEMOVE:
        // Drag finished: apply the final size and repaint once.
        renderWindow->windowMovedOrResized();
        renderWindow->update();
        winSizing = false;
        break;
    case WM_MOVE:
    case WM_SIZE:
        // Programmatic moves/resizes (outside a drag) are applied immediately.
        if (!winSizing)
            renderWindow->windowMovedOrResized();
        break;
    case WM_GETMINMAXINFO:
        // Prevent the window from going smaller than some min size
        ((MINMAXINFO*)lParam)->ptMinTrackSize.x = 100;
        ((MINMAXINFO*)lParam)->ptMinTrackSize.y = 100;
        break;
    case WM_CLOSE:
        renderWindow->destroy(); // cleanup and call DestroyWindow
        PostQuitMessage(0);
        return 0;
    case WM_PAINT:
        // Let OGRE repaint unless a drag is in progress.
        if (!winSizing)
        {
            renderWindow->update();
            return 0;
        }
        break;
    }

    return DefWindowProc( hWnd, uMsg, wParam, lParam );
}
// Entry point for the embedding demo: creates a plain Win32 window, boots
// OGRE without the config dialog, attaches a render window to the existing
// HWND, builds a minimal scene and pumps the Windows message loop.
INT WINAPI EmbeddedMain( HINSTANCE hInst, HINSTANCE, LPSTR strCmdLine, INT )
{
    try
    {
        // Create a new window

        // Style & size
        DWORD dwStyle = WS_VISIBLE | WS_CLIPCHILDREN | WS_CLIPSIBLINGS | WS_OVERLAPPEDWINDOW;

        // Register the window class
        WNDCLASS wc = { 0, TestWndProc, 0, 0, hInst,
            LoadIcon(0, IDI_APPLICATION), LoadCursor(NULL, IDC_ARROW),
            (HBRUSH)GetStockObject(BLACK_BRUSH), 0, "TestWnd" };
        RegisterClass(&wc);

        HWND hwnd = CreateWindow("TestWnd", "Test embedding", dwStyle,
            0, 0, 800, 600, 0, 0, hInst, 0);

        // Empty plugin/config file names: plugins are loaded manually below
        // and no ogre.cfg dialog is shown.
        Root root("", "");
        root.loadPlugin("RenderSystem_GL");
        //root.loadPlugin("RenderSystem_Direct3D9");
        root.loadPlugin("Plugin_ParticleFX");
        root.loadPlugin("Plugin_CgProgramManager");

        // select first renderer & init with no window
        root.setRenderSystem(*(root.getAvailableRenderers().begin()));
        root.initialise(false);

        // create first window manually, bound to the Win32 window above
        NameValuePairList options;
        options["externalWindowHandle"] =
            StringConverter::toString((size_t)hwnd);

        renderWindow = root.createRenderWindow("embedded", 800, 600, false, &options);

        setupResources();
        ResourceGroupManager::getSingleton().initialiseAllResourceGroups();

        // Minimal scene: one camera/viewport, an ogre head and a light.
        SceneManager *scene = root.createSceneManager(Ogre::ST_GENERIC, "default");

        Camera *cam = scene->createCamera("cam");

        Viewport* vp = renderWindow->addViewport(cam);
        vp->setBackgroundColour(Ogre::ColourValue(0.5, 0.5, 0.7));
        cam->setAutoAspectRatio(true);
        cam->setPosition(0,0,300);
        cam->setDirection(0,0,-1);

        Entity* e = scene->createEntity("1", "ogrehead.mesh");
        scene->getRootSceneNode()->createChildSceneNode()->attachObject(e);
        Light* l = scene->createLight("l");
        l->setPosition(300, 100, -100);

        // message loop; rendering happens from WM_PAINT in TestWndProc.
        MSG msg;
        while(GetMessage(&msg, NULL, 0, 0 ) != 0)
        {
            TranslateMessage(&msg);
            DispatchMessage(&msg);
        }
    }
    catch( Exception& e )
    {
        MessageBox( NULL, e.getFullDescription().c_str(),
            "An exception has occurred!", MB_OK | MB_ICONERROR | MB_TASKMODAL);
    }

    return 0;
}
#endif
| cesarpazguzman/The-Eternal-Sorrow | dependencies/Ogre/Tests/PlayPen/src/WindowEmbedding.cpp | C++ | apache-2.0 | 5,719 |
//===-- main.c --------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include <stdio.h>
#include <stdint.h>
// This simple program is to test the lldb Python API related to process.
// Globals inspected by the lldb test harness via the SB API; the comment
// markers below are matched by the Python tests, so they must not change.
char my_char = 'u';
char my_cstring[] = "lldb.SBProcess.ReadCStringFromMemory() works!";
char *my_char_ptr = (char *)"Does it work?";
uint32_t my_uint32 = 12345;
int my_int = 0;

int main (int argc, char const *argv[])
{
    // Increment my_char three times ('u' -> 'x') so the debugger can watch
    // its value change across iterations.
    for (int i = 0; i < 3; ++i) {
        printf("my_char='%c'\n", my_char);
        ++my_char;
    }

    printf("after the loop: my_char='%c'\n", my_char); // 'my_char' should print out as 'x'.

    return 0; // Set break point at this line and check variable 'my_char'.
              // Use lldb Python API to set memory content for my_int and check the result.
}
| apple/swift-lldb | packages/Python/lldbsuite/test/python_api/process/main.cpp | C++ | apache-2.0 | 1,072 |
// Copyright (c) Microsoft. All Rights Reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using Microsoft.CodeAnalysis.CSharp.Syntax;
using Microsoft.CodeAnalysis.CSharp.Test.Utilities;
using Microsoft.CodeAnalysis.Test.Utilities;
using Xunit;
namespace Microsoft.CodeAnalysis.CSharp.UnitTests
{
public partial class IOperationTests : SemanticModelTestBase
{
[CompilerTrait(CompilerFeature.IOperation)]
[Fact]
public void IConditionalAccessExpression_SimpleMethodAccess()
{
    // Verifies the IOperation tree for a conditional method invocation
    // (o?.ToString()): a ConditionalAccess whose WhenNotNull invokes
    // ToString on the implicit ConditionalAccessInstance receiver.
    string source = @"
using System;

public class C1
{
    public void M()
    {
        var o = new object();
        /*<bind>*/o?.ToString()/*</bind>*/;
    }
}
";
    string expectedOperationTree = @"
IConditionalAccessOperation (OperationKind.ConditionalAccess, Type: System.String) (Syntax: 'o?.ToString()')
Operation:
ILocalReferenceOperation: o (OperationKind.LocalReference, Type: System.Object) (Syntax: 'o')
WhenNotNull:
IInvocationOperation (virtual System.String System.Object.ToString()) (OperationKind.Invocation, Type: System.String) (Syntax: '.ToString()')
Instance Receiver:
IConditionalAccessInstanceOperation (OperationKind.ConditionalAccessInstance, Type: System.Object, IsImplicit) (Syntax: 'o')
Arguments(0)
";
    var expectedDiagnostics = DiagnosticDescription.None;

    VerifyOperationTreeAndDiagnosticsForTest<ConditionalAccessExpressionSyntax>(source, expectedOperationTree, expectedDiagnostics);
}
[CompilerTrait(CompilerFeature.IOperation)]
[Fact]
public void IConditionalAccessExpression_SimplePropertyAccess()
{
    // Verifies the IOperation tree for a conditional property access
    // (c1?.Prop1): note the overall type is lifted to System.Int32?.
    string source = @"
using System;

public class C1
{
    int Prop1 { get; }
    public void M()
    {
        C1 c1 = null;
        var prop = /*<bind>*/c1?.Prop1/*</bind>*/;
    }
}
";
    string expectedOperationTree = @"
IConditionalAccessOperation (OperationKind.ConditionalAccess, Type: System.Int32?) (Syntax: 'c1?.Prop1')
Operation:
ILocalReferenceOperation: c1 (OperationKind.LocalReference, Type: C1) (Syntax: 'c1')
WhenNotNull:
IPropertyReferenceOperation: System.Int32 C1.Prop1 { get; } (OperationKind.PropertyReference, Type: System.Int32) (Syntax: '.Prop1')
Instance Receiver:
IConditionalAccessInstanceOperation (OperationKind.ConditionalAccessInstance, Type: C1, IsImplicit) (Syntax: 'c1')
";
    var expectedDiagnostics = DiagnosticDescription.None;

    VerifyOperationTreeAndDiagnosticsForTest<ConditionalAccessExpressionSyntax>(source, expectedOperationTree, expectedDiagnostics);
}
[CompilerTrait(CompilerFeature.IOperation, CompilerFeature.Dataflow)]
[Fact]
public void ConditionalAccessFlow_01()
{
    // Control-flow graph for 'result = input?.Length': the receiver is
    // flow-captured and null-checked; the non-null branch reads Length and
    // converts to int?, the null branch produces a default int?, and both
    // feed the capture consumed by the assignment.
    string source = @"
class P
{
    void M1(System.Array input, int? result)
    /*<bind>*/{
        result = input?.Length;
    }/*</bind>*/
}
";
    string expectedGraph = @"
Block[B0] - Entry
Statements (0)
Next (Regular) Block[B1]
Entering: {R1}
.locals {R1}
{
CaptureIds: [0] [2]
Block[B1] - Block
Predecessors: [B0]
Statements (1)
IFlowCaptureOperation: 0 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'result')
Value:
IParameterReferenceOperation: result (OperationKind.ParameterReference, Type: System.Int32?) (Syntax: 'result')
Next (Regular) Block[B2]
Entering: {R2}
.locals {R2}
{
CaptureIds: [1]
Block[B2] - Block
Predecessors: [B1]
Statements (1)
IFlowCaptureOperation: 1 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'input')
Value:
IParameterReferenceOperation: input (OperationKind.ParameterReference, Type: System.Array) (Syntax: 'input')
Jump if True (Regular) to Block[B4]
IIsNullOperation (OperationKind.IsNull, Type: System.Boolean, IsImplicit) (Syntax: 'input')
Operand:
IFlowCaptureReferenceOperation: 1 (OperationKind.FlowCaptureReference, Type: System.Array, IsImplicit) (Syntax: 'input')
Leaving: {R2}
Next (Regular) Block[B3]
Block[B3] - Block
Predecessors: [B2]
Statements (1)
IFlowCaptureOperation: 2 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: '.Length')
Value:
IConversionOperation (TryCast: False, Unchecked) (OperationKind.Conversion, Type: System.Int32?, IsImplicit) (Syntax: '.Length')
Conversion: CommonConversion (Exists: True, IsIdentity: False, IsNumeric: False, IsReference: False, IsUserDefined: False) (MethodSymbol: null)
(ImplicitNullable)
Operand:
IPropertyReferenceOperation: System.Int32 System.Array.Length { get; } (OperationKind.PropertyReference, Type: System.Int32) (Syntax: '.Length')
Instance Receiver:
IFlowCaptureReferenceOperation: 1 (OperationKind.FlowCaptureReference, Type: System.Array, IsImplicit) (Syntax: 'input')
Next (Regular) Block[B5]
Leaving: {R2}
}
Block[B4] - Block
Predecessors: [B2]
Statements (1)
IFlowCaptureOperation: 2 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'input')
Value:
IDefaultValueOperation (OperationKind.DefaultValue, Type: System.Int32?, IsImplicit) (Syntax: 'input')
Next (Regular) Block[B5]
Block[B5] - Block
Predecessors: [B3] [B4]
Statements (1)
IExpressionStatementOperation (OperationKind.ExpressionStatement, Type: null) (Syntax: 'result = input?.Length;')
Expression:
ISimpleAssignmentOperation (OperationKind.SimpleAssignment, Type: System.Int32?) (Syntax: 'result = input?.Length')
Left:
IFlowCaptureReferenceOperation: 0 (OperationKind.FlowCaptureReference, Type: System.Int32?, IsImplicit) (Syntax: 'result')
Right:
IFlowCaptureReferenceOperation: 2 (OperationKind.FlowCaptureReference, Type: System.Int32?, IsImplicit) (Syntax: 'input?.Length')
Next (Regular) Block[B6]
Leaving: {R1}
}
Block[B6] - Exit
Predecessors: [B5]
Statements (0)
";
    var expectedDiagnostics = DiagnosticDescription.None;

    VerifyFlowGraphAndDiagnosticsForTest<BlockSyntax>(source, expectedGraph, expectedDiagnostics);
}
[CompilerTrait(CompilerFeature.IOperation, CompilerFeature.Dataflow)]
[Fact]
public void ConditionalAccessFlow_02()
{
    // Control-flow graph for 'result = input?.ToString()' with a nullable
    // value type receiver: the non-null branch calls ToString on
    // input.GetValueOrDefault() rather than on the nullable wrapper.
    string source = @"
class P
{
    void M1(int? input, string result)
    /*<bind>*/{
        result = input?.ToString();
    }/*</bind>*/
}
";
    string expectedGraph = @"
Block[B0] - Entry
Statements (0)
Next (Regular) Block[B1]
Entering: {R1}
.locals {R1}
{
CaptureIds: [0] [2]
Block[B1] - Block
Predecessors: [B0]
Statements (1)
IFlowCaptureOperation: 0 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'result')
Value:
IParameterReferenceOperation: result (OperationKind.ParameterReference, Type: System.String) (Syntax: 'result')
Next (Regular) Block[B2]
Entering: {R2}
.locals {R2}
{
CaptureIds: [1]
Block[B2] - Block
Predecessors: [B1]
Statements (1)
IFlowCaptureOperation: 1 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'input')
Value:
IParameterReferenceOperation: input (OperationKind.ParameterReference, Type: System.Int32?) (Syntax: 'input')
Jump if True (Regular) to Block[B4]
IIsNullOperation (OperationKind.IsNull, Type: System.Boolean, IsImplicit) (Syntax: 'input')
Operand:
IFlowCaptureReferenceOperation: 1 (OperationKind.FlowCaptureReference, Type: System.Int32?, IsImplicit) (Syntax: 'input')
Leaving: {R2}
Next (Regular) Block[B3]
Block[B3] - Block
Predecessors: [B2]
Statements (1)
IFlowCaptureOperation: 2 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: '.ToString()')
Value:
IInvocationOperation (virtual System.String System.Int32.ToString()) (OperationKind.Invocation, Type: System.String) (Syntax: '.ToString()')
Instance Receiver:
IInvocationOperation ( System.Int32 System.Int32?.GetValueOrDefault()) (OperationKind.Invocation, Type: System.Int32, IsImplicit) (Syntax: 'input')
Instance Receiver:
IFlowCaptureReferenceOperation: 1 (OperationKind.FlowCaptureReference, Type: System.Int32?, IsImplicit) (Syntax: 'input')
Arguments(0)
Arguments(0)
Next (Regular) Block[B5]
Leaving: {R2}
}
Block[B4] - Block
Predecessors: [B2]
Statements (1)
IFlowCaptureOperation: 2 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'input')
Value:
IDefaultValueOperation (OperationKind.DefaultValue, Type: System.String, Constant: null, IsImplicit) (Syntax: 'input')
Next (Regular) Block[B5]
Block[B5] - Block
Predecessors: [B3] [B4]
Statements (1)
IExpressionStatementOperation (OperationKind.ExpressionStatement, Type: null) (Syntax: 'result = in ... ToString();')
Expression:
ISimpleAssignmentOperation (OperationKind.SimpleAssignment, Type: System.String) (Syntax: 'result = in ... .ToString()')
Left:
IFlowCaptureReferenceOperation: 0 (OperationKind.FlowCaptureReference, Type: System.String, IsImplicit) (Syntax: 'result')
Right:
IFlowCaptureReferenceOperation: 2 (OperationKind.FlowCaptureReference, Type: System.String, IsImplicit) (Syntax: 'input?.ToString()')
Next (Regular) Block[B6]
Leaving: {R1}
}
Block[B6] - Exit
Predecessors: [B5]
Statements (0)
";
    var expectedDiagnostics = DiagnosticDescription.None;

    VerifyFlowGraphAndDiagnosticsForTest<BlockSyntax>(source, expectedGraph, expectedDiagnostics);
}
[CompilerTrait(CompilerFeature.IOperation, CompilerFeature.Dataflow)]
[Fact]
public void ConditionalAccessFlow_03()
{
    // Control-flow graph for 'result = input?.Access()' where the receiver
    // is a reference type and the invoked method already returns int?, so
    // no lifting conversion appears in the non-null branch.
    string source = @"
class P
{
    void M1(P input, int? result)
    /*<bind>*/{
        result = input?.Access();
    }/*</bind>*/

    int? Access() => null;
}
";
    string expectedGraph = @"
Block[B0] - Entry
Statements (0)
Next (Regular) Block[B1]
Entering: {R1}
.locals {R1}
{
CaptureIds: [0] [2]
Block[B1] - Block
Predecessors: [B0]
Statements (1)
IFlowCaptureOperation: 0 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'result')
Value:
IParameterReferenceOperation: result (OperationKind.ParameterReference, Type: System.Int32?) (Syntax: 'result')
Next (Regular) Block[B2]
Entering: {R2}
.locals {R2}
{
CaptureIds: [1]
Block[B2] - Block
Predecessors: [B1]
Statements (1)
IFlowCaptureOperation: 1 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'input')
Value:
IParameterReferenceOperation: input (OperationKind.ParameterReference, Type: P) (Syntax: 'input')
Jump if True (Regular) to Block[B4]
IIsNullOperation (OperationKind.IsNull, Type: System.Boolean, IsImplicit) (Syntax: 'input')
Operand:
IFlowCaptureReferenceOperation: 1 (OperationKind.FlowCaptureReference, Type: P, IsImplicit) (Syntax: 'input')
Leaving: {R2}
Next (Regular) Block[B3]
Block[B3] - Block
Predecessors: [B2]
Statements (1)
IFlowCaptureOperation: 2 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: '.Access()')
Value:
IInvocationOperation ( System.Int32? P.Access()) (OperationKind.Invocation, Type: System.Int32?) (Syntax: '.Access()')
Instance Receiver:
IFlowCaptureReferenceOperation: 1 (OperationKind.FlowCaptureReference, Type: P, IsImplicit) (Syntax: 'input')
Arguments(0)
Next (Regular) Block[B5]
Leaving: {R2}
}
Block[B4] - Block
Predecessors: [B2]
Statements (1)
IFlowCaptureOperation: 2 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'input')
Value:
IDefaultValueOperation (OperationKind.DefaultValue, Type: System.Int32?, IsImplicit) (Syntax: 'input')
Next (Regular) Block[B5]
Block[B5] - Block
Predecessors: [B3] [B4]
Statements (1)
IExpressionStatementOperation (OperationKind.ExpressionStatement, Type: null) (Syntax: 'result = in ... ?.Access();')
Expression:
ISimpleAssignmentOperation (OperationKind.SimpleAssignment, Type: System.Int32?) (Syntax: 'result = input?.Access()')
Left:
IFlowCaptureReferenceOperation: 0 (OperationKind.FlowCaptureReference, Type: System.Int32?, IsImplicit) (Syntax: 'result')
Right:
IFlowCaptureReferenceOperation: 2 (OperationKind.FlowCaptureReference, Type: System.Int32?, IsImplicit) (Syntax: 'input?.Access()')
Next (Regular) Block[B6]
Leaving: {R1}
}
Block[B6] - Exit
Predecessors: [B5]
Statements (0)
";
    var expectedDiagnostics = DiagnosticDescription.None;

    VerifyFlowGraphAndDiagnosticsForTest<BlockSyntax>(source, expectedGraph, expectedDiagnostics);
}
[CompilerTrait(CompilerFeature.IOperation, CompilerFeature.Dataflow)]
[Fact]
public void ConditionalAccessFlow_04()
{
    // Control-flow graph for chained and parenthesized conditional accesses
    // '(input?[11]?.Access1())?[22]?.Access2()': each '?.'/'?[ ]' link gets
    // its own capture region and null-check, with the parenthesized prefix
    // resolving to its own capture before the outer chain continues.
    string source = @"
class P
{
    void M1(P input, P result)
    /*<bind>*/{
        result = (input?[11]?.Access1())?[22]?.Access2();
    }/*</bind>*/

    P this[int x] => null;
    P[] Access1() => null;
    P Access2() => null;
}
";
    string expectedGraph = @"
Block[B0] - Entry
Statements (0)
Next (Regular) Block[B1]
Entering: {R1}
.locals {R1}
{
CaptureIds: [0] [5]
Block[B1] - Block
Predecessors: [B0]
Statements (1)
IFlowCaptureOperation: 0 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'result')
Value:
IParameterReferenceOperation: result (OperationKind.ParameterReference, Type: P) (Syntax: 'result')
Next (Regular) Block[B2]
Entering: {R2} {R3} {R4} {R5}
.locals {R2}
{
CaptureIds: [4]
.locals {R3}
{
CaptureIds: [3]
.locals {R4}
{
CaptureIds: [2]
.locals {R5}
{
CaptureIds: [1]
Block[B2] - Block
Predecessors: [B1]
Statements (1)
IFlowCaptureOperation: 1 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'input')
Value:
IParameterReferenceOperation: input (OperationKind.ParameterReference, Type: P) (Syntax: 'input')
Jump if True (Regular) to Block[B6]
IIsNullOperation (OperationKind.IsNull, Type: System.Boolean, IsImplicit) (Syntax: 'input')
Operand:
IFlowCaptureReferenceOperation: 1 (OperationKind.FlowCaptureReference, Type: P, IsImplicit) (Syntax: 'input')
Leaving: {R5} {R4}
Next (Regular) Block[B3]
Block[B3] - Block
Predecessors: [B2]
Statements (1)
IFlowCaptureOperation: 2 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: '[11]')
Value:
IPropertyReferenceOperation: P P.this[System.Int32 x] { get; } (OperationKind.PropertyReference, Type: P) (Syntax: '[11]')
Instance Receiver:
IFlowCaptureReferenceOperation: 1 (OperationKind.FlowCaptureReference, Type: P, IsImplicit) (Syntax: 'input')
Arguments(1):
IArgumentOperation (ArgumentKind.Explicit, Matching Parameter: x) (OperationKind.Argument, Type: null) (Syntax: '11')
ILiteralOperation (OperationKind.Literal, Type: System.Int32, Constant: 11) (Syntax: '11')
InConversion: CommonConversion (Exists: True, IsIdentity: True, IsNumeric: False, IsReference: False, IsUserDefined: False) (MethodSymbol: null)
OutConversion: CommonConversion (Exists: True, IsIdentity: True, IsNumeric: False, IsReference: False, IsUserDefined: False) (MethodSymbol: null)
Next (Regular) Block[B4]
Leaving: {R5}
}
Block[B4] - Block
Predecessors: [B3]
Statements (0)
Jump if True (Regular) to Block[B6]
IIsNullOperation (OperationKind.IsNull, Type: System.Boolean, IsImplicit) (Syntax: '[11]')
Operand:
IFlowCaptureReferenceOperation: 2 (OperationKind.FlowCaptureReference, Type: P, IsImplicit) (Syntax: '[11]')
Leaving: {R4}
Next (Regular) Block[B5]
Block[B5] - Block
Predecessors: [B4]
Statements (1)
IFlowCaptureOperation: 3 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: '.Access1()')
Value:
IInvocationOperation ( P[] P.Access1()) (OperationKind.Invocation, Type: P[]) (Syntax: '.Access1()')
Instance Receiver:
IFlowCaptureReferenceOperation: 2 (OperationKind.FlowCaptureReference, Type: P, IsImplicit) (Syntax: '[11]')
Arguments(0)
Next (Regular) Block[B7]
Leaving: {R4}
}
Block[B6] - Block
Predecessors: [B2] [B4]
Statements (1)
IFlowCaptureOperation: 3 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'input?[11]?.Access1()')
Value:
IDefaultValueOperation (OperationKind.DefaultValue, Type: P[], Constant: null, IsImplicit) (Syntax: 'input?[11]?.Access1()')
Next (Regular) Block[B7]
Block[B7] - Block
Predecessors: [B5] [B6]
Statements (0)
Jump if True (Regular) to Block[B11]
IIsNullOperation (OperationKind.IsNull, Type: System.Boolean, IsImplicit) (Syntax: 'input?[11]?.Access1()')
Operand:
IFlowCaptureReferenceOperation: 3 (OperationKind.FlowCaptureReference, Type: P[], IsImplicit) (Syntax: 'input?[11]?.Access1()')
Leaving: {R3} {R2}
Next (Regular) Block[B8]
Block[B8] - Block
Predecessors: [B7]
Statements (1)
IFlowCaptureOperation: 4 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: '[22]')
Value:
IArrayElementReferenceOperation (OperationKind.ArrayElementReference, Type: P) (Syntax: '[22]')
Array reference:
IFlowCaptureReferenceOperation: 3 (OperationKind.FlowCaptureReference, Type: P[], IsImplicit) (Syntax: 'input?[11]?.Access1()')
Indices(1):
ILiteralOperation (OperationKind.Literal, Type: System.Int32, Constant: 22) (Syntax: '22')
Next (Regular) Block[B9]
Leaving: {R3}
}
Block[B9] - Block
Predecessors: [B8]
Statements (0)
Jump if True (Regular) to Block[B11]
IIsNullOperation (OperationKind.IsNull, Type: System.Boolean, IsImplicit) (Syntax: '[22]')
Operand:
IFlowCaptureReferenceOperation: 4 (OperationKind.FlowCaptureReference, Type: P, IsImplicit) (Syntax: '[22]')
Leaving: {R2}
Next (Regular) Block[B10]
Block[B10] - Block
Predecessors: [B9]
Statements (1)
IFlowCaptureOperation: 5 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: '.Access2()')
Value:
IInvocationOperation ( P P.Access2()) (OperationKind.Invocation, Type: P) (Syntax: '.Access2()')
Instance Receiver:
IFlowCaptureReferenceOperation: 4 (OperationKind.FlowCaptureReference, Type: P, IsImplicit) (Syntax: '[22]')
Arguments(0)
Next (Regular) Block[B12]
Leaving: {R2}
}
Block[B11] - Block
Predecessors: [B7] [B9]
Statements (1)
IFlowCaptureOperation: 5 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: '(input?[11] ... ?.Access2()')
Value:
IDefaultValueOperation (OperationKind.DefaultValue, Type: P, Constant: null, IsImplicit) (Syntax: '(input?[11] ... ?.Access2()')
Next (Regular) Block[B12]
Block[B12] - Block
Predecessors: [B10] [B11]
Statements (1)
IExpressionStatementOperation (OperationKind.ExpressionStatement, Type: null) (Syntax: 'result = (i ... .Access2();')
Expression:
ISimpleAssignmentOperation (OperationKind.SimpleAssignment, Type: P) (Syntax: 'result = (i ... ?.Access2()')
Left:
IFlowCaptureReferenceOperation: 0 (OperationKind.FlowCaptureReference, Type: P, IsImplicit) (Syntax: 'result')
Right:
IFlowCaptureReferenceOperation: 5 (OperationKind.FlowCaptureReference, Type: P, IsImplicit) (Syntax: '(input?[11] ... ?.Access2()')
Next (Regular) Block[B13]
Leaving: {R1}
}
Block[B13] - Exit
Predecessors: [B12]
Statements (0)
";
    var expectedDiagnostics = DiagnosticDescription.None;

    VerifyFlowGraphAndDiagnosticsForTest<BlockSyntax>(source, expectedGraph, expectedDiagnostics);
}
// Verifies the control-flow graph for a chained conditional access on a nullable
// value type: (input?.Access1()?[11])?[22]?.Access2(). Each '?.'/'?[]' link adds a
// null test on a flow-captured receiver, a GetValueOrDefault() unwrap, and a
// default-value branch that short-circuits the rest of the chain.
[CompilerTrait(CompilerFeature.IOperation, CompilerFeature.Dataflow)]
[Fact]
public void ConditionalAccessFlow_05()
{
// Test input; the /*<bind>*/ markers delimit the block whose CFG is verified.
string source = @"
struct P
{
void M1(P? input, P? result)
/*<bind>*/{
result = (input?.Access1()?[11])?[22]?.Access2();
}/*</bind>*/
P? this[int x] => default;
P[] Access1() => default;
P Access2() => default;
}
";
// Expected CFG: nested regions R2..R5 scope one flow capture per receiver;
// B6 and B11 produce the default (null) result when an earlier link is null.
string expectedGraph = @"
Block[B0] - Entry
Statements (0)
Next (Regular) Block[B1]
Entering: {R1}
.locals {R1}
{
CaptureIds: [0] [5]
Block[B1] - Block
Predecessors: [B0]
Statements (1)
IFlowCaptureOperation: 0 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'result')
Value:
IParameterReferenceOperation: result (OperationKind.ParameterReference, Type: P?) (Syntax: 'result')
Next (Regular) Block[B2]
Entering: {R2} {R3} {R4} {R5}
.locals {R2}
{
CaptureIds: [4]
.locals {R3}
{
CaptureIds: [3]
.locals {R4}
{
CaptureIds: [2]
.locals {R5}
{
CaptureIds: [1]
Block[B2] - Block
Predecessors: [B1]
Statements (1)
IFlowCaptureOperation: 1 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'input')
Value:
IParameterReferenceOperation: input (OperationKind.ParameterReference, Type: P?) (Syntax: 'input')
Jump if True (Regular) to Block[B6]
IIsNullOperation (OperationKind.IsNull, Type: System.Boolean, IsImplicit) (Syntax: 'input')
Operand:
IFlowCaptureReferenceOperation: 1 (OperationKind.FlowCaptureReference, Type: P?, IsImplicit) (Syntax: 'input')
Leaving: {R5} {R4}
Next (Regular) Block[B3]
Block[B3] - Block
Predecessors: [B2]
Statements (1)
IFlowCaptureOperation: 2 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: '.Access1()')
Value:
IInvocationOperation ( P[] P.Access1()) (OperationKind.Invocation, Type: P[]) (Syntax: '.Access1()')
Instance Receiver:
IInvocationOperation ( P P?.GetValueOrDefault()) (OperationKind.Invocation, Type: P, IsImplicit) (Syntax: 'input')
Instance Receiver:
IFlowCaptureReferenceOperation: 1 (OperationKind.FlowCaptureReference, Type: P?, IsImplicit) (Syntax: 'input')
Arguments(0)
Arguments(0)
Next (Regular) Block[B4]
Leaving: {R5}
}
Block[B4] - Block
Predecessors: [B3]
Statements (0)
Jump if True (Regular) to Block[B6]
IIsNullOperation (OperationKind.IsNull, Type: System.Boolean, IsImplicit) (Syntax: '.Access1()')
Operand:
IFlowCaptureReferenceOperation: 2 (OperationKind.FlowCaptureReference, Type: P[], IsImplicit) (Syntax: '.Access1()')
Leaving: {R4}
Next (Regular) Block[B5]
Block[B5] - Block
Predecessors: [B4]
Statements (1)
IFlowCaptureOperation: 3 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: '[11]')
Value:
IConversionOperation (TryCast: False, Unchecked) (OperationKind.Conversion, Type: P?, IsImplicit) (Syntax: '[11]')
Conversion: CommonConversion (Exists: True, IsIdentity: False, IsNumeric: False, IsReference: False, IsUserDefined: False) (MethodSymbol: null)
(ImplicitNullable)
Operand:
IArrayElementReferenceOperation (OperationKind.ArrayElementReference, Type: P) (Syntax: '[11]')
Array reference:
IFlowCaptureReferenceOperation: 2 (OperationKind.FlowCaptureReference, Type: P[], IsImplicit) (Syntax: '.Access1()')
Indices(1):
ILiteralOperation (OperationKind.Literal, Type: System.Int32, Constant: 11) (Syntax: '11')
Next (Regular) Block[B7]
Leaving: {R4}
}
Block[B6] - Block
Predecessors: [B2] [B4]
Statements (1)
IFlowCaptureOperation: 3 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'input?.Access1()?[11]')
Value:
IDefaultValueOperation (OperationKind.DefaultValue, Type: P?, IsImplicit) (Syntax: 'input?.Access1()?[11]')
Next (Regular) Block[B7]
Block[B7] - Block
Predecessors: [B5] [B6]
Statements (0)
Jump if True (Regular) to Block[B11]
IIsNullOperation (OperationKind.IsNull, Type: System.Boolean, IsImplicit) (Syntax: 'input?.Access1()?[11]')
Operand:
IFlowCaptureReferenceOperation: 3 (OperationKind.FlowCaptureReference, Type: P?, IsImplicit) (Syntax: 'input?.Access1()?[11]')
Leaving: {R3} {R2}
Next (Regular) Block[B8]
Block[B8] - Block
Predecessors: [B7]
Statements (1)
IFlowCaptureOperation: 4 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: '[22]')
Value:
IPropertyReferenceOperation: P? P.this[System.Int32 x] { get; } (OperationKind.PropertyReference, Type: P?) (Syntax: '[22]')
Instance Receiver:
IInvocationOperation ( P P?.GetValueOrDefault()) (OperationKind.Invocation, Type: P, IsImplicit) (Syntax: 'input?.Access1()?[11]')
Instance Receiver:
IFlowCaptureReferenceOperation: 3 (OperationKind.FlowCaptureReference, Type: P?, IsImplicit) (Syntax: 'input?.Access1()?[11]')
Arguments(0)
Arguments(1):
IArgumentOperation (ArgumentKind.Explicit, Matching Parameter: x) (OperationKind.Argument, Type: null) (Syntax: '22')
ILiteralOperation (OperationKind.Literal, Type: System.Int32, Constant: 22) (Syntax: '22')
InConversion: CommonConversion (Exists: True, IsIdentity: True, IsNumeric: False, IsReference: False, IsUserDefined: False) (MethodSymbol: null)
OutConversion: CommonConversion (Exists: True, IsIdentity: True, IsNumeric: False, IsReference: False, IsUserDefined: False) (MethodSymbol: null)
Next (Regular) Block[B9]
Leaving: {R3}
}
Block[B9] - Block
Predecessors: [B8]
Statements (0)
Jump if True (Regular) to Block[B11]
IIsNullOperation (OperationKind.IsNull, Type: System.Boolean, IsImplicit) (Syntax: '[22]')
Operand:
IFlowCaptureReferenceOperation: 4 (OperationKind.FlowCaptureReference, Type: P?, IsImplicit) (Syntax: '[22]')
Leaving: {R2}
Next (Regular) Block[B10]
Block[B10] - Block
Predecessors: [B9]
Statements (1)
IFlowCaptureOperation: 5 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: '.Access2()')
Value:
IConversionOperation (TryCast: False, Unchecked) (OperationKind.Conversion, Type: P?, IsImplicit) (Syntax: '.Access2()')
Conversion: CommonConversion (Exists: True, IsIdentity: False, IsNumeric: False, IsReference: False, IsUserDefined: False) (MethodSymbol: null)
(ImplicitNullable)
Operand:
IInvocationOperation ( P P.Access2()) (OperationKind.Invocation, Type: P) (Syntax: '.Access2()')
Instance Receiver:
IInvocationOperation ( P P?.GetValueOrDefault()) (OperationKind.Invocation, Type: P, IsImplicit) (Syntax: '[22]')
Instance Receiver:
IFlowCaptureReferenceOperation: 4 (OperationKind.FlowCaptureReference, Type: P?, IsImplicit) (Syntax: '[22]')
Arguments(0)
Arguments(0)
Next (Regular) Block[B12]
Leaving: {R2}
}
Block[B11] - Block
Predecessors: [B7] [B9]
Statements (1)
IFlowCaptureOperation: 5 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: '(input?.Acc ... ?.Access2()')
Value:
IDefaultValueOperation (OperationKind.DefaultValue, Type: P?, IsImplicit) (Syntax: '(input?.Acc ... ?.Access2()')
Next (Regular) Block[B12]
Block[B12] - Block
Predecessors: [B10] [B11]
Statements (1)
IExpressionStatementOperation (OperationKind.ExpressionStatement, Type: null) (Syntax: 'result = (i ... .Access2();')
Expression:
ISimpleAssignmentOperation (OperationKind.SimpleAssignment, Type: P?) (Syntax: 'result = (i ... ?.Access2()')
Left:
IFlowCaptureReferenceOperation: 0 (OperationKind.FlowCaptureReference, Type: P?, IsImplicit) (Syntax: 'result')
Right:
IFlowCaptureReferenceOperation: 5 (OperationKind.FlowCaptureReference, Type: P?, IsImplicit) (Syntax: '(input?.Acc ... ?.Access2()')
Next (Regular) Block[B13]
Leaving: {R1}
}
Block[B13] - Exit
Predecessors: [B12]
Statements (0)
";
// The chain is well-formed C#, so no diagnostics are expected.
var expectedDiagnostics = DiagnosticDescription.None;
VerifyFlowGraphAndDiagnosticsForTest<BlockSyntax>(source, expectedGraph, expectedDiagnostics);
}
// Verifies the CFG for an invalid assignment through a conditional access on a
// nullable struct with an auto-property target: 'x?.P1 = 0;'. The graph is still
// produced (marked IsInvalid), with the bad left-hand side wrapped in an
// IInvalidOperation, and CS0131 is reported.
[CompilerTrait(CompilerFeature.IOperation, CompilerFeature.Dataflow)]
[Fact]
public void ConditionalAccessFlow_06()
{
// Test input; the assignment target 'x?.P1' is not an lvalue.
string source = @"
struct P
{
void M1(S1? x)
/*<bind>*/{
x?.P1 = 0;
}/*</bind>*/
}
public struct S1
{
public int P1 { get; set; }
}";
// Expected CFG: B3 supplies the default value on the null branch; B4 holds the
// invalid assignment whose left operand is an IInvalidOperation.
string expectedGraph = @"
Block[B0] - Entry
Statements (0)
Next (Regular) Block[B1]
Entering: {R1} {R2}
.locals {R1}
{
CaptureIds: [1]
.locals {R2}
{
CaptureIds: [0]
Block[B1] - Block
Predecessors: [B0]
Statements (1)
IFlowCaptureOperation: 0 (OperationKind.FlowCapture, Type: null, IsInvalid, IsImplicit) (Syntax: 'x')
Value:
IParameterReferenceOperation: x (OperationKind.ParameterReference, Type: S1?, IsInvalid) (Syntax: 'x')
Jump if True (Regular) to Block[B3]
IIsNullOperation (OperationKind.IsNull, Type: System.Boolean, IsInvalid, IsImplicit) (Syntax: 'x')
Operand:
IFlowCaptureReferenceOperation: 0 (OperationKind.FlowCaptureReference, Type: S1?, IsInvalid, IsImplicit) (Syntax: 'x')
Leaving: {R2}
Next (Regular) Block[B2]
Block[B2] - Block
Predecessors: [B1]
Statements (1)
IFlowCaptureOperation: 1 (OperationKind.FlowCapture, Type: null, IsInvalid, IsImplicit) (Syntax: '.P1')
Value:
IConversionOperation (TryCast: False, Unchecked) (OperationKind.Conversion, Type: System.Int32?, IsInvalid, IsImplicit) (Syntax: '.P1')
Conversion: CommonConversion (Exists: True, IsIdentity: False, IsNumeric: False, IsReference: False, IsUserDefined: False) (MethodSymbol: null)
(ImplicitNullable)
Operand:
IPropertyReferenceOperation: System.Int32 S1.P1 { get; set; } (OperationKind.PropertyReference, Type: System.Int32, IsInvalid) (Syntax: '.P1')
Instance Receiver:
IInvocationOperation ( S1 S1?.GetValueOrDefault()) (OperationKind.Invocation, Type: S1, IsInvalid, IsImplicit) (Syntax: 'x')
Instance Receiver:
IFlowCaptureReferenceOperation: 0 (OperationKind.FlowCaptureReference, Type: S1?, IsInvalid, IsImplicit) (Syntax: 'x')
Arguments(0)
Next (Regular) Block[B4]
Leaving: {R2}
}
Block[B3] - Block
Predecessors: [B1]
Statements (1)
IFlowCaptureOperation: 1 (OperationKind.FlowCapture, Type: null, IsInvalid, IsImplicit) (Syntax: 'x')
Value:
IDefaultValueOperation (OperationKind.DefaultValue, Type: System.Int32?, IsInvalid, IsImplicit) (Syntax: 'x')
Next (Regular) Block[B4]
Block[B4] - Block
Predecessors: [B2] [B3]
Statements (1)
IExpressionStatementOperation (OperationKind.ExpressionStatement, Type: null, IsInvalid) (Syntax: 'x?.P1 = 0;')
Expression:
ISimpleAssignmentOperation (OperationKind.SimpleAssignment, Type: System.Int32?, IsInvalid) (Syntax: 'x?.P1 = 0')
Left:
IInvalidOperation (OperationKind.Invalid, Type: System.Int32?, IsInvalid, IsImplicit) (Syntax: 'x?.P1')
Children(1):
IFlowCaptureReferenceOperation: 1 (OperationKind.FlowCaptureReference, Type: System.Int32?, IsInvalid, IsImplicit) (Syntax: 'x?.P1')
Right:
ILiteralOperation (OperationKind.Literal, Type: System.Int32, Constant: 0) (Syntax: '0')
Next (Regular) Block[B5]
Leaving: {R1}
}
Block[B5] - Exit
Predecessors: [B4]
Statements (0)
";
// A conditional access is not assignable, so CS0131 is expected.
var expectedDiagnostics = new[] {
// file.cs(6,9): error CS0131: The left-hand side of an assignment must be a variable, property or indexer
// x?.P1 = 0;
Diagnostic(ErrorCode.ERR_AssgLvalueExpected, "x?.P1").WithLocation(6, 9)
};
VerifyFlowGraphAndDiagnosticsForTest<BlockSyntax>(source, expectedGraph, expectedDiagnostics);
}
// Same scenario as ConditionalAccessFlow_06, but the member behind the
// conditional access is a public field rather than a property, so the invalid
// left-hand side contains an IFieldReferenceOperation instead. CS0131 is still
// reported.
[CompilerTrait(CompilerFeature.IOperation, CompilerFeature.Dataflow)]
[Fact]
public void ConditionalAccessFlow_07()
{
// Test input; 'x?.P1' with a field member is still not assignable.
string source = @"
struct P
{
void M1(S1? x)
/*<bind>*/{
x?.P1 = 0;
}/*</bind>*/
}
public struct S1
{
public int P1;
}";
// Expected CFG mirrors _06 except for the field reference in B2.
string expectedGraph = @"
Block[B0] - Entry
Statements (0)
Next (Regular) Block[B1]
Entering: {R1} {R2}
.locals {R1}
{
CaptureIds: [1]
.locals {R2}
{
CaptureIds: [0]
Block[B1] - Block
Predecessors: [B0]
Statements (1)
IFlowCaptureOperation: 0 (OperationKind.FlowCapture, Type: null, IsInvalid, IsImplicit) (Syntax: 'x')
Value:
IParameterReferenceOperation: x (OperationKind.ParameterReference, Type: S1?, IsInvalid) (Syntax: 'x')
Jump if True (Regular) to Block[B3]
IIsNullOperation (OperationKind.IsNull, Type: System.Boolean, IsInvalid, IsImplicit) (Syntax: 'x')
Operand:
IFlowCaptureReferenceOperation: 0 (OperationKind.FlowCaptureReference, Type: S1?, IsInvalid, IsImplicit) (Syntax: 'x')
Leaving: {R2}
Next (Regular) Block[B2]
Block[B2] - Block
Predecessors: [B1]
Statements (1)
IFlowCaptureOperation: 1 (OperationKind.FlowCapture, Type: null, IsInvalid, IsImplicit) (Syntax: '.P1')
Value:
IConversionOperation (TryCast: False, Unchecked) (OperationKind.Conversion, Type: System.Int32?, IsInvalid, IsImplicit) (Syntax: '.P1')
Conversion: CommonConversion (Exists: True, IsIdentity: False, IsNumeric: False, IsReference: False, IsUserDefined: False) (MethodSymbol: null)
(ImplicitNullable)
Operand:
IFieldReferenceOperation: System.Int32 S1.P1 (OperationKind.FieldReference, Type: System.Int32, IsInvalid) (Syntax: '.P1')
Instance Receiver:
IInvocationOperation ( S1 S1?.GetValueOrDefault()) (OperationKind.Invocation, Type: S1, IsInvalid, IsImplicit) (Syntax: 'x')
Instance Receiver:
IFlowCaptureReferenceOperation: 0 (OperationKind.FlowCaptureReference, Type: S1?, IsInvalid, IsImplicit) (Syntax: 'x')
Arguments(0)
Next (Regular) Block[B4]
Leaving: {R2}
}
Block[B3] - Block
Predecessors: [B1]
Statements (1)
IFlowCaptureOperation: 1 (OperationKind.FlowCapture, Type: null, IsInvalid, IsImplicit) (Syntax: 'x')
Value:
IDefaultValueOperation (OperationKind.DefaultValue, Type: System.Int32?, IsInvalid, IsImplicit) (Syntax: 'x')
Next (Regular) Block[B4]
Block[B4] - Block
Predecessors: [B2] [B3]
Statements (1)
IExpressionStatementOperation (OperationKind.ExpressionStatement, Type: null, IsInvalid) (Syntax: 'x?.P1 = 0;')
Expression:
ISimpleAssignmentOperation (OperationKind.SimpleAssignment, Type: System.Int32?, IsInvalid) (Syntax: 'x?.P1 = 0')
Left:
IInvalidOperation (OperationKind.Invalid, Type: System.Int32?, IsInvalid, IsImplicit) (Syntax: 'x?.P1')
Children(1):
IFlowCaptureReferenceOperation: 1 (OperationKind.FlowCaptureReference, Type: System.Int32?, IsInvalid, IsImplicit) (Syntax: 'x?.P1')
Right:
ILiteralOperation (OperationKind.Literal, Type: System.Int32, Constant: 0) (Syntax: '0')
Next (Regular) Block[B5]
Leaving: {R1}
}
Block[B5] - Exit
Predecessors: [B4]
Statements (0)
";
// A conditional access is not assignable, so CS0131 is expected.
var expectedDiagnostics = new[] {
// file.cs(6,9): error CS0131: The left-hand side of an assignment must be a variable, property or indexer
// x?.P1 = 0;
Diagnostic(ErrorCode.ERR_AssgLvalueExpected, "x?.P1").WithLocation(6, 9)
};
VerifyFlowGraphAndDiagnosticsForTest<BlockSyntax>(source, expectedGraph, expectedDiagnostics);
}
// Verifies CFG construction when Nullable&lt;T&gt;.GetValueOrDefault is deliberately
// removed from the compilation: the receiver unwrap in 'input?.Length' degrades
// to an IInvalidOperation instead of a GetValueOrDefault() invocation, but graph
// construction still succeeds with no diagnostics.
[CompilerTrait(CompilerFeature.IOperation, CompilerFeature.Dataflow)]
[Fact]
public void ConditionalAccessFlow_08()
{
string source = @"
struct P
{
void M1(P? input, int? result)
/*<bind>*/{
result = input?.Length;
}/*</bind>*/
public int Length { get; }
}
";
// Build the compilation explicitly so the well-known member can be removed.
var compilation = CreateCompilationWithMscorlib45(source, parseOptions: TestOptions.RegularWithFlowAnalysisFeature);
compilation.MakeMemberMissing(SpecialMember.System_Nullable_T_GetValueOrDefault);
// Expected CFG: note the IInvalidOperation receiver in B3 where the
// GetValueOrDefault() call would normally appear.
string expectedGraph = @"
Block[B0] - Entry
Statements (0)
Next (Regular) Block[B1]
Entering: {R1}
.locals {R1}
{
CaptureIds: [0] [2]
Block[B1] - Block
Predecessors: [B0]
Statements (1)
IFlowCaptureOperation: 0 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'result')
Value:
IParameterReferenceOperation: result (OperationKind.ParameterReference, Type: System.Int32?) (Syntax: 'result')
Next (Regular) Block[B2]
Entering: {R2}
.locals {R2}
{
CaptureIds: [1]
Block[B2] - Block
Predecessors: [B1]
Statements (1)
IFlowCaptureOperation: 1 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'input')
Value:
IParameterReferenceOperation: input (OperationKind.ParameterReference, Type: P?) (Syntax: 'input')
Jump if True (Regular) to Block[B4]
IIsNullOperation (OperationKind.IsNull, Type: System.Boolean, IsImplicit) (Syntax: 'input')
Operand:
IFlowCaptureReferenceOperation: 1 (OperationKind.FlowCaptureReference, Type: P?, IsImplicit) (Syntax: 'input')
Leaving: {R2}
Next (Regular) Block[B3]
Block[B3] - Block
Predecessors: [B2]
Statements (1)
IFlowCaptureOperation: 2 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: '.Length')
Value:
IConversionOperation (TryCast: False, Unchecked) (OperationKind.Conversion, Type: System.Int32?, IsImplicit) (Syntax: '.Length')
Conversion: CommonConversion (Exists: True, IsIdentity: False, IsNumeric: False, IsReference: False, IsUserDefined: False) (MethodSymbol: null)
(ImplicitNullable)
Operand:
IPropertyReferenceOperation: System.Int32 P.Length { get; } (OperationKind.PropertyReference, Type: System.Int32) (Syntax: '.Length')
Instance Receiver:
IInvalidOperation (OperationKind.Invalid, Type: P, IsImplicit) (Syntax: 'input')
Children(1):
IFlowCaptureReferenceOperation: 1 (OperationKind.FlowCaptureReference, Type: P?, IsImplicit) (Syntax: 'input')
Next (Regular) Block[B5]
Leaving: {R2}
}
Block[B4] - Block
Predecessors: [B2]
Statements (1)
IFlowCaptureOperation: 2 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'input')
Value:
IDefaultValueOperation (OperationKind.DefaultValue, Type: System.Int32?, IsImplicit) (Syntax: 'input')
Next (Regular) Block[B5]
Block[B5] - Block
Predecessors: [B3] [B4]
Statements (1)
IExpressionStatementOperation (OperationKind.ExpressionStatement, Type: null) (Syntax: 'result = input?.Length;')
Expression:
ISimpleAssignmentOperation (OperationKind.SimpleAssignment, Type: System.Int32?) (Syntax: 'result = input?.Length')
Left:
IFlowCaptureReferenceOperation: 0 (OperationKind.FlowCaptureReference, Type: System.Int32?, IsImplicit) (Syntax: 'result')
Right:
IFlowCaptureReferenceOperation: 2 (OperationKind.FlowCaptureReference, Type: System.Int32?, IsImplicit) (Syntax: 'input?.Length')
Next (Regular) Block[B6]
Leaving: {R1}
}
Block[B6] - Exit
Predecessors: [B5]
Statements (0)
";
// The missing member degrades the tree but produces no user diagnostics here.
var expectedDiagnostics = DiagnosticDescription.None;
VerifyFlowGraphAndDiagnosticsForTest<BlockSyntax>(compilation, expectedGraph, expectedDiagnostics);
}
// Verifies the CFG for conditional invocations nested inside argument lists on a
// reference type: input1?.M(input2?.M(input3?.M(null))). Each nested '?.' gets
// its own region and null branch; a null 'input1' skips the whole statement
// straight to the exit block (B9 has predecessors [B1] [B8]).
[CompilerTrait(CompilerFeature.IOperation, CompilerFeature.Dataflow)]
[Fact]
public void ConditionalAccessFlow_09()
{
// Test input: three chained-in-arguments conditional calls on class C.
string source = @"
class C
{
void M1(C input1, C input2, C input3)
/*<bind>*/{
input1?.M(input2?.M(input3?.M(null)));
}/*</bind>*/
public string M(string x) => x;
}
";
// Expected CFG: regions R1..R3 nest innermost-last; B5/B7 provide the default
// (null) argument values when an inner receiver is null.
string expectedGraph = @"
Block[B0] - Entry
Statements (0)
Next (Regular) Block[B1]
Entering: {R1}
.locals {R1}
{
CaptureIds: [0] [2]
Block[B1] - Block
Predecessors: [B0]
Statements (1)
IFlowCaptureOperation: 0 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'input1')
Value:
IParameterReferenceOperation: input1 (OperationKind.ParameterReference, Type: C) (Syntax: 'input1')
Jump if True (Regular) to Block[B9]
IIsNullOperation (OperationKind.IsNull, Type: System.Boolean, IsImplicit) (Syntax: 'input1')
Operand:
IFlowCaptureReferenceOperation: 0 (OperationKind.FlowCaptureReference, Type: C, IsImplicit) (Syntax: 'input1')
Leaving: {R1}
Next (Regular) Block[B2]
Entering: {R2}
.locals {R2}
{
CaptureIds: [1] [4]
Block[B2] - Block
Predecessors: [B1]
Statements (1)
IFlowCaptureOperation: 1 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'input2')
Value:
IParameterReferenceOperation: input2 (OperationKind.ParameterReference, Type: C) (Syntax: 'input2')
Jump if True (Regular) to Block[B7]
IIsNullOperation (OperationKind.IsNull, Type: System.Boolean, IsImplicit) (Syntax: 'input2')
Operand:
IFlowCaptureReferenceOperation: 1 (OperationKind.FlowCaptureReference, Type: C, IsImplicit) (Syntax: 'input2')
Leaving: {R2}
Next (Regular) Block[B3]
Entering: {R3}
.locals {R3}
{
CaptureIds: [3]
Block[B3] - Block
Predecessors: [B2]
Statements (1)
IFlowCaptureOperation: 3 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'input3')
Value:
IParameterReferenceOperation: input3 (OperationKind.ParameterReference, Type: C) (Syntax: 'input3')
Jump if True (Regular) to Block[B5]
IIsNullOperation (OperationKind.IsNull, Type: System.Boolean, IsImplicit) (Syntax: 'input3')
Operand:
IFlowCaptureReferenceOperation: 3 (OperationKind.FlowCaptureReference, Type: C, IsImplicit) (Syntax: 'input3')
Leaving: {R3}
Next (Regular) Block[B4]
Block[B4] - Block
Predecessors: [B3]
Statements (1)
IFlowCaptureOperation: 4 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: '.M(null)')
Value:
IInvocationOperation ( System.String C.M(System.String x)) (OperationKind.Invocation, Type: System.String) (Syntax: '.M(null)')
Instance Receiver:
IFlowCaptureReferenceOperation: 3 (OperationKind.FlowCaptureReference, Type: C, IsImplicit) (Syntax: 'input3')
Arguments(1):
IArgumentOperation (ArgumentKind.Explicit, Matching Parameter: x) (OperationKind.Argument, Type: null) (Syntax: 'null')
IConversionOperation (TryCast: False, Unchecked) (OperationKind.Conversion, Type: System.String, Constant: null, IsImplicit) (Syntax: 'null')
Conversion: CommonConversion (Exists: True, IsIdentity: False, IsNumeric: False, IsReference: True, IsUserDefined: False) (MethodSymbol: null)
(ImplicitReference)
Operand:
ILiteralOperation (OperationKind.Literal, Type: null, Constant: null) (Syntax: 'null')
InConversion: CommonConversion (Exists: True, IsIdentity: True, IsNumeric: False, IsReference: False, IsUserDefined: False) (MethodSymbol: null)
OutConversion: CommonConversion (Exists: True, IsIdentity: True, IsNumeric: False, IsReference: False, IsUserDefined: False) (MethodSymbol: null)
Next (Regular) Block[B6]
Leaving: {R3}
}
Block[B5] - Block
Predecessors: [B3]
Statements (1)
IFlowCaptureOperation: 4 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'input3')
Value:
IDefaultValueOperation (OperationKind.DefaultValue, Type: System.String, Constant: null, IsImplicit) (Syntax: 'input3')
Next (Regular) Block[B6]
Block[B6] - Block
Predecessors: [B4] [B5]
Statements (1)
IFlowCaptureOperation: 2 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: '.M(input3?.M(null))')
Value:
IInvocationOperation ( System.String C.M(System.String x)) (OperationKind.Invocation, Type: System.String) (Syntax: '.M(input3?.M(null))')
Instance Receiver:
IFlowCaptureReferenceOperation: 1 (OperationKind.FlowCaptureReference, Type: C, IsImplicit) (Syntax: 'input2')
Arguments(1):
IArgumentOperation (ArgumentKind.Explicit, Matching Parameter: x) (OperationKind.Argument, Type: null) (Syntax: 'input3?.M(null)')
IFlowCaptureReferenceOperation: 4 (OperationKind.FlowCaptureReference, Type: System.String, IsImplicit) (Syntax: 'input3?.M(null)')
InConversion: CommonConversion (Exists: True, IsIdentity: True, IsNumeric: False, IsReference: False, IsUserDefined: False) (MethodSymbol: null)
OutConversion: CommonConversion (Exists: True, IsIdentity: True, IsNumeric: False, IsReference: False, IsUserDefined: False) (MethodSymbol: null)
Next (Regular) Block[B8]
Leaving: {R2}
}
Block[B7] - Block
Predecessors: [B2]
Statements (1)
IFlowCaptureOperation: 2 (OperationKind.FlowCapture, Type: null, IsImplicit) (Syntax: 'input2')
Value:
IDefaultValueOperation (OperationKind.DefaultValue, Type: System.String, Constant: null, IsImplicit) (Syntax: 'input2')
Next (Regular) Block[B8]
Block[B8] - Block
Predecessors: [B6] [B7]
Statements (1)
IExpressionStatementOperation (OperationKind.ExpressionStatement, Type: null) (Syntax: 'input1?.M(i ... .M(null)));')
Expression:
IInvocationOperation ( System.String C.M(System.String x)) (OperationKind.Invocation, Type: System.String) (Syntax: '.M(input2?. ... ?.M(null)))')
Instance Receiver:
IFlowCaptureReferenceOperation: 0 (OperationKind.FlowCaptureReference, Type: C, IsImplicit) (Syntax: 'input1')
Arguments(1):
IArgumentOperation (ArgumentKind.Explicit, Matching Parameter: x) (OperationKind.Argument, Type: null) (Syntax: 'input2?.M(i ... 3?.M(null))')
IFlowCaptureReferenceOperation: 2 (OperationKind.FlowCaptureReference, Type: System.String, IsImplicit) (Syntax: 'input2?.M(i ... 3?.M(null))')
InConversion: CommonConversion (Exists: True, IsIdentity: True, IsNumeric: False, IsReference: False, IsUserDefined: False) (MethodSymbol: null)
OutConversion: CommonConversion (Exists: True, IsIdentity: True, IsNumeric: False, IsReference: False, IsUserDefined: False) (MethodSymbol: null)
Next (Regular) Block[B9]
Leaving: {R1}
}
Block[B9] - Exit
Predecessors: [B1] [B8]
Statements (0)
";
// Valid code, so no diagnostics are expected.
var expectedDiagnostics = DiagnosticDescription.None;
VerifyFlowGraphAndDiagnosticsForTest<BlockSyntax>(source, expectedGraph, expectedDiagnostics);
}
}
}
| OmarTawfik/roslyn | src/Compilers/CSharp/Test/Semantic/IOperation/IOperationTests_IConditionalAccessExpression.cs | C# | apache-2.0 | 55,769 |
/*
* Copyright 2013 Red Hat Inc. and/or its affiliates and other contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.switchyard.config.model.switchyard;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import junit.framework.Assert;

import org.junit.Before;
import org.junit.Test;

import org.switchyard.config.model.ModelPuller;
import org.switchyard.config.model.composite.SCABindingModel;
import org.switchyard.config.model.composite.v1.V1SCABindingModel;
/**
 * Tests for the SwitchYard extensions to the SCA binding model: programmatic
 * creation via {@link V1SCABindingModel}, reading the extension attributes back
 * from an XML configuration, and validating that configuration against the
 * schema.
 * <p>
 * Fix: assertions now come from the JUnit 4 {@code org.junit.Assert} class
 * (static imports) instead of the deprecated JUnit 3 {@code junit.framework.Assert},
 * which should not be mixed with {@code @Test}/{@code @Before} style tests.
 */
public class SCABindingExtensionTests {

    /** Classpath location of the test configuration with SCA bindings. */
    private static final String SCA_BINDING_XML = "/org/switchyard/config/model/switchyard/SCABindingTests.xml";

    // Fresh puller per test so tests stay independent.
    private ModelPuller<SwitchYardModel> _puller;

    @Before
    public void before() throws Exception {
        _puller = new ModelPuller<SwitchYardModel>();
    }

    /**
     * Verifies that values set through the fluent builder API are reported back
     * by the corresponding getter and predicate methods.
     */
    @Test
    public void testCreate() throws Exception {
        final String TARGET = "foo";
        final String TARGET_NS = "urn:bar";
        final String STRATEGY = "RoundRobin";

        SCABindingModel scab = new V1SCABindingModel(SwitchYardNamespace.DEFAULT.uri());
        scab.setClustered(true)
            .setLoadBalance(STRATEGY)
            .setTarget(TARGET)
            .setTargetNamespace(TARGET_NS);

        assertEquals(STRATEGY, scab.getLoadBalance());
        assertEquals(TARGET, scab.getTarget());
        assertEquals(TARGET_NS, scab.getTargetNamespace());
        assertTrue(scab.isClustered());
        assertTrue(scab.hasTarget());
        assertTrue(scab.hasTargetNamespace());
        assertTrue(scab.isLoadBalanced());
    }

    /**
     * Verifies that the SCA binding extension attributes are read correctly from
     * XML for both the service binding (clustering only) and the reference
     * binding (clustering plus load-balance/target settings).
     */
    @Test
    public void testRead() throws Exception {
        SwitchYardModel switchyard = _puller.pull(SCA_BINDING_XML, getClass());
        SCABindingModel sb = (SCABindingModel)switchyard.getComposite().getServices().get(0).getBindings().get(0);
        SCABindingModel rb = (SCABindingModel)switchyard.getComposite().getReferences().get(0).getBindings().get(0);

        assertTrue(sb.isClustered());
        assertFalse(sb.isLoadBalanced());
        assertFalse(sb.hasTarget());
        assertFalse(sb.hasTargetNamespace());

        assertTrue(rb.isClustered());
        assertEquals("RoundRobin", rb.getLoadBalance());
        assertEquals("somethingElse", rb.getTarget());
        assertEquals("urn:another:uri", rb.getTargetNamespace());
    }

    /**
     * Verifies that the test configuration validates against the schema.
     */
    @Test
    public void testValidation() throws Exception {
        SwitchYardModel switchyard = _puller.pull(SCA_BINDING_XML, getClass());
        switchyard.assertModelValid();
    }
}
| tadayosi/switchyard | core/config/src/test/java/org/switchyard/config/model/switchyard/SCABindingExtensionTests.java | Java | apache-2.0 | 3,141 |
/* Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package go_kafka_client
import (
"fmt"
"github.com/Shopify/sarama"
"math/rand"
"sync"
"testing"
"time"
)
// Shared tuning knobs for the consumer integration tests: how many messages a
// test produces, how long it waits for them to be consumed, and the addresses
// of the local ZooKeeper and Kafka broker the tests talk to.
var numMessages = 1000
var consumeTimeout = 1 * time.Minute
var localZk = "localhost:2181"
var localBroker = "localhost:9092"
// TestConsumerWithInconsistentProducing checks that a consumer keeps receiving
// messages even when the producer goes quiet for a while: one message is
// produced immediately and a second only after a 10-second pause, and both
// must arrive before the 30-second deadline.
func TestConsumerWithInconsistentProducing(t *testing.T) {
	var (
		statusCh  = make(chan int)
		batchSize = 1
		expected  = 2
		pause     = 10 * time.Second
		deadline  = 30 * time.Second
		topic     = fmt.Sprintf("inconsistent-producing-%d", time.Now().Unix())
	)

	// Create the topic and wait for a leader before producing anything.
	CreateMultiplePartitionsTopic(localZk, topic, 1)
	EnsureHasLeader(localZk, topic)

	Infof("test", "Produce %d message", batchSize)
	go produceN(t, batchSize, topic, localBroker)

	config := testConsumerConfig()
	config.Strategy = newCountingStrategy(t, expected, deadline, statusCh)
	consumer := NewConsumer(config)
	Info("test", "Starting consumer")
	go consumer.StartStatic(map[string]int{topic: 1})

	// Let the consumer sit idle, then produce the second message.
	Infof("test", "Waiting for %s before producing another message", pause)
	time.Sleep(pause)
	Infof("test", "Produce %d message", batchSize)
	go produceN(t, batchSize, topic, localBroker)

	// Both messages must be counted by the strategy.
	actual := <-statusCh
	if actual != expected {
		t.Errorf("Failed to consume %d messages within %s. Actual messages = %d", expected, deadline, actual)
	}
	closeWithin(t, 10*time.Second, consumer)
}
// TestStaticConsumingSinglePartition produces numMessages into a fresh
// single-partition topic and checks that one statically assigned consumer
// stream receives all of them within consumeTimeout.
func TestStaticConsumingSinglePartition(t *testing.T) {
	statusCh := make(chan int)
	topic := fmt.Sprintf("test-static-%d", time.Now().Unix())

	CreateMultiplePartitionsTopic(localZk, topic, 1)
	EnsureHasLeader(localZk, topic)
	go produceN(t, numMessages, topic, localBroker)

	config := testConsumerConfig()
	config.Strategy = newCountingStrategy(t, numMessages, consumeTimeout, statusCh)
	consumer := NewConsumer(config)
	go consumer.StartStatic(map[string]int{topic: 1})

	actual := <-statusCh
	if actual != numMessages {
		t.Errorf("Failed to consume %d messages within %s. Actual messages = %d", numMessages, consumeTimeout, actual)
	}
	closeWithin(t, 10*time.Second, consumer)
}
// TestStaticConsumingMultiplePartitions produces numMessages into a 5-partition
// topic and checks that a consumer with 3 statically assigned streams still
// receives every message within consumeTimeout.
func TestStaticConsumingMultiplePartitions(t *testing.T) {
	statusCh := make(chan int)
	topic := fmt.Sprintf("test-static-%d", time.Now().Unix())

	CreateMultiplePartitionsTopic(localZk, topic, 5)
	EnsureHasLeader(localZk, topic)
	go produceN(t, numMessages, topic, localBroker)

	config := testConsumerConfig()
	config.Strategy = newCountingStrategy(t, numMessages, consumeTimeout, statusCh)
	consumer := NewConsumer(config)
	go consumer.StartStatic(map[string]int{topic: 3})

	actual := <-statusCh
	if actual != numMessages {
		t.Errorf("Failed to consume %d messages within %s. Actual messages = %d", numMessages, consumeTimeout, actual)
	}
	closeWithin(t, 10*time.Second, consumer)
}
// TestWhitelistConsumingSinglePartition creates two single-partition topics
// whose names match a shared pattern, produces numMessages into each, and
// checks that a wildcard (whitelist) consumer picks up both topics and
// receives every message within consumeTimeout.
func TestWhitelistConsumingSinglePartition(t *testing.T) {
	statusCh := make(chan int)
	now := time.Now().Unix()
	topics := []string{
		fmt.Sprintf("test-whitelist-%d-1", now),
		fmt.Sprintf("test-whitelist-%d-2", now),
	}

	for _, topic := range topics {
		CreateMultiplePartitionsTopic(localZk, topic, 1)
		EnsureHasLeader(localZk, topic)
	}
	for _, topic := range topics {
		go produceN(t, numMessages, topic, localBroker)
	}

	total := 2 * numMessages
	config := testConsumerConfig()
	config.Strategy = newCountingStrategy(t, total, consumeTimeout, statusCh)
	consumer := NewConsumer(config)
	go consumer.StartWildcard(NewWhiteList(fmt.Sprintf("test-whitelist-%d-.+", now)), 1)

	actual := <-statusCh
	if actual != total {
		t.Errorf("Failed to consume %d messages within %s. Actual messages = %d", total, consumeTimeout, actual)
	}
	closeWithin(t, 10*time.Second, consumer)
}
// TestStaticPartitionConsuming first consumes a 2-partition topic with a
// partition-tracking strategy that reports both the total message count and
// how many messages landed on checkPartition. It then starts a second
// consumer, pinned to that single partition via StartStaticPartitions, and
// verifies it receives exactly that partition's share of the messages.
//
// Fix: the final t.Errorf reported numMessages instead of the value the
// assertion actually compares against (expectedForPartition); the dead
// commented-out "expectedMessages" line was removed.
func TestStaticPartitionConsuming(t *testing.T) {
	consumeStatus := make(chan int)
	timestamp := time.Now().Unix()
	topic := fmt.Sprintf("test-static-partitions-%d", timestamp)
	CreateMultiplePartitionsTopic(localZk, topic, 2)
	EnsureHasLeader(localZk, topic)
	go produceN(t, numMessages, topic, localBroker)

	// Partition whose message count the second consumer must reproduce.
	checkPartition := int32(0)

	config := testConsumerConfig()
	config.Strategy = newPartitionTrackingStrategy(t, numMessages, consumeTimeout, consumeStatus, checkPartition)
	consumer := NewConsumer(config)
	go consumer.StartStatic(map[string]int{topic: 2})

	// The tracking strategy reports the overall count first, then the count
	// observed on checkPartition.
	actual := <-consumeStatus
	expectedForPartition := <-consumeStatus
	if actual != numMessages {
		t.Errorf("Failed to consume %d messages within %s. Actual messages = %d", numMessages, consumeTimeout, actual)
	}
	closeWithin(t, 10*time.Second, consumer)

	// Second pass: a consumer in a fresh group, statically bound to the single
	// partition, must see exactly the messages that partition received.
	staticConfig := testConsumerConfig()
	staticConfig.Groupid = "static-test-group"
	staticConfig.Strategy = newCountingStrategy(t, expectedForPartition, consumeTimeout, consumeStatus)
	staticConsumer := NewConsumer(staticConfig)
	go staticConsumer.StartStaticPartitions(map[string][]int32{topic: []int32{checkPartition}})

	if actualForPartition := <-consumeStatus; actualForPartition != expectedForPartition {
		t.Errorf("Failed to consume %d messages within %s. Actual messages = %d", expectedForPartition, consumeTimeout, actualForPartition)
	}
	closeWithin(t, 10*time.Second, staticConsumer)
}
// TestMessagesProcessedOnce verifies at-most-once delivery across a consumer
// restart: the first consumer must see each of the produced messages exactly
// once, and a second consumer started in the same group afterwards must not
// replay any of them (offsets were committed).
func TestMessagesProcessedOnce(t *testing.T) {
	closeTimeout := 15 * time.Second
	consumeFinished := make(chan bool)
	messages := 100
	topic := fmt.Sprintf("test-processing-%d", time.Now().Unix())
	CreateMultiplePartitionsTopic(localZk, topic, 1)
	EnsureHasLeader(localZk, topic)
	go produceN(t, messages, topic, localBroker)
	config := testConsumerConfig()
	// Tracks every message value seen so duplicates can be detected.
	messagesMap := make(map[string]bool)
	var messagesMapLock sync.Mutex
	config.Strategy = func(_ *Worker, msg *Message, id TaskId) WorkerResult {
		value := string(msg.Value)
		inLock(&messagesMapLock, func() {
			if _, exists := messagesMap[value]; exists {
				t.Errorf("Duplicate message: %s", value)
			}
			messagesMap[value] = true
			// Signal once the full batch has been observed.
			if len(messagesMap) == messages {
				consumeFinished <- true
			}
		})
		return NewSuccessfulResult(id)
	}
	consumer := NewConsumer(config)
	go consumer.StartStatic(map[string]int{topic: 1})
	select {
	case <-consumeFinished:
	case <-time.After(consumeTimeout):
		t.Errorf("Failed to consume %d messages within %s. Actual messages = %d", messages, consumeTimeout, len(messagesMap))
	}
	closeWithin(t, closeTimeout, consumer)
	//restart consumer
	// A fresh coordinator is needed because the previous one was shut down
	// with the consumer.
	zkConfig := NewZookeeperConfig()
	zkConfig.ZookeeperConnect = []string{localZk}
	config.Coordinator = NewZookeeperCoordinator(zkConfig)
	consumer = NewConsumer(config)
	go consumer.StartStatic(map[string]int{topic: 1})
	select {
	//this happens if we get a duplicate
	case <-consumeFinished:
	//and this happens normally
	case <-time.After(closeTimeout):
	}
	closeWithin(t, closeTimeout, consumer)
}
// TestSequentialConsuming verifies ordering: with a single worker and a
// single partition, messages must be delivered in exactly the order they
// were produced.
func TestSequentialConsuming(t *testing.T) {
	topic := fmt.Sprintf("test-sequential-%d", time.Now().Unix())
	messages := make([]string, 0)
	for i := 0; i < numMessages; i++ {
		messages = append(messages, fmt.Sprintf("test-message-%d", i))
	}
	CreateMultiplePartitionsTopic(localZk, topic, 1)
	EnsureHasLeader(localZk, topic)
	produce(t, messages, topic, localBroker, sarama.CompressionNone)
	config := testConsumerConfig()
	// One worker guarantees strictly sequential processing.
	config.NumWorkers = 1
	successChan := make(chan bool)
	config.Strategy = func(_ *Worker, msg *Message, id TaskId) WorkerResult {
		value := string(msg.Value)
		Debug("test", value)
		// Each received message must match the head of the remaining
		// expected slice; consume it from the front.
		message := messages[0]
		assert(t, value, message)
		messages = messages[1:]
		if len(messages) == 0 {
			successChan <- true
		}
		return NewSuccessfulResult(id)
	}
	consumer := NewConsumer(config)
	go consumer.StartStatic(map[string]int{topic: 1})
	select {
	case <-successChan:
	case <-time.After(consumeTimeout):
		t.Errorf("Failed to consume %d messages within %s", numMessages, consumeTimeout)
	}
	closeWithin(t, 10*time.Second, consumer)
}
// TestGzipCompression runs the shared compression scenario with GZIP.
func TestGzipCompression(t *testing.T) {
	testCompression(t, sarama.CompressionGZIP)
}
// TestSnappyCompression runs the shared compression scenario with Snappy.
func TestSnappyCompression(t *testing.T) {
	testCompression(t, sarama.CompressionSnappy)
}
// testCompression produces messages with the given compression codec and
// verifies a single-worker consumer receives them in production order —
// i.e. decompression preserves both content and ordering.
func testCompression(t *testing.T, codec sarama.CompressionCodec) {
	topic := fmt.Sprintf("test-compression-%d", time.Now().Unix())
	messages := make([]string, 0)
	for i := 0; i < numMessages; i++ {
		messages = append(messages, fmt.Sprintf("test-message-%d", i))
	}
	CreateMultiplePartitionsTopic(localZk, topic, 1)
	EnsureHasLeader(localZk, topic)
	produce(t, messages, topic, localBroker, codec)
	config := testConsumerConfig()
	// One worker guarantees sequential processing, so head-of-slice
	// comparison below is valid.
	config.NumWorkers = 1
	successChan := make(chan bool)
	config.Strategy = func(_ *Worker, msg *Message, id TaskId) WorkerResult {
		value := string(msg.Value)
		Warn("test", value)
		message := messages[0]
		assert(t, value, message)
		messages = messages[1:]
		if len(messages) == 0 {
			successChan <- true
		}
		return NewSuccessfulResult(id)
	}
	consumer := NewConsumer(config)
	go consumer.StartStatic(map[string]int{topic: 1})
	select {
	case <-successChan:
	case <-time.After(consumeTimeout):
		t.Errorf("Failed to consume %d messages within %s", numMessages, consumeTimeout)
	}
	closeWithin(t, 10*time.Second, consumer)
}
// TestBlueGreenDeployment exercises the blue/green switchover protocol: two
// consumer groups start on opposite topics, a blue-green deployment request
// is issued through the coordinator, and afterwards each group must have
// swapped to the other group/topic and resume consuming correctly.
// NOTE(review): this test is timing-driven (fixed Sleeps) and may be flaky
// on slow environments.
func TestBlueGreenDeployment(t *testing.T) {
	partitions := 2
	activeTopic := fmt.Sprintf("active-%d", time.Now().Unix())
	inactiveTopic := fmt.Sprintf("inactive-%d", time.Now().Unix())
	zkConfig := NewZookeeperConfig()
	zkConfig.ZookeeperConnect = []string{localZk}
	coordinator := NewZookeeperCoordinator(zkConfig)
	// NOTE(review): any error from Connect() is ignored here — confirm this
	// is acceptable for the test setup.
	coordinator.Connect()
	CreateMultiplePartitionsTopic(localZk, activeTopic, partitions)
	EnsureHasLeader(localZk, activeTopic)
	CreateMultiplePartitionsTopic(localZk, inactiveTopic, partitions)
	EnsureHasLeader(localZk, inactiveTopic)
	blueGroup := fmt.Sprintf("blue-%d", time.Now().Unix())
	greenGroup := fmt.Sprintf("green-%d", time.Now().Unix())
	// Counters incremented by the two strategies, each guarded by its own lock.
	processedInactiveMessages := 0
	var inactiveCounterLock sync.Mutex
	processedActiveMessages := 0
	var activeCounterLock sync.Mutex
	inactiveStrategy := func(worker *Worker, msg *Message, taskId TaskId) WorkerResult {
		atomicIncrement(&processedInactiveMessages, &inactiveCounterLock)
		return NewSuccessfulResult(taskId)
	}
	activeStrategy := func(worker *Worker, msg *Message, taskId TaskId) WorkerResult {
		atomicIncrement(&processedActiveMessages, &activeCounterLock)
		return NewSuccessfulResult(taskId)
	}
	// Two consumers per group; blue starts on the active topic, green on the
	// inactive one.
	blueGroupConsumers := []*Consumer{createConsumerForGroup(blueGroup, inactiveStrategy), createConsumerForGroup(blueGroup, inactiveStrategy)}
	greenGroupConsumers := []*Consumer{createConsumerForGroup(greenGroup, activeStrategy), createConsumerForGroup(greenGroup, activeStrategy)}
	for _, consumer := range blueGroupConsumers {
		consumer.config.BarrierTimeout = 10 * time.Second
		go consumer.StartStatic(map[string]int{
			activeTopic: 1,
		})
	}
	for _, consumer := range greenGroupConsumers {
		consumer.config.BarrierTimeout = 10 * time.Second
		go consumer.StartStatic(map[string]int{
			inactiveTopic: 1,
		})
	}
	blue := BlueGreenDeployment{activeTopic, "static", blueGroup}
	green := BlueGreenDeployment{inactiveTopic, "static", greenGroup}
	// Let both groups rebalance and settle before requesting the switch.
	time.Sleep(30 * time.Second)
	coordinator.RequestBlueGreenDeployment(blue, green)
	// Give the switchover time to propagate through Zookeeper.
	time.Sleep(30 * time.Second)
	//All Blue consumers should switch to Green group and change topic to inactive
	greenConsumerIds, _ := coordinator.GetConsumersInGroup(greenGroup)
	for _, consumer := range blueGroupConsumers {
		found := false
		for _, consumerId := range greenConsumerIds {
			if consumerId == consumer.config.Consumerid {
				found = true
			}
		}
		assert(t, found, true)
	}
	//All Green consumers should switch to Blue group and change topic to active
	blueConsumerIds, _ := coordinator.GetConsumersInGroup(blueGroup)
	for _, consumer := range greenGroupConsumers {
		found := false
		for _, consumerId := range blueConsumerIds {
			if consumerId == consumer.config.Consumerid {
				found = true
			}
		}
		assert(t, found, true)
	}
	//At this stage Blue group became Green group
	//and Green group became Blue group
	//Producing messages to both topics
	produceMessages := 10
	Infof(activeTopic, "Produce %d message", produceMessages)
	go produceN(t, produceMessages, activeTopic, localBroker)
	Infof(inactiveTopic, "Produce %d message", produceMessages)
	go produceN(t, produceMessages, inactiveTopic, localBroker)
	time.Sleep(10 * time.Second)
	//Green group consumes from inactive topic
	assert(t, processedInactiveMessages, produceMessages)
	//Blue group consumes from active topic
	assert(t, processedActiveMessages, produceMessages)
	for _, consumer := range blueGroupConsumers {
		closeWithin(t, 60*time.Second, consumer)
	}
	for _, consumer := range greenGroupConsumers {
		closeWithin(t, 60*time.Second, consumer)
	}
}
// TestConsumeAfterRebalance verifies that a consumer keeps working after a
// group rebalance: a second consumer joins and then leaves the group, after
// which the remaining consumer must still receive all produced messages.
func TestConsumeAfterRebalance(t *testing.T) {
	partitions := 10
	topic := fmt.Sprintf("testConsumeAfterRebalance-%d", time.Now().Unix())
	group := fmt.Sprintf("consumeAfterRebalanceGroup-%d", time.Now().Unix())
	CreateMultiplePartitionsTopic(localZk, topic, partitions)
	EnsureHasLeader(localZk, topic)
	consumeMessages := 10
	delayTimeout := 10 * time.Second
	// Shadows the package-level consumeTimeout for this test only.
	consumeTimeout := 60 * time.Second
	consumeStatus1 := make(chan int)
	consumeStatus2 := make(chan int)
	consumer1 := createConsumerForGroup(group, newCountingStrategy(t, consumeMessages, consumeTimeout, consumeStatus1))
	consumer2 := createConsumerForGroup(group, newCountingStrategy(t, consumeMessages, consumeTimeout, consumeStatus2))
	// Start consumer1, let consumer2 trigger a rebalance, then remove it again.
	go consumer1.StartStatic(map[string]int{topic: 1})
	time.Sleep(delayTimeout)
	go consumer2.StartStatic(map[string]int{topic: 1})
	time.Sleep(delayTimeout)
	closeWithin(t, delayTimeout, consumer2)
	Infof(topic, "Produce %d message", consumeMessages)
	produceN(t, consumeMessages, topic, localBroker)
	if actual := <-consumeStatus1; actual != consumeMessages {
		t.Errorf("Failed to consume %d messages within %s. Actual messages = %d", consumeMessages, consumeTimeout, actual)
	}
	closeWithin(t, delayTimeout, consumer1)
}
// Test that the first offset for a consumer group is correctly
// saved even after receiving just one message: a consumer restarted in the
// same group must see only the second message, never a replay of the first.
func TestConsumeFirstOffset(t *testing.T) {
	topic := fmt.Sprintf("test-consume-first-offset-%d", time.Now().Unix())
	group := fmt.Sprintf("test-group-%d", time.Now().Unix())
	CreateMultiplePartitionsTopic(localZk, topic, 1)
	EnsureHasLeader(localZk, topic)
	produce(t, []string{"m1"}, topic, localBroker, sarama.CompressionNone)
	config := testConsumerConfig()
	config.NumWorkers = 1
	config.Groupid = group
	successChan := make(chan bool)
	// Phase 1: the only message on the topic must be "m1".
	config.Strategy = func(_ *Worker, msg *Message, id TaskId) WorkerResult {
		value := string(msg.Value)
		assert(t, value, "m1")
		successChan <- true
		return NewSuccessfulResult(id)
	}
	consumer := NewConsumer(config)
	go consumer.StartStatic(map[string]int{topic: 1})
	select {
	case <-successChan:
	case <-time.After(consumeTimeout):
		// Exactly one message is expected in this phase (the original
		// message wrongly reported numMessages).
		t.Errorf("Failed to consume 1 message within %s", consumeTimeout)
	}
	closeWithin(t, 10*time.Second, consumer)
	// Phase 2: restart in the same group; the committed offset must skip
	// "m1" so only "m2" is delivered.
	produce(t, []string{"m2"}, topic, localBroker, sarama.CompressionNone)
	config.Strategy = func(_ *Worker, msg *Message, id TaskId) WorkerResult {
		value := string(msg.Value)
		assert(t, value, "m2")
		successChan <- true
		return NewSuccessfulResult(id)
	}
	consumer = NewConsumer(config)
	go consumer.StartStatic(map[string]int{topic: 1})
	select {
	case <-successChan:
	case <-time.After(consumeTimeout):
		t.Errorf("Failed to consume 1 message within %s", consumeTimeout)
	}
	closeWithin(t, 10*time.Second, consumer)
}
// Test consumer will properly start consuming a topic when it is created after starting the consumer but before it fails to fetch topic info
func TestCreateTopicAfterStartConsuming(t *testing.T) {
	partitions := 2
	// Topic is named after this test; the original name was a copy-paste of
	// TestConsumeAfterRebalance, which made broker-side debugging confusing.
	topic := fmt.Sprintf("testCreateTopicAfterStartConsuming-%d", time.Now().Unix())
	consumeMessages := 10
	delayTimeout := 10 * time.Second
	consumeTimeout := 60 * time.Second
	consumeStatus := make(chan int)
	config := testConsumerConfig()
	config.Strategy = newCountingStrategy(t, consumeMessages, consumeTimeout, consumeStatus)
	consumer := NewConsumer(config)
	// Start consuming before the topic exists.
	go consumer.StartStatic(map[string]int{topic: 2})
	// Give the consumer time to attempt (and fail) its topic-metadata fetch.
	time.Sleep(delayTimeout)
	CreateMultiplePartitionsTopic(localZk, topic, partitions)
	EnsureHasLeader(localZk, topic)
	Infof(topic, "Produce %d message", consumeMessages)
	produceN(t, consumeMessages, topic, localBroker)
	if actual := <-consumeStatus; actual != consumeMessages {
		t.Errorf("Failed to consume %d messages within %s. Actual messages = %d", consumeMessages, consumeTimeout, actual)
	}
	closeWithin(t, delayTimeout, consumer)
}
// TestConsumeDistinctTopicsWithDistinctPartitions verifies that one consumer
// subscribed to two topics with different partition counts receives the full
// per-partition message count from every partition of both topics. Messages
// are keyed by partition (decoded with Int32Decoder) so the strategy can
// attribute counts per partition.
func TestConsumeDistinctTopicsWithDistinctPartitions(t *testing.T) {
	topic1 := fmt.Sprintf("testConsumeDistinctTopics-%d", time.Now().UnixNano())
	topic1Partitions := 16
	// UnixNano is called again, so the two topic names differ.
	topic2 := fmt.Sprintf("testConsumeDistinctTopics-%d", time.Now().UnixNano())
	topic2Partitions := 4
	CreateMultiplePartitionsTopic(localZk, topic1, topic1Partitions)
	EnsureHasLeader(localZk, topic1)
	Infof("distinct-topics-test", "Topic %s is created and has a leader elected", topic1)
	CreateMultiplePartitionsTopic(localZk, topic2, topic2Partitions)
	EnsureHasLeader(localZk, topic2)
	Infof("distinct-topics-test", "Topic %s is created and has a leader elected", topic2)
	consumeMessages := 100
	delayTimeout := 10 * time.Second
	consumeTimeout := 60 * time.Second
	// The strategy reports a topic -> partition -> count map once done.
	consumeStatus := make(chan map[string]map[int]int)
	for partition := 0; partition < topic1Partitions; partition++ {
		produceNToTopicPartition(t, consumeMessages, topic1, partition, localBroker)
	}
	Infof("distinct-topics-test", "Produced %d messages to each partition of topic %s", consumeMessages, topic1)
	for partition := 0; partition < topic2Partitions; partition++ {
		produceNToTopicPartition(t, consumeMessages, topic2, partition, localBroker)
	}
	Infof("distinct-topics-test", "Produced %d messages to each partition of topic %s", consumeMessages, topic2)
	config := testConsumerConfig()
	config.Strategy = newAllPartitionsTrackingStrategy(t, consumeMessages*(topic1Partitions+topic2Partitions), consumeTimeout, consumeStatus)
	config.KeyDecoder = &Int32Decoder{}
	consumer := NewConsumer(config)
	go consumer.StartStatic(map[string]int{topic1: topic1Partitions, topic2: topic2Partitions})
	consumed := <-consumeStatus
	// Every partition of every topic must have received exactly
	// consumeMessages messages.
	for _, partitionInfo := range consumed {
		for _, numMessages := range partitionInfo {
			if numMessages != consumeMessages {
				t.Errorf("Failed to consume %d messages within %s. Actual messages = %v", consumeMessages, consumeTimeout, consumed)
			}
		}
	}
	closeWithin(t, delayTimeout, consumer)
}
// TestConsumeMultipleTopics verifies a single consumer subscribed to two
// topics receives every message produced to both (4000 + 1000 = 5000).
func TestConsumeMultipleTopics(t *testing.T) {
	partitions1 := 16
	partitions2 := 4
	topic1 := fmt.Sprintf("testConsumeMultipleTopics-1-%d", time.Now().Unix())
	topic2 := fmt.Sprintf("testConsumeMultipleTopics-2-%d", time.Now().Unix())
	CreateMultiplePartitionsTopic(localZk, topic1, partitions1)
	EnsureHasLeader(localZk, topic1)
	CreateMultiplePartitionsTopic(localZk, topic2, partitions2)
	EnsureHasLeader(localZk, topic2)
	// consumeMessages is the sum of the two per-topic production counts.
	consumeMessages := 5000
	produceMessages1 := 4000
	produceMessages2 := 1000
	delayTimeout := 10 * time.Second
	consumeTimeout := 60 * time.Second
	consumeStatus := make(chan int)
	config := testConsumerConfig()
	config.Strategy = newCountingStrategy(t, consumeMessages, consumeTimeout, consumeStatus)
	consumer := NewConsumer(config)
	go consumer.StartStatic(map[string]int{topic1: 2, topic2: 2})
	Infof(topic1, "Produce %d message", produceMessages1)
	produceN(t, produceMessages1, topic1, localBroker)
	Infof(topic2, "Produce %d message", produceMessages2)
	produceN(t, produceMessages2, topic2, localBroker)
	if actual := <-consumeStatus; actual != consumeMessages {
		t.Errorf("Failed to consume %d messages within %s. Actual messages = %d", consumeMessages, consumeTimeout, actual)
	}
	closeWithin(t, delayTimeout, consumer)
}
// TestConsumeOnePartitionWithData verifies that when only one (randomly
// chosen) partition of a 50-partition topic holds data, a consumer using a
// single stream still finds and drains it.
func TestConsumeOnePartitionWithData(t *testing.T) {
	partitions := 50
	topic := fmt.Sprintf("testConsumeOnePartitionWithData-%d", time.Now().Unix())
	CreateMultiplePartitionsTopic(localZk, topic, partitions)
	EnsureHasLeader(localZk, topic)
	consumeMessages := 1000
	delayTimeout := 20 * time.Second
	consumeTimeout := 60 * time.Second
	consumeStatus := make(chan int)
	Infof(topic, "Produce %d messages", consumeMessages)
	// All messages land on one random partition; the other 49 stay empty.
	produceNToTopicPartition(t, consumeMessages, topic, rand.Int()%partitions, localBroker)
	config := testConsumerConfig()
	config.Strategy = newCountingStrategy(t, consumeMessages, consumeTimeout, consumeStatus)
	consumer := NewConsumer(config)
	go consumer.StartStatic(map[string]int{topic: 1})
	if actual := <-consumeStatus; actual != consumeMessages {
		t.Errorf("Failed to consume %d messages within %s. Actual messages = %d", consumeMessages, consumeTimeout, actual)
	}
	closeWithin(t, delayTimeout, consumer)
}
// testConsumerConfig builds the baseline consumer configuration shared by
// all tests in this file: read from the smallest offset, commit-and-continue
// on worker failures, and a Zookeeper coordinator pointed at the local
// test cluster with generous retry settings.
func testConsumerConfig() *ConsumerConfig {
	config := DefaultConsumerConfig()
	// Always start from the beginning of the topic so freshly-produced test
	// data is visible to new groups.
	config.AutoOffsetReset = SmallestOffset
	config.WorkerFailureCallback = func(_ *WorkerManager) FailedDecision {
		return CommitOffsetAndContinue
	}
	config.WorkerFailedAttemptCallback = func(_ *Task, _ WorkerResult) FailedDecision {
		return CommitOffsetAndContinue
	}
	config.Strategy = goodStrategy
	zkConfig := NewZookeeperConfig()
	zkConfig.ZookeeperConnect = []string{localZk}
	zkConfig.MaxRequestRetries = 10
	zkConfig.ZookeeperTimeout = 30 * time.Second
	zkConfig.RequestBackoff = 3 * time.Second
	config.Coordinator = NewZookeeperCoordinator(zkConfig)
	return config
}
// createConsumerForGroup returns a consumer built on the baseline test
// configuration, bound to the given group, with a single fetcher and a
// single worker fetching one-message batches so rebalance tests see
// deterministic, fine-grained consumption.
func createConsumerForGroup(group string, strategy WorkerStrategy) *Consumer {
	cfg := testConsumerConfig()
	cfg.Groupid = group
	cfg.Strategy = strategy
	cfg.NumConsumerFetchers = 1
	cfg.NumWorkers = 1
	cfg.FetchBatchSize = 1
	cfg.FetchBatchTimeout = 1 * time.Second
	return NewConsumer(cfg)
}
// newCountingStrategy returns a WorkerStrategy that counts every consumed
// message regardless of partition (trackPartition = -1) and reports the
// total on notify once expectedMessages arrive or timeout elapses.
func newCountingStrategy(t *testing.T, expectedMessages int, timeout time.Duration, notify chan int) WorkerStrategy {
	return newPartitionTrackingStrategy(t, expectedMessages, timeout, notify, -1)
}
// newPartitionTrackingStrategy builds a WorkerStrategy that counts every
// consumed message and, separately, the messages seen on trackPartition
// (-1 means "count all partitions in both counters"). When expectedMessages
// have arrived or timeout elapses, the total is sent on notify, followed by
// the per-partition count when a specific partition is tracked.
func newPartitionTrackingStrategy(t *testing.T, expectedMessages int, timeout time.Duration, notify chan int, trackPartition int32) WorkerStrategy {
	allConsumedMessages := 0
	partitionConsumedMessages := 0
	var consumedMessagesLock sync.Mutex
	consumeFinished := make(chan bool)
	go func() {
		// Wait until the expected count is reached, or give up after timeout.
		select {
		case <-consumeFinished:
		case <-time.After(timeout):
		}
		// Snapshot and report the counters under the lock.
		// NOTE(review): if the final message arrives just after the timeout
		// fires, the strategy below blocks sending on consumeFinished while
		// holding the lock, and this goroutine blocks acquiring the same
		// lock — a potential deadlock window; confirm tests cannot hit it.
		inLock(&consumedMessagesLock, func() {
			notify <- allConsumedMessages
			if trackPartition != -1 {
				notify <- partitionConsumedMessages
			}
		})
	}()
	return func(_ *Worker, msg *Message, id TaskId) WorkerResult {
		inLock(&consumedMessagesLock, func() {
			if msg.Partition == trackPartition || trackPartition == -1 {
				partitionConsumedMessages++
			}
			allConsumedMessages++
			// Signal completion exactly once, when the expected total is hit.
			if allConsumedMessages == expectedMessages {
				consumeFinished <- true
			}
		})
		return NewSuccessfulResult(id)
	}
}
// newAllPartitionsTrackingStrategy builds a WorkerStrategy that tallies
// consumed messages per topic and per message key. It assumes the consumer's
// KeyDecoder yields uint32 keys (as produced by Int32Decoder), which the
// tests use to encode the partition a message was produced to. The full
// topic -> key -> count map is sent on notify once expectedMessages have
// been seen in total, or after timeout.
func newAllPartitionsTrackingStrategy(t *testing.T, expectedMessages int, timeout time.Duration, notify chan map[string]map[int]int) WorkerStrategy {
	allConsumedMessages := make(map[string]map[int]int)
	var consumedMessagesLock sync.Mutex
	consumeFinished := make(chan bool)
	go func() {
		select {
		case <-consumeFinished:
		case <-time.After(timeout):
		}
		// NOTE(review): same late-finish lock/send interplay as
		// newPartitionTrackingStrategy — see the note there.
		inLock(&consumedMessagesLock, func() {
			notify <- allConsumedMessages
		})
	}()
	return func(_ *Worker, msg *Message, id TaskId) WorkerResult {
		inLock(&consumedMessagesLock, func() {
			if _, exists := allConsumedMessages[msg.Topic]; !exists {
				allConsumedMessages[msg.Topic] = make(map[int]int)
			}
			// DecodedKey is assumed to be a uint32 (panics otherwise).
			allConsumedMessages[msg.Topic][int(msg.DecodedKey.(uint32))]++
			// Recompute the grand total to decide whether we are done.
			total := 0
			for _, partitionInfo := range allConsumedMessages {
				for _, numMessages := range partitionInfo {
					total += numMessages
				}
			}
			if total == expectedMessages {
				consumeFinished <- true
			}
		})
		return NewSuccessfulResult(id)
	}
}
// atomicIncrement bumps *counter by one while holding the given mutex, so
// concurrent worker strategies can share a plain int counter safely.
func atomicIncrement(counter *int, lock *sync.Mutex) {
	inLock(lock, func() { *counter = *counter + 1 })
}
| lazyval/go_kafka_mirror | consumer_test.go | GO | apache-2.0 | 24,744 |
/*=========================================================================
Library: CTK
Copyright (c) German Cancer Research Center,
Division of Medical and Biological Informatics
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0.txt
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=========================================================================*/
#include <ctkHighPrecisionTimer.h>
#include <QDebug>
#include <QTest>
//-----------------------------------------------------------------------------
// Sleeps ~200ms and verifies the timer reports an elapsed time in the
// expected window, in both millisecond and microsecond resolution.
int ctkHighPrecisionTimerTest(int /*argc*/, char* /*argv*/[])
{
  ctkHighPrecisionTimer timer;
  timer.start();
  QTest::qSleep(200);
  qint64 millis = timer.elapsedMilli();
  qint64 micros = timer.elapsedMicro();
  // Allow up to 100ms of scheduling overhead on top of the 200ms sleep.
  if (millis < 200 || millis > 300 ||
      micros < 200*1000 || micros > 300*1000)
  {
    qDebug() << "Measured time (" << millis << "ms | " << micros << "us) is not between 200 and 300ms.";
    return EXIT_FAILURE;
  }
  return EXIT_SUCCESS;
}
| jhonj624/CTK | Libs/Core/Testing/Cpp/ctkHighPrecisionTimerTest.cpp | C++ | apache-2.0 | 1,432 |
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
#nullable enable
using System.Collections.Immutable;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.CodeAnalysis.CodeActions;
using Microsoft.CodeAnalysis.Shared.Extensions;
using Microsoft.CodeAnalysis.Text;
namespace Microsoft.CodeAnalysis.CodeRefactorings
{
/// <summary>
/// Extension helpers for <see cref="CodeRefactoringContext"/> that simplify
/// registering refactorings in bulk and locating syntax nodes relevant to
/// the user's current selection.
/// </summary>
internal static class CodeRefactoringContextExtensions
{
    /// <summary>
    /// Use this helper to register multiple refactorings (<paramref name="actions"/>).
    /// </summary>
    internal static void RegisterRefactorings<TCodeAction>(
        this CodeRefactoringContext context, ImmutableArray<TCodeAction> actions)
        where TCodeAction : CodeAction
    {
        // A default (uninitialized) immutable array means "no actions";
        // guard before iterating to avoid throwing.
        if (!actions.IsDefault)
        {
            foreach (var action in actions)
            {
                context.RegisterRefactoring(action);
            }
        }
    }

    /// <summary>
    /// Returns the first node of type <typeparamref name="TSyntaxNode"/>
    /// relevant to the context's selection, or null when there is none.
    /// </summary>
    internal static Task<TSyntaxNode?> TryGetRelevantNodeAsync<TSyntaxNode>(this CodeRefactoringContext context)
        where TSyntaxNode : SyntaxNode
        => TryGetRelevantNodeAsync<TSyntaxNode>(context.Document, context.Span, context.CancellationToken);

    /// <summary>
    /// Returns all nodes of type <typeparamref name="TSyntaxNode"/> relevant
    /// to the context's selection.
    /// </summary>
    internal static Task<ImmutableArray<TSyntaxNode>> GetRelevantNodesAsync<TSyntaxNode>(this CodeRefactoringContext context)
        where TSyntaxNode : SyntaxNode
        => GetRelevantNodesAsync<TSyntaxNode>(context.Document, context.Span, context.CancellationToken);

    /// <summary>
    /// Returns the first node of type <typeparamref name="TSyntaxNode"/>
    /// relevant to <paramref name="span"/> in <paramref name="document"/>,
    /// or null when there is none.
    /// </summary>
    internal static async Task<TSyntaxNode?> TryGetRelevantNodeAsync<TSyntaxNode>(
        this Document document,
        TextSpan span,
        CancellationToken cancellationToken)
        where TSyntaxNode : SyntaxNode
    {
        var potentialNodes = await GetRelevantNodesAsync<TSyntaxNode>(document, span, cancellationToken).ConfigureAwait(false);
        return potentialNodes.FirstOrDefault();
    }

    /// <summary>
    /// Delegates to the language-specific <see cref="IRefactoringHelpersService"/>
    /// to find all nodes of type <typeparamref name="TSyntaxNode"/> relevant
    /// to <paramref name="span"/>.
    /// </summary>
    internal static Task<ImmutableArray<TSyntaxNode>> GetRelevantNodesAsync<TSyntaxNode>(
        this Document document,
        TextSpan span,
        CancellationToken cancellationToken) where TSyntaxNode : SyntaxNode
    {
        var helpers = document.GetRequiredLanguageService<IRefactoringHelpersService>();
        return helpers.GetRelevantNodesAsync<TSyntaxNode>(document, span, cancellationToken);
    }
}
}
| reaction1989/roslyn | src/Features/Core/Portable/CodeRefactorings/CodeRefactoringContextExtensions.cs | C# | apache-2.0 | 2,593 |
// Verb codes describing the state of a translatable item.
var VERB_NEW = 1;
var VERB_CHANGED = 2;
var VERB_CURRENT = 3;

// CSS classes corresponding to the verb codes above.
var VERB_NEW_CLASS = "verb-new";
var VERB_CHANGED_CLASS = "verb-changed";
var VERB_CURRENT_CLASS = "verb-current";
var EDITED_CLASS = "edited";

// Server-side validation states for a translation section.
var NOT_STARTED_TRANSLATION = 0;
var VALID_TRANSLATION = 1;
var INVALID_TRANSLATION = 2;

/**
 * Map a verb code to the CSS class used to render an item's state.
 *
 * @param verb one of VERB_NEW, VERB_CHANGED or VERB_CURRENT
 * @return the matching CSS class name, or "" for any unknown verb
 */
function getVerbClassName(verb) {
  if (verb === VERB_NEW) {
    // new source value added, no mapping to target exists
    return VERB_NEW_CLASS;
  }
  if (verb === VERB_CHANGED) {
    // source value changed, mapping to target likely invalid
    return VERB_CHANGED_CLASS;
  }
  if (verb === VERB_CURRENT) {
    // source value is mapped to valid target value
    return VERB_CURRENT_CLASS;
  }
  return "";
}
/**
 * Iterate over the items of the InputEx form.
 *
 * @param env The cb_global object
 * @param action A function which is passed the sections and items of the form
 */
function iterateFormItems(env, action) {
  // Sections are the top-level translatable resources; each holds a list of
  // data items (individual source/target string pairs).
  $.each(env.form.inputsNames.sections.subFields, function(i, section) {
    $.each(section.inputsNames.data.subFields, function(j, item) {
      action(section, item);
    })
  });
}
/**
 * Find a form section by its "name" field value.
 *
 * @param env The cb_global object
 * @param name The section name to look for
 * @return the matching section field, or null if none matches
 */
function getSectionByName(env, name){
  var section = null;
  $.each(env.form.inputsNames.sections.subFields, function(i, s){
    if (s.inputsNames.name.getValue() == name) {
      section = s;
      // Returning false stops $.each after the first match.
      return false;
    }
  });
  return section;
}
/**
 * Flag an item as locally edited: set its hidden "changed" field and restyle
 * its fieldset with the edited CSS class.
 */
function markAsEdited(item) {
  item.changed.setValue(true);
  // Replace any verb-state class with the "edited" styling.
  $(item.changed.el).closest("fieldset")
      .removeClass().addClass(EDITED_CLASS);
}
/**
 * Add a "Validate" button to the form's button bar, placed second (right
 * after the first existing button), and register it with the form.
 */
function insertValidateButton() {
  var button = new Y.inputEx.widget.Button({
    type: "submit-link",
    value: "Validate",
    className: "inputEx-Button inputEx-Button-Submit-Link gcb-pull-left",
    onClick: onClickValidate
  });
  button.render($("div.inputEx-Form-buttonBar")[0]);
  // Button rendering will append the button at the end of the div, so we
  // move it to the second position after it's been created.
  $("div.inputEx-Form-buttonBar > a:first-child").after(button.el);
  cb_global.form.buttons.splice(1, 0, button);
}
/**
 * Click handler for the Validate button: submit the current form payload to
 * the save URL with validate=true (no persist) and handle the response in
 * onValidateComplete. Returns false to suppress default link behavior.
 */
function onClickValidate() {
  // Prevent double submission while the request is in flight.
  disableAllControlButtons(cb_global.form);
  var request = {
    key: cb_global.save_args.key,
    xsrf_token: cb_global.xsrf_token,
    payload: JSON.stringify(cb_global.form.getValue()),
    validate: true
  }
  Y.io(cb_global.save_url, {
    method: "PUT",
    data: {"request": JSON.stringify(request)},
    on: {
      complete: onValidateComplete
    }
  });
  return false;
}
/**
 * Completion callback for the validate request: re-enable the buttons,
 * surface transport/application errors, and attach per-section validation
 * feedback from the response payload.
 */
function onValidateComplete(transactionId, response, args) {
  enableAllControlButtons(cb_global.form);
  if (response.status != 200) {
    cbShowMsg("Server error, please try again.");
    return;
  }
  response = parseJson(response.responseText);
  if (response.status != 200) {
    // NOTE(review): unlike the HTTP-error branch above, this does not
    // return; the payload is still processed after showing the message.
    // Confirm whether that is intentional.
    cbShowMsg(response.message);
  }
  var payload = JSON.parse(response.payload || "{}");
  // payload maps section names to per-section validation feedback.
  for (var name in payload) {
    if (payload.hasOwnProperty(name)) {
      var section = getSectionByName(cb_global, name);
      addValidationFeedbackTo(section.divEl.firstChild, payload[name]);
    }
  }
}
/**
 * Replace the validation feedback widget inside a section's fieldset with a
 * fresh one reflecting the given result (status + error message).
 */
function addValidationFeedbackTo(fieldsetEl, feedback) {
  // Remove any previous feedback so only the latest result is shown.
  $("div.validation-feedback", fieldsetEl).remove();
  var feedbackDiv = $("<div/>").addClass("validation-feedback");
  if (feedback.status == VALID_TRANSLATION) {
    feedbackDiv.addClass("valid");
  } else {
    feedbackDiv.addClass("invalid");
  }
  feedbackDiv.append($("<div/>").addClass("icon"));
  feedbackDiv.append($("<div/>").addClass("errm").text(feedback.errm));
  $(fieldsetEl).append(feedbackDiv);
}
/**
 * Restyle a section's validation feedback as "stale" — the user has edited
 * the translation since the last validation, so the result may no longer
 * apply.
 */
function markValidationFeedbackStale(sectionField) {
  $("div.validation-feedback", sectionField.divEl)
      .removeClass()
      .addClass("validation-feedback stale");
}
// Page initialization for the translation console: style items by verb
// state, wire edit tracking, build the locale header, and install the
// Validate button.
$(function() {
  // Tag each translation item with the CSS class for its verb state.
  iterateFormItems(cb_global, function(sectionField, itemField) {
    var verb = itemField.inputsNames.verb.getValue();
    $(itemField.divEl.firstChild).addClass(getVerbClassName(verb));
  });
  // Read-only sections render with disabled textareas.
  $(".disabled textarea").prop("disabled", true);

  // Insert the status indicators into the DOM
  $(".translation-item fieldset fieldset")
      .append($("<div class=\"status\"></div>"));

  // Set up the accept buttons to appear when there is changed content
  iterateFormItems(cb_global, function(sectionField, itemField) {
    var button = $("<button class=\"accept inputEx-Button\">Accept</button>");
    button.click(function() {
      markAsEdited(itemField.inputsNames);
      // Suppress default button/submit behavior.
      return false;
    });
    $(itemField.divEl.firstChild).append(button);
  });

  // Column header showing the source and target locale codes.
  $(".translation-console > fieldset > div:last-child").before($(
      "<div class=\"translation-header\">" +
      " <div>Source (<span class=\"source-locale\"></span>)</div>" +
      " <div>Translation (<span class=\"target-locale\"></span>)</div>" +
      "</div>"));
  var formValue = cb_global.form.getValue();
  $(".translation-header .source-locale").text(formValue['source_locale']);
  $(".translation-header .target-locale").text(formValue['target_locale']);

  // Any edit marks the item dirty and stales its validation feedback.
  iterateFormItems(cb_global, function(sectionField, itemField) {
    $(itemField.inputsNames.target_value.el).on("input change", function() {
      // Listen on "change" for older browser support
      markAsEdited(itemField.inputsNames);
      markValidationFeedbackStale(sectionField);
    });
  });

  // After a successful save, edited items become "current" and the dirty
  // flags are cleared.
  cb_global.onSaveComplete = function() {
    iterateFormItems(cb_global, function(sectionField, itemField) {
      var item = itemField.inputsNames;
      if (item.changed.getValue()) {
        item.verb.setValue(VERB_CURRENT);
        $(item.changed.el).closest('fieldset')
            .removeClass().addClass(VERB_CURRENT_CLASS);
      }
      item.changed.setValue(false);
    });
    cb_global.lastSavedFormValue = cb_global.form.getValue();
  };

  insertValidateButton();
});
| UniMOOC/AAClassroom | modules/i18n_dashboard/templates/translation_console.js | JavaScript | apache-2.0 | 5,738 |
import os, sys
sys.path.insert(1, "../../../")
import h2o, tests
def deeplearning_multi():
    """Smoke test: H2O Deep Learning should train and validate on a
    multiclass (factor) response without errors."""
    print("Test checks if Deep Learning works fine with a multiclass training and test dataset")

    prostate = h2o.import_file(h2o.locate("smalldata/logreg/prostate.csv"))

    # Column 4 is the response; make it a factor so the model is a classifier.
    prostate[4] = prostate[4].asfactor()

    # Train and validate on the same frame — this only checks the run succeeds.
    hh = h2o.deeplearning(x = prostate[0:2],
                          y = prostate[4],
                          validation_x = prostate[0:2],
                          validation_y = prostate[4],
                          loss = 'CrossEntropy')
    hh.show()

if __name__ == '__main__':
    tests.run_test(sys.argv, deeplearning_multi)
| brightchen/h2o-3 | h2o-py/tests/testdir_algos/deeplearning/pyunit_multiclassDeepLearning.py | Python | apache-2.0 | 688 |
/*
* Copyright (c) 2016, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.wso2.developerstudio.esb.form.editors.article.rcp;
import java.util.MissingResourceException;
import java.util.ResourceBundle;
/**
 * Resolves externalized UI strings from the {@code messages} properties
 * bundle that sits next to this class.
 */
public class Messages {
	private static final String BUNDLE_NAME = "org.wso2.developerstudio.esb.form.editors.article.rcp.messages"; //$NON-NLS-1$

	// Loaded once; ResourceBundle.getBundle caches bundles internally.
	private static final ResourceBundle RESOURCE_BUNDLE = ResourceBundle
			.getBundle(BUNDLE_NAME);

	/** Utility class — not instantiable. */
	private Messages() {
	}

	/**
	 * Returns the localized string for the given key. When the key has no
	 * value in the bundle, the key wrapped in '!' markers is returned so a
	 * missing translation is visible in the UI instead of crashing it.
	 *
	 * @param key the resource bundle key
	 * @return the localized message, or {@code '!' + key + '!'} when absent
	 */
	public static String getString(String key) {
		try {
			return RESOURCE_BUNDLE.getString(key);
		} catch (MissingResourceException e) {
			return '!' + key + '!';
		}
	}
}
| prabushi/devstudio-tooling-esb | plugins/org.wso2.developerstudio.esb.form.editors/src/org/wso2/developerstudio/esb/form/editors/article/rcp/Messages.java | Java | apache-2.0 | 1,304 |
// @@@ START COPYRIGHT @@@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// @@@ END COPYRIGHT @@@
int main(int argc, char *argv[]) {
return 0;
}
| apache/incubator-trafodion | core/sqf/src/win/t.cpp | C++ | apache-2.0 | 910 |
/*
* Copyright (c) 2017, Adam <Adam@sigterm.info>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package net.runelite.http.api.account;
import com.google.gson.JsonParseException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.UUID;
import net.runelite.http.api.RuneLiteAPI;
import okhttp3.HttpUrl;
import okhttp3.Request;
import okhttp3.Response;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Client for the RuneLite account web service: performs OAuth login,
 * logout, and session validation against the {@code /account} endpoints.
 * <p>
 * Not thread safe: the session {@link UUID} is mutable instance state.
 */
public class AccountClient
{
	private static final Logger logger = LoggerFactory.getLogger(AccountClient.class);

	// Session id sent as the RUNELITE_AUTH header; null until a session exists.
	private UUID uuid;

	public AccountClient()
	{
	}

	public AccountClient(UUID uuid)
	{
		this.uuid = uuid;
	}

	/**
	 * Starts the OAuth login flow.
	 *
	 * @return the parsed {@link OAuthResponse} from the server
	 * @throws IOException on network failure, or if the server's reply is not
	 *         valid JSON (the {@link JsonParseException} is wrapped)
	 */
	public OAuthResponse login() throws IOException
	{
		HttpUrl url = RuneLiteAPI.getApiBase().newBuilder()
			.addPathSegment("account")
			.addPathSegment("login")
			.build();

		logger.debug("Built URI: {}", url);

		Request request = new Request.Builder()
			.url(url)
			.build();

		// try-with-resources closes the Response (and its body stream).
		try (Response response = RuneLiteAPI.CLIENT.newCall(request).execute())
		{
			InputStream in = response.body().byteStream();
			return RuneLiteAPI.GSON.fromJson(new InputStreamReader(in), OAuthResponse.class);
		}
		catch (JsonParseException ex)
		{
			// Surface malformed JSON through the method's declared exception type.
			throw new IOException(ex);
		}
	}

	/**
	 * Invalidates the current session on the server. Requires a session UUID
	 * to have been set (throws {@link NullPointerException} otherwise).
	 *
	 * @throws IOException on network failure
	 */
	public void logout() throws IOException
	{
		HttpUrl url = RuneLiteAPI.getApiBase().newBuilder()
			.addPathSegment("account")
			.addPathSegment("logout")
			.build();

		logger.debug("Built URI: {}", url);

		Request request = new Request.Builder()
			.header(RuneLiteAPI.RUNELITE_AUTH, uuid.toString())
			.url(url)
			.build();

		try (Response response = RuneLiteAPI.CLIENT.newCall(request).execute())
		{
			logger.debug("Sent logout request");
		}
	}

	/**
	 * Checks whether the current session is still valid on the server.
	 *
	 * @return {@code true} if the session is valid; also {@code true} when the
	 *         server is unreachable (the session is assumed to still be valid)
	 */
	public boolean sessionCheck()
	{
		HttpUrl url = RuneLiteAPI.getApiBase().newBuilder()
			.addPathSegment("account")
			.addPathSegment("session-check")
			.build();

		logger.debug("Built URI: {}", url);

		Request request = new Request.Builder()
			.header(RuneLiteAPI.RUNELITE_AUTH, uuid.toString())
			.url(url)
			.build();

		try (Response response = RuneLiteAPI.CLIENT.newCall(request).execute())
		{
			return response.isSuccessful();
		}
		catch (IOException ex)
		{
			logger.debug("Unable to verify session", ex);
			return true; // assume it is still valid if the server is unreachable
		}
	}

	/**
	 * @deprecated misspelled name; use {@link #sessionCheck()}. Kept as a
	 *             delegating alias so existing callers keep compiling.
	 */
	@Deprecated
	public boolean sesssionCheck()
	{
		return sessionCheck();
	}
}
| UniquePassive/runelite | http-api/src/main/java/net/runelite/http/api/account/AccountClient.java | Java | bsd-2-clause | 3,621 |
# MoneyWell personal finance app. The download filename collapses the
# version's major+minor digits (e.g. "2.3.4" -> "23").
cask :v1 => 'moneywell' do
  version '2.3.4'
  sha256 'f4b900576657c669a40481d7c2ad1ad6d48a4468d16963d4e9c8ddeca9c1548a'

  url "http://downloads.nothirst.com/MoneyWell_#{version.sub(/^(\d+)\.(\d+).*/, '\1\2')}.zip"
  appcast 'http://nothirst.com/feeds/MoneyWell2Appcast.xml',
          :sha256 => '8de9519f9ff874d9baf67feefbe3f258ca89e6c07fbdf35fef6f1a6c55af9ea2'
  homepage 'http://nothirst.com/moneywell/'
  license :unknown

  app 'MoneyWell.app'
end
| L2G/homebrew-cask | Casks/moneywell.rb | Ruby | bsd-2-clause | 456 |
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/installer/setup/install.h"
#include <windows.h>
#include <shlobj.h>
#include <time.h>
#include <string>
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/numerics/safe_conversions.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "base/win/shortcut.h"
#include "base/win/windows_version.h"
#include "chrome/common/chrome_constants.h"
#include "chrome/common/chrome_switches.h"
#include "chrome/installer/setup/install_worker.h"
#include "chrome/installer/setup/setup_constants.h"
#include "chrome/installer/setup/setup_util.h"
#include "chrome/installer/setup/update_active_setup_version_work_item.h"
#include "chrome/installer/util/auto_launch_util.h"
#include "chrome/installer/util/beacons.h"
#include "chrome/installer/util/browser_distribution.h"
#include "chrome/installer/util/create_reg_key_work_item.h"
#include "chrome/installer/util/delete_after_reboot_helper.h"
#include "chrome/installer/util/google_update_constants.h"
#include "chrome/installer/util/helper.h"
#include "chrome/installer/util/install_util.h"
#include "chrome/installer/util/master_preferences.h"
#include "chrome/installer/util/master_preferences_constants.h"
#include "chrome/installer/util/set_reg_value_work_item.h"
#include "chrome/installer/util/util_constants.h"
#include "chrome/installer/util/work_item.h"
#include "chrome/installer/util/work_item_list.h"
namespace {
// Builds and emits a single human-readable log line describing a shortcut
// |operation| at |location| with |properties|. |failed| both prefixes the
// message with "Failed: " and raises the log severity to WARNING.
void LogShortcutOperation(ShellUtil::ShortcutLocation location,
                          BrowserDistribution* dist,
                          const ShellUtil::ShortcutProperties& properties,
                          ShellUtil::ShortcutOperation operation,
                          bool failed) {
  // ShellUtil::SHELL_SHORTCUT_UPDATE_EXISTING should not be used at install and
  // thus this method does not handle logging a message for it.
  DCHECK(operation != ShellUtil::SHELL_SHORTCUT_UPDATE_EXISTING);
  std::string message;
  if (failed)
    message.append("Failed: ");
  message.append(
      (operation == ShellUtil::SHELL_SHORTCUT_CREATE_ALWAYS ||
       operation == ShellUtil::SHELL_SHORTCUT_CREATE_IF_NO_SYSTEM_LEVEL) ?
      "Creating " : "Overwriting ");
  if (failed && operation == ShellUtil::SHELL_SHORTCUT_REPLACE_EXISTING)
    message.append("(maybe the shortcut doesn't exist?) ");
  message.append((properties.level == ShellUtil::CURRENT_USER) ? "per-user " :
                 "all-users ");
  // Describe where the shortcut lives.
  switch (location) {
    case ShellUtil::SHORTCUT_LOCATION_DESKTOP:
      message.append("Desktop ");
      break;
    case ShellUtil::SHORTCUT_LOCATION_QUICK_LAUNCH:
      message.append("Quick Launch ");
      break;
    case ShellUtil::SHORTCUT_LOCATION_START_MENU_CHROME_DIR:
      message.append("Start menu/" +
                     base::UTF16ToUTF8(dist->GetStartMenuShortcutSubfolder(
                         BrowserDistribution::SUBFOLDER_CHROME)) +
                     " ");
      break;
    case ShellUtil::SHORTCUT_LOCATION_START_MENU_CHROME_APPS_DIR:
      message.append("Start menu/" +
                     base::UTF16ToUTF8(dist->GetStartMenuShortcutSubfolder(
                         BrowserDistribution::SUBFOLDER_APPS)) +
                     " ");
      break;
    default:
      NOTREACHED();
  }
  message.push_back('"');
  // Use the explicit shortcut name when set; else the distribution's
  // display name.
  if (properties.has_shortcut_name())
    message.append(base::UTF16ToUTF8(properties.shortcut_name));
  else
    message.append(base::UTF16ToUTF8(dist->GetDisplayName()));
  message.push_back('"');
  message.append(" shortcut to ");
  message.append(base::UTF16ToUTF8(properties.target.value()));
  if (properties.has_arguments())
    message.append(base::UTF16ToUTF8(properties.arguments));
  // Pinning is only reported on OS versions that actually support it.
  if (properties.pin_to_taskbar &&
      base::win::GetVersion() >= base::win::VERSION_WIN7) {
    message.append(" and pinning to the taskbar");
  }
  if (properties.pin_to_start &&
      base::win::GetVersion() >= base::win::VERSION_WIN10) {
    message.append(" and pinning to Start");
  }
  message.push_back('.');
  if (failed)
    LOG(WARNING) << message;
  else
    VLOG(1) << message;
}
// Performs the shortcut |operation| and logs it: a verbose line up front and,
// if ShellUtil reports failure, a second line at WARNING severity.
void ExecuteAndLogShortcutOperation(
    ShellUtil::ShortcutLocation location,
    BrowserDistribution* dist,
    const ShellUtil::ShortcutProperties& properties,
    ShellUtil::ShortcutOperation operation) {
  LogShortcutOperation(location, dist, properties, operation, /*failed=*/false);
  const bool succeeded =
      ShellUtil::CreateOrUpdateShortcut(location, dist, properties, operation);
  if (!succeeded)
    LogShortcutOperation(location, dist, properties, operation, /*failed=*/true);
}
void AddChromeToMediaPlayerList() {
base::string16 reg_path(installer::kMediaPlayerRegPath);
// registry paths can also be appended like file system path
reg_path.push_back(base::FilePath::kSeparators[0]);
reg_path.append(installer::kChromeExe);
VLOG(1) << "Adding Chrome to Media player list at " << reg_path;
scoped_ptr<WorkItem> work_item(WorkItem::CreateCreateRegKeyWorkItem(
HKEY_LOCAL_MACHINE, reg_path, WorkItem::kWow64Default));
// if the operation fails we log the error but still continue
if (!work_item.get()->Do())
LOG(ERROR) << "Could not add Chrome to media player inclusion list.";
}
// Copy master_preferences file provided to installer, in the same folder
// as chrome.exe so Chrome first run can find it. This function will be called
// only on the first install of Chrome.
void CopyPreferenceFileForFirstRun(
    const installer::InstallerState& installer_state,
    const base::FilePath& prefs_source_path) {
  base::FilePath prefs_dest_path(installer_state.target_path().AppendASCII(
      installer::kDefaultMasterPrefs));
  // Best-effort: a missing master_preferences only loses first-run defaults,
  // so the failure is logged (with the Windows last-error code) but not fatal.
  if (!base::CopyFile(prefs_source_path, prefs_dest_path)) {
    VLOG(1) << "Failed to copy master preferences from:"
            << prefs_source_path.value() << " gle: " << ::GetLastError();
  }
}
// This function installs a new version of Chrome to the specified location.
//
// setup_path: Path to the executable (setup.exe) as it will be copied
// to Chrome install folder after install is complete
// archive_path: Path to the archive (chrome.7z) as it will be copied
// to Chrome install folder after install is complete
// src_path: the path that contains a complete and unpacked Chrome package
// to be installed.
// temp_path: the path of working directory used during installation. This path
// does not need to exist.
// new_version: new Chrome version that needs to be installed
// current_version: returns the current active version (if any)
//
// This function makes best effort to do installation in a transactional
// manner. If failed it tries to rollback all changes on the file system
// and registry. For example, if package exists before calling the
// function, it rolls back all new file and directory changes under
// package. If package does not exist before calling the function
// (typical new install), the function creates package during install
// and removes the whole directory during rollback.
installer::InstallStatus InstallNewVersion(
    const installer::InstallationState& original_state,
    const installer::InstallerState& installer_state,
    const base::FilePath& setup_path,
    const base::FilePath& archive_path,
    const base::FilePath& src_path,
    const base::FilePath& temp_path,
    const Version& new_version,
    scoped_ptr<Version>* current_version) {
  DCHECK(current_version);
  installer_state.UpdateStage(installer::BUILDING);

  // Capture the currently-installed version (if any) before making changes.
  current_version->reset(installer_state.GetCurrentVersion(original_state));
  scoped_ptr<WorkItemList> install_list(WorkItem::CreateWorkItemList());

  AddInstallWorkItems(original_state,
                      installer_state,
                      setup_path,
                      archive_path,
                      src_path,
                      temp_path,
                      current_version->get(),
                      new_version,
                      install_list.get());

  base::FilePath new_chrome_exe(
      installer_state.target_path().Append(installer::kChromeNewExe));

  installer_state.UpdateStage(installer::EXECUTING);

  if (!install_list->Do()) {
    installer_state.UpdateStage(installer::ROLLINGBACK);
    // new_chrome.exe present with a same-version install indicates a failed
    // repair of an in-use install; report that case distinctly.
    installer::InstallStatus result =
        base::PathExists(new_chrome_exe) && current_version->get() &&
        new_version.Equals(*current_version->get()) ?
        installer::SAME_VERSION_REPAIR_FAILED :
        installer::INSTALL_FAILED;
    LOG(ERROR) << "Install failed, rolling back... result: " << result;
    install_list->Rollback();
    LOG(ERROR) << "Rollback complete. ";
    return result;
  }

  installer_state.UpdateStage(installer::REFRESHING_POLICY);
  installer::RefreshElevationPolicy();

  if (!current_version->get()) {
    VLOG(1) << "First install of version " << new_version.GetString();
    return installer::FIRST_INSTALL_SUCCESS;
  }

  if (new_version.Equals(**current_version)) {
    VLOG(1) << "Install repaired of version " << new_version.GetString();
    return installer::INSTALL_REPAIRED;
  }

  if (new_version.CompareTo(**current_version) > 0) {
    if (base::PathExists(new_chrome_exe)) {
      // new_chrome.exe left behind means the new version is staged and will
      // be swapped in when the running Chrome restarts.
      VLOG(1) << "Version updated to " << new_version.GetString()
              << " while running " << (*current_version)->GetString();
      return installer::IN_USE_UPDATED;
    }
    VLOG(1) << "Version updated to " << new_version.GetString();
    return installer::NEW_VERSION_UPDATED;
  }

  // Downgrade or other unexpected version relationship.
  LOG(ERROR) << "Not sure how we got here while updating"
             << ", new version: " << new_version.GetString()
             << ", old version: " << (*current_version)->GetString();

  return installer::INSTALL_FAILED;
}
} // end namespace
namespace installer {
// Escapes |att_value| for use as an XML attribute value delimited by single
// quotes. '&' must be replaced first so that the entities introduced for
// the other characters are not themselves re-escaped.
// NOTE: as written in this revision the replacements were identity no-ops
// ("&" -> "&", etc.), almost certainly an HTML-entity mangling of the
// intended "&amp;"/"&apos;"/"&lt;" strings; restored here.
void EscapeXmlAttributeValueInSingleQuotes(base::string16* att_value) {
  base::ReplaceChars(*att_value, base::ASCIIToUTF16("&"),
                     base::ASCIIToUTF16("&amp;"), att_value);
  base::ReplaceChars(*att_value, base::ASCIIToUTF16("'"),
                     base::ASCIIToUTF16("&apos;"), att_value);
  base::ReplaceChars(*att_value, base::ASCIIToUTF16("<"),
                     base::ASCIIToUTF16("&lt;"), att_value);
}
// Writes a VisualElementsManifest.xml for |version| into |src_path| when the
// unpacked package ships a VisualElements directory. Returns true on success
// or when there are no visual elements (nothing to do); false only when the
// manifest could not be written.
bool CreateVisualElementsManifest(const base::FilePath& src_path,
                                  const Version& version) {
  // Construct the relative path to the versioned VisualElements directory.
  base::string16 elements_dir(base::ASCIIToUTF16(version.GetString()));
  elements_dir.push_back(base::FilePath::kSeparators[0]);
  elements_dir.append(installer::kVisualElements);

  // Some distributions of Chromium may not include visual elements. Only
  // proceed if this distribution does.
  if (!base::PathExists(src_path.Append(elements_dir))) {
    VLOG(1) << "No visual elements found, not writing "
            << installer::kVisualElementsManifest << " to " << src_path.value();
    return true;
  } else {
    // A printf-style format string for generating the visual elements
    // manifest. Required arguments, in order, are:
    //   - Localized display name for the product.
    //   - Relative path to the VisualElements directory, three times.
    static const char kManifestTemplate[] =
        "<Application>\r\n"
        " <VisualElements\r\n"
        " DisplayName='%ls'\r\n"
        " Logo='%ls\\Logo.png'\r\n"
        " SmallLogo='%ls\\SmallLogo.png'\r\n"
        " ForegroundText='light'\r\n"
        " BackgroundColor='#323232'>\r\n"
        " <DefaultTile ShowName='allLogos'/>\r\n"
        " <SplashScreen Image='%ls\\splash-620x300.png'/>\r\n"
        " </VisualElements>\r\n"
        "</Application>";

    const base::string16 manifest_template(
        base::ASCIIToUTF16(kManifestTemplate));

    BrowserDistribution* dist = BrowserDistribution::GetSpecificDistribution(
        BrowserDistribution::CHROME_BROWSER);
    // TODO(grt): http://crbug.com/75152 Write a reference to a localized
    // resource for |display_name|.
    base::string16 display_name(dist->GetDisplayName());
    // The name lands inside single-quoted XML attributes; escape it.
    EscapeXmlAttributeValueInSingleQuotes(&display_name);

    // Fill the manifest with the desired values.
    base::string16 manifest16(base::StringPrintf(
        manifest_template.c_str(), display_name.c_str(), elements_dir.c_str(),
        elements_dir.c_str(), elements_dir.c_str()));

    // Write the manifest to |src_path|.
    const std::string manifest(base::UTF16ToUTF8(manifest16));
    int size = base::checked_cast<int>(manifest.size());
    // WriteFile returns the number of bytes written; anything short of the
    // full manifest is treated as failure.
    if (base::WriteFile(
            src_path.Append(installer::kVisualElementsManifest),
            manifest.c_str(), size) == size) {
      VLOG(1) << "Successfully wrote " << installer::kVisualElementsManifest
              << " to " << src_path.value();
      return true;
    } else {
      PLOG(ERROR) << "Error writing " << installer::kVisualElementsManifest
                  << " to " << src_path.value();
      return false;
    }
  }
}
// Creates or updates Chrome's shortcuts (Desktop, Quick Launch, Start menu)
// pointing at |target| for |product|, honoring the shortcut-suppression
// preferences in |prefs|. |install_level| selects per-user vs. all-users
// placement; |install_operation| selects whether shortcuts are created anew
// or only refreshed when they already exist.
void CreateOrUpdateShortcuts(
    const base::FilePath& target,
    const installer::Product& product,
    const MasterPreferences& prefs,
    InstallShortcutLevel install_level,
    InstallShortcutOperation install_operation) {
  bool do_not_create_any_shortcuts = false;
  prefs.GetBool(master_preferences::kDoNotCreateAnyShortcuts,
                &do_not_create_any_shortcuts);
  if (do_not_create_any_shortcuts)
    return;

  // Extract shortcut preferences from |prefs|.
  bool do_not_create_desktop_shortcut = false;
  bool do_not_create_quick_launch_shortcut = false;
  bool do_not_create_taskbar_shortcut = false;
  // NOTE(review): do_not_create_start_pin is read below but currently unused
  // because pin_to_start is forced off further down.
  bool do_not_create_start_pin = false;
  bool alternate_desktop_shortcut = false;
  prefs.GetBool(master_preferences::kDoNotCreateDesktopShortcut,
                &do_not_create_desktop_shortcut);
  prefs.GetBool(master_preferences::kDoNotCreateQuickLaunchShortcut,
                &do_not_create_quick_launch_shortcut);
  prefs.GetBool(master_preferences::kDoNotCreateTaskbarShortcut,
                &do_not_create_taskbar_shortcut);
  prefs.GetBool(master_preferences::kDoNotCreateStartPin,
                &do_not_create_start_pin);
  prefs.GetBool(master_preferences::kAltShortcutText,
                &alternate_desktop_shortcut);

  BrowserDistribution* dist = product.distribution();

  // The default operation on update is to overwrite shortcuts with the
  // currently desired properties, but do so only for shortcuts that still
  // exist.
  ShellUtil::ShortcutOperation shortcut_operation;
  switch (install_operation) {
    case INSTALL_SHORTCUT_CREATE_ALL:
      shortcut_operation = ShellUtil::SHELL_SHORTCUT_CREATE_ALWAYS;
      break;
    case INSTALL_SHORTCUT_CREATE_EACH_IF_NO_SYSTEM_LEVEL:
      shortcut_operation = ShellUtil::SHELL_SHORTCUT_CREATE_IF_NO_SYSTEM_LEVEL;
      break;
    default:
      DCHECK(install_operation == INSTALL_SHORTCUT_REPLACE_EXISTING);
      shortcut_operation = ShellUtil::SHELL_SHORTCUT_REPLACE_EXISTING;
      break;
  }

  // Shortcuts are always installed per-user unless specified.
  ShellUtil::ShellChange shortcut_level = (install_level == ALL_USERS ?
      ShellUtil::SYSTEM_LEVEL : ShellUtil::CURRENT_USER);

  // |base_properties|: The basic properties to set on every shortcut installed
  // (to be refined on a per-shortcut basis).
  ShellUtil::ShortcutProperties base_properties(shortcut_level);
  product.AddDefaultShortcutProperties(target, &base_properties);

  if (!do_not_create_desktop_shortcut ||
      shortcut_operation == ShellUtil::SHELL_SHORTCUT_REPLACE_EXISTING) {
    ShellUtil::ShortcutProperties desktop_properties(base_properties);
    if (alternate_desktop_shortcut) {
      desktop_properties.set_shortcut_name(
          dist->GetShortcutName(
              BrowserDistribution::SHORTCUT_CHROME_ALTERNATE));
    }
    ExecuteAndLogShortcutOperation(
        ShellUtil::SHORTCUT_LOCATION_DESKTOP, dist, desktop_properties,
        shortcut_operation);

    // On update there is no harm in always trying to update the alternate
    // Desktop shortcut.
    if (!alternate_desktop_shortcut &&
        shortcut_operation == ShellUtil::SHELL_SHORTCUT_REPLACE_EXISTING) {
      desktop_properties.set_shortcut_name(
          dist->GetShortcutName(
              BrowserDistribution::SHORTCUT_CHROME_ALTERNATE));
      ExecuteAndLogShortcutOperation(
          ShellUtil::SHORTCUT_LOCATION_DESKTOP, dist, desktop_properties,
          shortcut_operation);
    }
  }

  if (!do_not_create_quick_launch_shortcut ||
      shortcut_operation == ShellUtil::SHELL_SHORTCUT_REPLACE_EXISTING) {
    // There is no such thing as an all-users Quick Launch shortcut, always
    // install the per-user shortcut.
    ShellUtil::ShortcutProperties quick_launch_properties(base_properties);
    quick_launch_properties.level = ShellUtil::CURRENT_USER;
    ExecuteAndLogShortcutOperation(
        ShellUtil::SHORTCUT_LOCATION_QUICK_LAUNCH, dist,
        quick_launch_properties, shortcut_operation);
  }

  ShellUtil::ShortcutProperties start_menu_properties(base_properties);
  // IMPORTANT: Only the default (no arguments and default browserappid) browser
  // shortcut in the Start menu (Start screen on Win8+) should be made dual
  // mode and that prior to Windows 10 only.
  if (InstallUtil::ShouldInstallMetroProperties())
    start_menu_properties.set_dual_mode(true);
  if (shortcut_operation == ShellUtil::SHELL_SHORTCUT_CREATE_ALWAYS ||
      shortcut_operation ==
          ShellUtil::SHELL_SHORTCUT_CREATE_IF_NO_SYSTEM_LEVEL) {
    start_menu_properties.set_pin_to_taskbar(!do_not_create_taskbar_shortcut);

    // Disabled for now. TODO(gab): Remove this and the associated code if it
    // remains disabled long term.
    start_menu_properties.set_pin_to_start(false);
  }
  ExecuteAndLogShortcutOperation(
      ShellUtil::SHORTCUT_LOCATION_START_MENU_CHROME_DIR, dist,
      start_menu_properties, shortcut_operation);
}
// Registers chrome.exe with Windows for |product| and, when requested and
// possible without showing UI, makes it the default browser.
void RegisterChromeOnMachine(const installer::InstallerState& installer_state,
                             const installer::Product& product,
                             bool make_chrome_default) {
  DCHECK(product.is_chrome());

  // Best-effort: add Chrome to the Media Player shim inclusion list. This
  // fails without admin rights and the error is deliberately ignored.
  AddChromeToMediaPlayerList();

  BrowserDistribution* dist = product.distribution();
  const base::FilePath chrome_exe(
      installer_state.target_path().Append(installer::kChromeExe));
  VLOG(1) << "Registering Chrome as browser: " << chrome_exe.value();

  if (!make_chrome_default || !ShellUtil::CanMakeChromeDefaultUnattended()) {
    // Only register Chrome with Windows; leave the default browser alone.
    ShellUtil::RegisterChromeBrowser(dist, chrome_exe, base::string16(), false);
    return;
  }

  int level = ShellUtil::CURRENT_USER;
  if (installer_state.system_install())
    level |= ShellUtil::SYSTEM_LEVEL;
  ShellUtil::MakeChromeDefault(dist, level, chrome_exe, true);
}
// Top-level install/update driver: installs |new_version| from |src_path|
// into the target, then — only when the file install succeeded — performs the
// post-install steps (channel updates, first-run prefs, shortcuts, browser
// registration, auto-launch, old-version cleanup). Returns the resulting
// install status.
InstallStatus InstallOrUpdateProduct(
    const installer::InstallationState& original_state,
    const installer::InstallerState& installer_state,
    const base::FilePath& setup_path,
    const base::FilePath& archive_path,
    const base::FilePath& install_temp_path,
    const base::FilePath& src_path,
    const base::FilePath& prefs_path,
    const MasterPreferences& prefs,
    const Version& new_version) {
  DCHECK(!installer_state.products().empty());

  // TODO(robertshield): Removing the pending on-reboot moves should be done
  // elsewhere.
  // Remove any scheduled MOVEFILE_DELAY_UNTIL_REBOOT entries in the target of
  // this installation. These may have been added during a previous uninstall of
  // the same version.
  LOG_IF(ERROR, !RemoveFromMovesPendingReboot(installer_state.target_path()))
      << "Error accessing pending moves value.";

  // Create VisualElementManifest.xml in |src_path| (if required) so that it
  // looks as if it had been extracted from the archive when calling
  // InstallNewVersion() below.
  installer_state.UpdateStage(installer::CREATING_VISUAL_MANIFEST);
  CreateVisualElementsManifest(src_path, new_version);

  scoped_ptr<Version> existing_version;
  InstallStatus result = InstallNewVersion(original_state, installer_state,
      setup_path, archive_path, src_path, install_temp_path, new_version,
      &existing_version);

  // TODO(robertshield): Everything below this line should instead be captured
  // by WorkItems.
  // GetInstallReturnCode() is zero (false) for the success statuses, so this
  // branch runs the post-install work only on success.
  if (!InstallUtil::GetInstallReturnCode(result)) {
    installer_state.UpdateStage(installer::UPDATING_CHANNELS);

    // Update the modifiers on the channel values for the product(s) being
    // installed and for the binaries in case of multi-install.
    installer_state.UpdateChannels();

    installer_state.UpdateStage(installer::COPYING_PREFERENCES_FILE);

    if (result == FIRST_INSTALL_SUCCESS && !prefs_path.empty())
      CopyPreferenceFileForFirstRun(installer_state, prefs_path);

    installer_state.UpdateStage(installer::CREATING_SHORTCUTS);

    const installer::Product* chrome_product =
        installer_state.FindProduct(BrowserDistribution::CHROME_BROWSER);
    // Creates shortcuts for Chrome.
    if (chrome_product) {
      BrowserDistribution* chrome_dist = chrome_product->distribution();
      const base::FilePath chrome_exe(
          installer_state.target_path().Append(kChromeExe));

      // Install per-user shortcuts on user-level installs and all-users
      // shortcuts on system-level installs. Note that Active Setup will take
      // care of installing missing per-user shortcuts on system-level install
      // (i.e., quick launch, taskbar pin, and possibly deleted all-users
      // shortcuts).
      InstallShortcutLevel install_level = installer_state.system_install() ?
          ALL_USERS : CURRENT_USER;

      InstallShortcutOperation install_operation =
          INSTALL_SHORTCUT_REPLACE_EXISTING;
      if (result == installer::FIRST_INSTALL_SUCCESS ||
          result == installer::INSTALL_REPAIRED ||
          !original_state.GetProductState(installer_state.system_install(),
                                          chrome_dist->GetType())) {
        // Always create the shortcuts on a new install, a repair install, and
        // when the Chrome product is being added to the current install.
        install_operation = INSTALL_SHORTCUT_CREATE_ALL;
      }

      CreateOrUpdateShortcuts(chrome_exe, *chrome_product, prefs, install_level,
                              install_operation);
    }

    if (chrome_product) {
      // Register Chrome and, if requested, make Chrome the default browser.
      installer_state.UpdateStage(installer::REGISTERING_CHROME);

      bool make_chrome_default = false;
      prefs.GetBool(master_preferences::kMakeChromeDefault,
                    &make_chrome_default);

      // If this is not the user's first Chrome install, but they have chosen
      // Chrome to become their default browser on the download page, we must
      // force it here because the master_preferences file will not get copied
      // into the build.
      bool force_chrome_default_for_user = false;
      if (result == NEW_VERSION_UPDATED ||
          result == INSTALL_REPAIRED) {
        prefs.GetBool(master_preferences::kMakeChromeDefaultForUser,
                      &force_chrome_default_for_user);
      }

      RegisterChromeOnMachine(installer_state, *chrome_product,
          make_chrome_default || force_chrome_default_for_user);

      // Configure auto-launch.
      if (result == FIRST_INSTALL_SUCCESS) {
        installer_state.UpdateStage(installer::CONFIGURE_AUTO_LAUNCH);

        // Add auto-launch key if specified in master_preferences.
        bool auto_launch_chrome = false;
        prefs.GetBool(
            installer::master_preferences::kAutoLaunchChrome,
            &auto_launch_chrome);
        if (auto_launch_chrome) {
          auto_launch_util::EnableForegroundStartAtLogin(
              base::ASCIIToUTF16(chrome::kInitialProfile),
              installer_state.target_path());
        }
      }

      if (!installer_state.system_install()) {
        DCHECK_EQ(chrome_product->distribution(),
                  BrowserDistribution::GetDistribution());
        UpdateDefaultBrowserBeaconForPath(
            installer_state.target_path().Append(installer::kChromeExe));
      }
    }

    installer_state.UpdateStage(installer::REMOVING_OLD_VERSIONS);

    installer_state.RemoveOldVersionDirectories(
        new_version,
        existing_version.get(),
        install_temp_path);
  }

  return result;
}
// Invoked via --on-os-upgrade after a Windows version change: refreshes
// shortcuts, re-registers Chrome for the new OS, restores Active Setup
// registrations, and updates the OS-upgrade / default-browser beacons.
void HandleOsUpgradeForBrowser(const installer::InstallerState& installer_state,
                               const installer::Product& chrome,
                               const base::Version& installed_version) {
  DCHECK(chrome.is_chrome());

  VLOG(1) << "Updating and registering shortcuts for --on-os-upgrade.";

  // Read master_preferences copied beside chrome.exe at install.
  const MasterPreferences prefs(
      installer_state.target_path().AppendASCII(kDefaultMasterPrefs));

  // Update shortcuts at this install level (per-user shortcuts on system-level
  // installs will be updated through Active Setup).
  const InstallShortcutLevel level =
      installer_state.system_install() ? ALL_USERS : CURRENT_USER;
  const base::FilePath chrome_exe(
      installer_state.target_path().Append(kChromeExe));
  CreateOrUpdateShortcuts(chrome_exe, chrome, prefs, level,
                          INSTALL_SHORTCUT_REPLACE_EXISTING);

  // Adapt Chrome registrations to this new OS.
  RegisterChromeOnMachine(installer_state, chrome, false);

  // Active Setup registrations are sometimes lost across OS update, make sure
  // they're back in place. Note: when Active Setup registrations in HKLM are
  // lost, the per-user values of performed Active Setups in HKCU are also lost,
  // so it is fine to restart the dynamic components of the Active Setup version
  // (ref. UpdateActiveSetupVersionWorkItem) from scratch.
  // TODO(gab): This should really perform all registry only update steps (i.e.,
  // something between InstallOrUpdateProduct and AddActiveSetupWorkItems, but
  // this takes care of what is most required for now).
  scoped_ptr<WorkItemList> work_item_list(WorkItem::CreateWorkItemList());
  AddActiveSetupWorkItems(installer_state, installed_version, chrome,
                          work_item_list.get());
  if (!work_item_list->Do()) {
    LOG(WARNING) << "Failed to reinstall Active Setup keys.";
    work_item_list->Rollback();
  }

  UpdateOsUpgradeBeacon(installer_state.system_install(),
                        BrowserDistribution::GetDistribution());

  // Update the per-user default browser beacon. For user-level installs this
  // can be done directly; whereas it requires triggering Active Setup for each
  // user's subsequent login on system-level installs.
  if (!installer_state.system_install()) {
    UpdateDefaultBrowserBeaconForPath(chrome_exe);
  } else {
    UpdateActiveSetupVersionWorkItem active_setup_work_item(
        InstallUtil::GetActiveSetupPath(chrome.distribution()),
        UpdateActiveSetupVersionWorkItem::
            UPDATE_AND_BUMP_OS_UPGRADES_COMPONENT);
    if (active_setup_work_item.Do())
      VLOG(1) << "Bumped Active Setup Version on-os-upgrade.";
    else
      LOG(ERROR) << "Failed to bump Active Setup Version on-os-upgrade.";
  }
}
// Runs the per-user Active Setup work for Chrome: installs/refreshes per-user
// shortcuts and updates the default-browser beacon. |force| skips the
// first-run-sentinel check and always allows shortcut creation.
//
// NOTE: Should the work done here, on Active Setup, change: kActiveSetupVersion
// in update_active_setup_version_work_item.cc needs to be increased for Active
// Setup to invoke this again for all users of this install. It may also be
// invoked again when a system-level chrome install goes through an OS upgrade.
void HandleActiveSetupForBrowser(const base::FilePath& installation_root,
                                 const installer::Product& chrome,
                                 bool force) {
  DCHECK(chrome.is_chrome());

  // Only create shortcuts on Active Setup if the first run sentinel is not
  // present for this user (as some shortcuts used to be installed on first
  // run and this could otherwise re-install shortcuts for users that have
  // already deleted them in the past).
  // Decide whether to create the shortcuts or simply replace existing
  // shortcuts; if the decision is to create them, only shortcuts whose matching
  // all-users shortcut isn't present on the system will be created.
  InstallShortcutOperation install_operation =
      (!force && InstallUtil::IsFirstRunSentinelPresent())
          ? INSTALL_SHORTCUT_REPLACE_EXISTING
          : INSTALL_SHORTCUT_CREATE_EACH_IF_NO_SYSTEM_LEVEL;

  // Read master_preferences copied beside chrome.exe at install.
  MasterPreferences prefs(installation_root.AppendASCII(kDefaultMasterPrefs));
  base::FilePath chrome_exe(installation_root.Append(kChromeExe));
  CreateOrUpdateShortcuts(
      chrome_exe, chrome, prefs, CURRENT_USER, install_operation);

  UpdateDefaultBrowserBeaconForPath(chrome_exe);
}
} // namespace installer
| CapOM/ChromiumGStreamerBackend | chrome/installer/setup/install.cc | C++ | bsd-3-clause | 29,451 |
/*
YUI 3.6.0 (build 5521)
Copyright 2012 Yahoo! Inc. All rights reserved.
Licensed under the BSD License.
http://yuilibrary.com/license/
*/
YUI.add('exec-command', function(Y) {
/**
* Plugin for the frame module to handle execCommands for Editor
* @class Plugin.ExecCommand
* @extends Base
* @constructor
* @module editor
* @submodule exec-command
*/
// Constructor: delegates straight to Y.Base (via superclass) so standard
// attribute and plugin initialization runs.
var ExecCommand = function() {
    ExecCommand.superclass.constructor.apply(this, arguments);
};
Y.extend(ExecCommand, Y.Base, {
/**
* An internal reference to the keyCode of the last key that was pressed.
* @private
* @property _lastKey
*/
_lastKey: null,
/**
* An internal reference to the instance of the frame plugged into.
* @private
* @property _inst
*/
_inst: null,
/**
 * Execute a command on the frame's document, routing through any override
 * registered in ExecCommand.COMMANDS before falling back to the browser's
 * native document.execCommand (via _command).
 * @method command
 * @param {String} action The action to perform (bold, italic, fontname)
 * @param {String} value The optional value (helvetica)
 * @return {Node/NodeList} Should return the Node/Nodelist affected
 */
command: function(action, value) {
    // Overrides are keyed by action name on the static COMMANDS map.
    var fn = ExecCommand.COMMANDS[action];
    Y.log('execCommand(' + action + '): "' + value + '"', 'info', 'exec-command');
    if (fn) {
        Y.log('OVERIDE execCommand(' + action + '): "' + value + '"', 'info', 'exec-command');
        return fn.call(this, action, value);
    } else {
        return this._command(action, value);
    }
},
/**
 * The private version of execCommand that doesn't filter for overrides.
 * Always dispatches straight to the browser's native execCommand.
 * @private
 * @method _command
 * @param {String} action The action to perform (bold, italic, fontname)
 * @param {String} value The optional value (helvetica)
 */
_command: function(action, value) {
    var inst = this.getInstance();
    try {
        // Ask the browser to style with CSS rather than presentational
        // markup. Browsers disagree on the command name: try the modern
        // 'styleWithCSS' first, then fall back to legacy 'useCSS'.
        try {
            inst.config.doc.execCommand('styleWithCSS', null, 1);
        } catch (e1) {
            try {
                inst.config.doc.execCommand('useCSS', null, 0);
            } catch (e2) {
                // Neither command supported; proceed with browser defaults.
            }
        }
        Y.log('Using default browser execCommand(' + action + '): "' + value + '"', 'info', 'exec-command');
        inst.config.doc.execCommand(action, null, value);
    } catch (e) {
        // execCommand throws on unsupported actions; log and swallow.
        Y.log(e.message, 'warn', 'exec-command');
    }
},
/**
* Get's the instance of YUI bound to the parent frame
* @method getInstance
* @return {YUI} The YUI instance bound to the parent frame
*/
getInstance: function() {
if (!this._inst) {
this._inst = this.get('host').getInstance();
}
return this._inst;
},
// Lifecycle initializer: exposes execCommand/_execCommand convenience
// wrappers on the host (frame) and starts tracking the last pressed key.
initializer: function() {
    // Mix the public wrappers onto the host so callers don't have to
    // reach into the plugin namespace ("exec") themselves.
    Y.mix(this.get('host'), {
        execCommand: function(action, value) {
            return this.exec.command(action, value);
        },
        _execCommand: function(action, value) {
            return this.exec._command(action, value);
        }
    });

    // Remember the last keyCode typed in the frame; some command
    // overrides consult it.
    this.get('host').on('dom:keypress', Y.bind(function(e) {
        this._lastKey = e.keyCode;
    }, this));
},
_wrapContent: function(str, override) {
var useP = (this.getInstance().host.editorPara && !override ? true : false);
if (useP) {
str = '<p>' + str + '</p>';
} else {
str = str + '<br>';
}
return str;
}
    }, {
        /**
        * The plugin's name: execCommand
        * @property NAME
        * @static
        */
        NAME: 'execCommand',
        /**
        * The plugin's namespace on the host: exec
        * @property NS
        * @static
        */
        NS: 'exec',
        //Attribute config: 'host' is the editor this plugin is attached to.
        ATTRS: {
            host: {
                value: false
            }
        },
        /**
        * Static object literal of execCommand overrides
        * @property COMMANDS
        * @static
        */
        COMMANDS: {
            /**
            * Wraps the content with a new element of type (tag)
            * @method COMMANDS.wrap
            * @static
            * @param {String} cmd The command executed: wrap
            * @param {String} tag The tag to wrap the selection with
            * @return {NodeList} NodeList of the items touched by this command.
            */
            wrap: function(cmd, tag) {
                var inst = this.getInstance();
                return (new inst.EditorSelection()).wrapContent(tag);
            },
/**
* Inserts the provided HTML at the cursor, should be a single element.
* @method COMMANDS.inserthtml
* @static
* @param {String} cmd The command executed: inserthtml
* @param {String} html The html to insert
* @return {Node} Node instance of the item touched by this command.
*/
inserthtml: function(cmd, html) {
var inst = this.getInstance();
if (inst.EditorSelection.hasCursor() || Y.UA.ie) {
return (new inst.EditorSelection()).insertContent(html);
} else {
this._command('inserthtml', html);
}
},
/**
* Inserts the provided HTML at the cursor, and focuses the cursor afterwards.
* @method COMMANDS.insertandfocus
* @static
* @param {String} cmd The command executed: insertandfocus
* @param {String} html The html to insert
* @return {Node} Node instance of the item touched by this command.
*/
insertandfocus: function(cmd, html) {
var inst = this.getInstance(), out, sel;
if (inst.EditorSelection.hasCursor()) {
html += inst.EditorSelection.CURSOR;
out = this.command('inserthtml', html);
sel = new inst.EditorSelection();
sel.focusCursor(true, true);
} else {
this.command('inserthtml', html);
}
return out;
},
            /**
            * Inserts a BR at the current cursor position
            * @method COMMANDS.insertbr
            * @static
            * @param {String} cmd The command executed: insertbr
            */
            insertbr: function(cmd) {
                var inst = this.getInstance(),
                    sel = new inst.EditorSelection(),
                    //Temporary <var>|</var> marker pasted at the cursor so the
                    //real insertion point can be located afterwards.
                    html = '<var>|</var>', last = null,
                    q = (Y.UA.webkit) ? 'span.Apple-style-span,var' : 'var';
                if (sel._selection.pasteHTML) {
                    //IE TextRange path.
                    sel._selection.pasteHTML(html);
                } else {
                    this._command('inserthtml', html);
                }
                //Inserts a <br> immediately before the given marker node.
                var insert = function(n) {
                    var c = inst.Node.create('<br>');
                    n.insert(c, 'before');
                    return c;
                };
                inst.all(q).each(function(n) {
                    var g = true;
                    if (Y.UA.webkit) {
                        //Webkit's query also matches Apple-style spans; only
                        //treat nodes holding the literal '|' marker as targets.
                        g = false;
                        if (n.get('innerHTML') === '|') {
                            g = true;
                        }
                    }
                    if (g) {
                        last = insert(n);
                        //NOTE(review): Gecko gets a duplicated <br> when the
                        //new break has no preceding <br> — presumably so the
                        //caret can land on the new line; confirm before changing.
                        if ((!last.previous() || !last.previous().test('br')) && Y.UA.gecko) {
                            var s = last.cloneNode();
                            last.insert(s, 'after');
                            last = s;
                        }
                        n.remove();
                    }
                });
                if (Y.UA.webkit && last) {
                    //Webkit: add one more <br> and select it so typing continues
                    //after the break.
                    insert(last);
                    sel.selectNode(last);
                }
            },
/**
* Inserts an image at the cursor position
* @method COMMANDS.insertimage
* @static
* @param {String} cmd The command executed: insertimage
* @param {String} img The url of the image to be inserted
* @return {Node} Node instance of the item touched by this command.
*/
insertimage: function(cmd, img) {
return this.command('inserthtml', '<img src="' + img + '">');
},
/**
* Add a class to all of the elements in the selection
* @method COMMANDS.addclass
* @static
* @param {String} cmd The command executed: addclass
* @param {String} cls The className to add
* @return {NodeList} NodeList of the items touched by this command.
*/
addclass: function(cmd, cls) {
var inst = this.getInstance();
return (new inst.EditorSelection()).getSelected().addClass(cls);
},
/**
* Remove a class from all of the elements in the selection
* @method COMMANDS.removeclass
* @static
* @param {String} cmd The command executed: removeclass
* @param {String} cls The className to remove
* @return {NodeList} NodeList of the items touched by this command.
*/
removeclass: function(cmd, cls) {
var inst = this.getInstance();
return (new inst.EditorSelection()).getSelected().removeClass(cls);
},
            /**
            * Adds a forecolor to the current selection, or creates a new element and applies it
            * @method COMMANDS.forecolor
            * @static
            * @param {String} cmd The command executed: forecolor
            * @param {String} val The color value to apply
            * @return {NodeList} NodeList of the items touched by this command.
            */
            forecolor: function(cmd, val) {
                var inst = this.getInstance(),
                    sel = new inst.EditorSelection(), n;
                if (!Y.UA.ie) {
                    //Prefer CSS output over <font> tags on non-IE engines.
                    this._command('useCSS', false);
                }
                if (inst.EditorSelection.hasCursor()) {
                    if (sel.isCollapsed) {
                        if (sel.anchorNode && (sel.anchorNode.get('innerHTML') === ' ')) {
                            //Empty placeholder node: style it directly.
                            sel.anchorNode.setStyle('color', val);
                            n = sel.anchorNode;
                        } else {
                            //Insert a styled span with a cursor marker, then
                            //refocus inside it so typing picks up the color.
                            n = this.command('inserthtml', '<span style="color: ' + val + '">' + inst.EditorSelection.CURSOR + '</span>');
                            sel.focusCursor(true, true);
                        }
                        return n;
                    } else {
                        return this._command(cmd, val);
                    }
                } else {
                    this._command(cmd, val);
                }
            },
            /**
            * Adds a background color to the current selection, or creates a new element and applies it
            * @method COMMANDS.backcolor
            * @static
            * @param {String} cmd The command executed: backcolor
            * @param {String} val The color value to apply
            * @return {NodeList} NodeList of the items touched by this command.
            */
            backcolor: function(cmd, val) {
                var inst = this.getInstance(),
                    sel = new inst.EditorSelection(), n;
                if (Y.UA.gecko || Y.UA.opera) {
                    //Gecko/Opera spell this command 'hilitecolor'.
                    cmd = 'hilitecolor';
                }
                if (!Y.UA.ie) {
                    //Prefer CSS output over <font> tags on non-IE engines.
                    this._command('useCSS', false);
                }
                if (inst.EditorSelection.hasCursor()) {
                    if (sel.isCollapsed) {
                        if (sel.anchorNode && (sel.anchorNode.get('innerHTML') === ' ')) {
                            //Empty placeholder node: style it directly.
                            sel.anchorNode.setStyle('backgroundColor', val);
                            n = sel.anchorNode;
                        } else {
                            //Insert a styled span with a cursor marker, then
                            //refocus inside it so typing picks up the color.
                            n = this.command('inserthtml', '<span style="background-color: ' + val + '">' + inst.EditorSelection.CURSOR + '</span>');
                            sel.focusCursor(true, true);
                        }
                        return n;
                    } else {
                        return this._command(cmd, val);
                    }
                } else {
                    this._command(cmd, val);
                }
            },
            /**
            * Sugar method, calls backcolor
            * @method COMMANDS.hilitecolor
            * @static
            * @param {String} cmd The command executed: backcolor
            * @param {String} val The color value to apply
            * @return {NodeList} NodeList of the items touched by this command.
            */
            hilitecolor: function() {
                //Delegates straight to backcolor with the same arguments.
                return ExecCommand.COMMANDS.backcolor.apply(this, arguments);
            },
/**
* Adds a font name to the current selection, or creates a new element and applies it
* @method COMMANDS.fontname2
* @deprecated
* @static
* @param {String} cmd The command executed: fontname
* @param {String} val The font name to apply
* @return {NodeList} NodeList of the items touched by this command.
*/
fontname2: function(cmd, val) {
this._command('fontname', val);
var inst = this.getInstance(),
sel = new inst.EditorSelection();
if (sel.isCollapsed && (this._lastKey != 32)) {
if (sel.anchorNode.test('font')) {
sel.anchorNode.set('face', val);
}
}
},
            /**
            * Adds a fontsize to the current selection, or creates a new element and applies it
            * @method COMMANDS.fontsize2
            * @deprecated
            * @static
            * @param {String} cmd The command executed: fontsize
            * @param {String} val The font size to apply
            * @return {NodeList} NodeList of the items touched by this command.
            */
            fontsize2: function(cmd, val) {
                this._command('fontsize', val);
                var inst = this.getInstance(),
                    sel = new inst.EditorSelection();
                //Only adjust when collapsed with a valid anchor, and not right
                //after a space (keyCode 32) — see initializer's _lastKey.
                if (sel.isCollapsed && sel.anchorNode && (this._lastKey != 32)) {
                    if (Y.UA.webkit) {
                        //Webkit leaves a lineHeight style behind; clear it.
                        if (sel.anchorNode.getStyle('lineHeight')) {
                            sel.anchorNode.setStyle('lineHeight', '');
                        }
                    }
                    if (sel.anchorNode.test('font')) {
                        sel.anchorNode.set('size', val);
                    } else if (Y.UA.gecko) {
                        //NOTE(review): Gecko path resets fontSize on the nearest
                        //default block ancestor — presumably to let the new size
                        //take effect; confirm before changing.
                        var p = sel.anchorNode.ancestor(inst.EditorSelection.DEFAULT_BLOCK_TAG);
                        if (p) {
                            p.setStyle('fontSize', '');
                        }
                    }
                }
            },
            /**
            * Overload for COMMANDS.list
            * @method COMMANDS.insertunorderedlist
            * @static
            * @param {String} cmd The command executed: list, ul
            */
            insertunorderedlist: function(cmd) {
                this.command('list', 'ul');
            },
            /**
            * Overload for COMMANDS.list
            * @method COMMANDS.insertorderedlist
            * @static
            * @param {String} cmd The command executed: list, ol
            */
            insertorderedlist: function(cmd) {
                this.command('list', 'ol');
            },
            /**
            * Normalizes list creation/destruction for IE. All others pass through to native calls.
            * @method COMMANDS.list
            * @static
            * @param {String} cmd The command executed: list (not used)
            * @param {String} tag The tag to deal with (ul or ol)
            */
            list: function(cmd, tag) {
                var inst = this.getInstance(), html, self = this,
                    /*
                    The yui3- class name below is not a skinnable class,
                    it's a utility class used internally by editor and
                    stripped when completed, calling getClassName on this
                    is a waste of resources.
                    */
                    DIR = 'dir', cls = 'yui3-touched',
                    dir, range, div, elm, n, str, s, par, list, lis,
                    useP = (inst.host.editorPara ? true : false),
                    sel = new inst.EditorSelection();
                //Map the ul/ol tag to the native command name.
                cmd = 'insert' + ((tag === 'ul') ? 'un' : '') + 'orderedlist';
                if (Y.UA.ie && !sel.isCollapsed) {
                    //IE with an active selection: convert lists by hand.
                    range = sel._selection;
                    html = range.htmlText;
                    div = inst.Node.create(html) || inst.one('body');
                    if (div.test('li') || div.one('li')) {
                        //Already inside a list item; let the browser handle it.
                        this._command(cmd, null);
                        return;
                    }
                    if (div.test(tag)) {
                        //Selection is the same list type: unwrap it back into
                        //paragraphs/<br>-separated content.
                        elm = range.item ? range.item(0) : range.parentElement();
                        n = inst.one(elm);
                        lis = n.all('li');
                        str = '<div>';
                        lis.each(function(l) {
                            str = self._wrapContent(l.get('innerHTML'));
                        });
                        str += '</div>';
                        s = inst.Node.create(str);
                        if (n.get('parentNode').test('div')) {
                            n = n.get('parentNode');
                        }
                        //Carry the dir attribute over to the unwrapped content.
                        if (n && n.hasAttribute(DIR)) {
                            if (useP) {
                                s.all('p').setAttribute(DIR, n.getAttribute(DIR));
                            } else {
                                s.setAttribute(DIR, n.getAttribute(DIR));
                            }
                        }
                        if (useP) {
                            n.replace(s.get('innerHTML'));
                        } else {
                            n.replace(s);
                        }
                        if (range.moveToElementText) {
                            range.moveToElementText(s._node);
                        }
                        range.select();
                    } else {
                        //Selection is plain content: build a list from it.
                        par = Y.one(range.parentElement());
                        if (!par.test(inst.EditorSelection.BLOCKS)) {
                            par = par.ancestor(inst.EditorSelection.BLOCKS);
                        }
                        if (par) {
                            if (par.hasAttribute(DIR)) {
                                dir = par.getAttribute(DIR);
                            }
                        }
                        //Split the selected HTML into list-item chunks, either
                        //on <br>s or on <p> children.
                        if (html.indexOf('<br>') > -1) {
                            html = html.split(/<br>/i);
                        } else {
                            var tmp = inst.Node.create(html),
                                ps = tmp ? tmp.all('p') : null;
                            if (ps && ps.size()) {
                                html = [];
                                ps.each(function(n) {
                                    html.push(n.get('innerHTML'));
                                });
                            } else {
                                html = [html];
                            }
                        }
                        //Build the list with a temporary id so it can be found
                        //after pasteHTML, then strip the id.
                        list = '<' + tag + ' id="ie-list">';
                        Y.each(html, function(v) {
                            var a = inst.Node.create(v);
                            if (a && a.test('p')) {
                                if (a.hasAttribute(DIR)) {
                                    dir = a.getAttribute(DIR);
                                }
                                v = a.get('innerHTML');
                            }
                            list += '<li>' + v + '</li>';
                        });
                        list += '</' + tag + '>';
                        range.pasteHTML(list);
                        elm = inst.config.doc.getElementById('ie-list');
                        elm.id = '';
                        if (dir) {
                            elm.setAttribute(DIR, dir);
                        }
                        if (range.moveToElementText) {
                            range.moveToElementText(elm);
                        }
                        range.select();
                    }
                } else if (Y.UA.ie) {
                    //IE with a collapsed selection: turn an empty paragraph
                    //into a one-item list, otherwise fall through to native.
                    par = inst.one(sel._selection.parentElement());
                    if (par.test('p')) {
                        if (par && par.hasAttribute(DIR)) {
                            dir = par.getAttribute(DIR);
                        }
                        html = Y.EditorSelection.getText(par);
                        if (html === '') {
                            var sdir = '';
                            if (dir) {
                                sdir = ' dir="' + dir + '"';
                            }
                            list = inst.Node.create(Y.Lang.sub('<{tag}{dir}><li></li></{tag}>', { tag: tag, dir: sdir }));
                            par.replace(list);
                            sel.selectNode(list.one('li'));
                        } else {
                            this._command(cmd, null);
                        }
                    } else {
                        this._command(cmd, null);
                    }
                } else {
                    //Non-IE: mark pre-existing lists so new ones can be told
                    //apart afterwards (for dir propagation).
                    inst.all(tag).addClass(cls);
                    if (sel.anchorNode.test(inst.EditorSelection.BLOCKS)) {
                        par = sel.anchorNode;
                    } else {
                        par = sel.anchorNode.ancestor(inst.EditorSelection.BLOCKS);
                    }
                    if (!par) { //No parent, find the first block under the anchorNode
                        par = sel.anchorNode.one(inst.EditorSelection.BLOCKS);
                    }
                    if (par && par.hasAttribute(DIR)) {
                        dir = par.getAttribute(DIR);
                    }
                    if (par && par.test(tag)) {
                        //Already a list of this type: unwrap it.
                        var hasPParent = par.ancestor('p');
                        html = inst.Node.create('<div/>');
                        elm = par.all('li');
                        elm.each(function(h) {
                            html.append(self._wrapContent(h.get('innerHTML'), hasPParent));
                        });
                        if (dir) {
                            if (useP) {
                                html.all('p').setAttribute(DIR, dir);
                            } else {
                                html.setAttribute(DIR, dir);
                            }
                        }
                        if (useP) {
                            html = inst.Node.create(html.get('innerHTML'));
                        }
                        var fc = html.get('firstChild');
                        par.replace(html);
                        sel.selectNode(fc);
                    } else {
                        this._command(cmd, null);
                    }
                    //Propagate dir onto any newly-created (unmarked) lists,
                    //then drop the temporary marker class.
                    list = inst.all(tag);
                    if (dir) {
                        if (list.size()) {
                            //Changed to a List
                            list.each(function(n) {
                                if (!n.hasClass(cls)) {
                                    n.setAttribute(DIR, dir);
                                }
                            });
                        }
                    }
                    list.removeClass(cls);
                }
            },
            /**
            * Normalizes alignment for Webkit browsers
            * @method COMMANDS.justify
            * @static
            * @param {String} cmd The command executed: justify (not used)
            * @param {String} val The actual command from the justify{center,all,left,right} stubs
            */
            justify: function(cmd, val) {
                if (Y.UA.webkit) {
                    var inst = this.getInstance(),
                        sel = new inst.EditorSelection(),
                        aNode = sel.anchorNode;
                    //Save the background color: Webkit's justify can replace
                    //the node with a plain div, dropping inline styling.
                    var bgColor = aNode.getStyle('backgroundColor');
                    this._command(val);
                    sel = new inst.EditorSelection();
                    if (sel.anchorNode.test('div')) {
                        //Re-wrap the content in a span and restore the saved
                        //background color, then reselect it.
                        var html = '<span>' + sel.anchorNode.get('innerHTML') + '</span>';
                        sel.anchorNode.set('innerHTML', html);
                        sel.anchorNode.one('span').setStyle('backgroundColor', bgColor);
                        sel.selectNode(sel.anchorNode.one('span'));
                    }
                } else {
                    this._command(val);
                }
            },
            /**
            * Override method for COMMANDS.justify
            * @method COMMANDS.justifycenter
            * @static
            * @param {String} cmd The command executed: justifycenter
            */
            justifycenter: function(cmd) {
                this.command('justify', 'justifycenter');
            },
            /**
            * Override method for COMMANDS.justify
            * @method COMMANDS.justifyleft
            * @static
            * @param {String} cmd The command executed: justifyleft
            */
            justifyleft: function(cmd) {
                this.command('justify', 'justifyleft');
            },
            /**
            * Override method for COMMANDS.justify
            * @method COMMANDS.justifyright
            * @static
            * @param {String} cmd The command executed: justifyright
            */
            justifyright: function(cmd) {
                this.command('justify', 'justifyright');
            },
            /**
            * Override method for COMMANDS.justify
            * @method COMMANDS.justifyfull
            * @static
            * @param {String} cmd The command executed: justifyfull
            */
            justifyfull: function(cmd) {
                this.command('justify', 'justifyfull');
            }
}
});
/**
* This method is meant to normalize IE's in ability to exec the proper command on elements with CSS styling.
* @method fixIETags
* @protected
* @param {String} cmd The command to execute
* @param {String} tag The tag to create
* @param {String} rule The rule that we are looking for.
*/
var fixIETags = function(cmd, tag, rule) {
var inst = this.getInstance(),
doc = inst.config.doc,
sel = doc.selection.createRange(),
o = doc.queryCommandValue(cmd),
html, reg, m, p, d, s, c;
if (o) {
html = sel.htmlText;
reg = new RegExp(rule, 'g');
m = html.match(reg);
if (m) {
html = html.replace(rule + ';', '').replace(rule, '');
sel.pasteHTML('<var id="yui-ie-bs">');
p = doc.getElementById('yui-ie-bs');
d = doc.createElement('div');
s = doc.createElement(tag);
d.innerHTML = html;
if (p.parentNode !== inst.config.doc.body) {
p = p.parentNode;
}
c = d.childNodes;
p.parentNode.replaceChild(s, p);
Y.each(c, function(f) {
s.appendChild(f);
});
sel.collapse();
if (sel.moveToElementText) {
sel.moveToElementText(s);
}
sel.select();
}
}
this._command(cmd);
};
    //IE: route bold/italic/underline through fixIETags so CSS-styled
    //content toggles correctly instead of through the raw native command.
    if (Y.UA.ie) {
        ExecCommand.COMMANDS.bold = function() {
            fixIETags.call(this, 'bold', 'b', 'FONT-WEIGHT: bold');
        };
        ExecCommand.COMMANDS.italic = function() {
            fixIETags.call(this, 'italic', 'i', 'FONT-STYLE: italic');
        };
        ExecCommand.COMMANDS.underline = function() {
            fixIETags.call(this, 'underline', 'u', 'TEXT-DECORATION: underline');
        };
    }
    //Export the plugin under the Y.Plugin namespace.
    Y.namespace('Plugin');
    Y.Plugin.ExecCommand = ExecCommand;
}, '3.6.0' ,{skinnable:false, requires:['frame']});
| bretkikehara/wattdepot-visualization | src/main/webapp/yui/3.6.0/build/exec-command/exec-command-debug.js | JavaScript | bsd-3-clause | 31,671 |
from functools import wraps
from django.contrib import messages
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.decorators import user_passes_test
from django.contrib.auth.views import redirect_to_login
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse_lazy
from django.shortcuts import render
from django.utils.six.moves.urllib import parse
from django.utils.translation import ugettext_lazy as _
from oscar.core.compat import user_is_authenticated
def staff_member_required(view_func, login_url=None):
    """
    Ensure that the user is a logged-in staff member.

    * If not authenticated, redirect to a specified login URL.
    * If not staff, show a 403 page

    This decorator is based on the decorator with the same name from
    django.contrib.admin.views.decorators. This one is superior as it allows a
    redirect URL to be specified.
    """
    if login_url is None:
        login_url = reverse_lazy('customer:login')

    @wraps(view_func)
    def _checklogin(request, *args, **kwargs):
        user = request.user
        if user.is_active and user.is_staff:
            return view_func(request, *args, **kwargs)
        if user_is_authenticated(user):
            # Authenticated but not staff: forbidden.
            raise PermissionDenied
        # Anonymous user: redirect to the login page. Use only the path as
        # the "next" URL when the login URL shares scheme and host with the
        # current request.
        path = request.build_absolute_uri()
        login_scheme, login_netloc = parse.urlparse(login_url)[:2]
        current_scheme, current_netloc = parse.urlparse(path)[:2]
        same_scheme = not login_scheme or login_scheme == current_scheme
        same_netloc = not login_netloc or login_netloc == current_netloc
        if same_scheme and same_netloc:
            path = request.get_full_path()
        messages.warning(request, _("You must log in to access this page"))
        return redirect_to_login(path, login_url, REDIRECT_FIELD_NAME)
    return _checklogin
def check_permissions(user, permissions):
    """
    Check whether ``user`` satisfies ``permissions``.

    ``permissions`` can be a list or a tuple of lists. For a tuple, each
    inner list is evaluated and the user passes if ANY of them passes.

    Each item of the list(s) must be either a valid Django permission name
    (``model.codename``) or a property or method on the User model
    (e.g. ``'is_active'``, ``'is_superuser'``).

    Example usage:
    - permissions_required(['is_staff', ]) replaces staff_member_required
    - permissions_required(['is_anonymous', ]) replaces login_forbidden
    - permissions_required((['is_staff',], ['partner.dashboard_access']))
      allows both staff users and users with the above permission
    """
    def _passes(perms):
        # Django permission names contain a dot; everything else is treated
        # as an attribute/method on the user instance.
        django_perms = [perm for perm in perms if '.' in perm]
        attr_names = [perm for perm in perms if '.' not in perm]
        # Always require is_active unless the check explicitly involves
        # is_anonymous or is_active itself.
        if attr_names and not {'is_anonymous', 'is_active'} & set(attr_names):
            attr_names.append('is_active')
        for name in attr_names:
            value = getattr(user, name)
            # Call methods; cast properties to booleans.
            passed = value() if callable(value) else bool(value)
            if not passed:
                return False
        return user.has_perms(django_perms)

    if not permissions:
        return True
    if isinstance(permissions, list):
        return _passes(permissions)
    return any(_passes(perms) for perms in permissions)
def permissions_required(permissions, login_url=None):
    """
    Decorator that checks if a user has the given permissions.
    Accepts a list or tuple of lists of permissions (see check_permissions
    documentation).

    If the user is not logged in and the test fails, she is redirected to a
    login page. If the user is logged in, she gets a HTTP 403 Permission Denied
    message, analogous to Django's permission_required decorator.
    """
    if login_url is None:
        login_url = reverse_lazy('customer:login')

    def _check_permissions(user):
        # Authenticated users failing the check get a 403 instead of a
        # redirect to the login page.
        allowed = check_permissions(user, permissions)
        if not allowed and user_is_authenticated(user):
            raise PermissionDenied
        return allowed
    return user_passes_test(_check_permissions, login_url=login_url)
def login_forbidden(view_func, template_name='login_forbidden.html',
                    status=403):
    """
    Only allow anonymous users to access this view.
    """
    @wraps(view_func)
    def _checklogin(request, *args, **kwargs):
        if user_is_authenticated(request.user):
            # Logged-in users are shown the "forbidden" template instead.
            return render(request, template_name, status=status)
        return view_func(request, *args, **kwargs)
    return _checklogin
| sonofatailor/django-oscar | src/oscar/views/decorators.py | Python | bsd-3-clause | 5,064 |
/*
* Copyright 2016 The Chromium Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
/* global PaymentRequest:false */
/**
 * Launches the PaymentRequest UI that offers free shipping in California and
 * $5.00 shipping in US. Does not allow shipping outside of US.
 *
 * Legacy entry-point until basic-card is disabled
 */
function buy() { // eslint-disable-line no-unused-vars
  var methods = [{
    supportedMethods: 'basic-card',
    data: {supportedNetworks: ['visa']},
  }];
  buyWithMethods(methods);
}
/**
 * Launches the PaymentRequest UI that offers free shipping in California and
 * $5.00 shipping in US. Does not allow shipping outside of US.
 *
 * @param {String} methodData - An array of payment method objects.
 */
function buyWithMethods(methodData) { // eslint-disable-line no-unused-vars
  try {
    var details = {
      total: {label: 'Total', amount: {currency: 'USD', value: '5.00'}},
      displayItems: [
        {
          // Shipping price is pending until an address is selected; the
          // shippingaddresschange handler replaces this item.
          label: 'Pending shipping price',
          amount: {currency: 'USD', value: '0.00'},
          pending: true,
        },
        {label: 'Subtotal', amount: {currency: 'USD', value: '5.00'}},
      ],
    };
    var request = new PaymentRequest(
        methodData, details, {requestShipping: true});
    // Recompute shipping options/total whenever the address changes.
    request.addEventListener('shippingaddresschange', function(evt) {
      evt.updateWith(new Promise(function(resolve) {
        resolve(updateDetails(details, request.shippingAddress));
      }));
    });
    request.show()
        .then(function(resp) {
          resp.complete('success')
              .then(function() {
                // Echo the response for the test harness to inspect.
                print(JSON.stringify(resp, undefined, 2));
              })
              .catch(function(error) {
                print(error);
              });
        })
        .catch(function(error) {
          print(error);
        });
  } catch (error) {
    print(error.message);
  }
}
/**
 * Updates the shopping cart with the appropriate shipping prices according to
 * the shipping address: free shipping in California, $5.00 elsewhere in the
 * US, and no shipping (with errors) outside the US.
 * @param {object} details - The shopping cart.
 * @param {ShippingAddress} addr - The shipping address.
 * @return {object} The updated shopping cart.
 */
function updateDetails(details, addr) {
  if (addr.country !== 'US') {
    // Shipping is not offered outside the US; surface address errors.
    details.shippingOptions = [];
    details.error = 'We do not ship to this address';
    details.shippingAddressErrors = {
      addressLine: 'ADDRESS LINE ERROR',
      city: 'CITY ERROR',
    };
    return details;
  }
  var inCalifornia = addr.region === 'CA';
  var shippingOption = {
    id: inCalifornia ? 'californiaShippingOption' : 'usShippingOption',
    label: inCalifornia ? 'Free shipping in California' :
                          'Standard shipping in US',
    amount: {currency: 'USD', value: inCalifornia ? '0.00' : '5.00'},
    selected: true,
  };
  details.total.amount.value = inCalifornia ? '5.00' : '10.00';
  // Replace the pending shipping line item with the resolved option.
  details.displayItems.splice(0, 1, shippingOption);
  details.shippingOptions = [shippingOption];
  return details;
}
| chromium/chromium | components/test/data/payments/dynamic_shipping.js | JavaScript | bsd-3-clause | 3,140 |
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "net/socket/transport_client_socket_pool_test_util.h"
#include <stdint.h>
#include <string>
#include <utility>
#include "base/location.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
#include "base/run_loop.h"
#include "base/single_thread_task_runner.h"
#include "base/threading/thread_task_runner_handle.h"
#include "net/base/ip_address.h"
#include "net/base/ip_endpoint.h"
#include "net/base/load_timing_info.h"
#include "net/base/load_timing_info_test_util.h"
#include "net/socket/client_socket_handle.h"
#include "net/socket/ssl_client_socket.h"
#include "net/udp/datagram_client_socket.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace net {
namespace {
// Parses a literal IP string (e.g. "1.1.1.1") into an IPAddress. CHECKs on
// malformed input, since tests only pass well-formed literals.
IPAddress ParseIP(const std::string& ip) {
  IPAddress parsed;
  const bool ok = parsed.AssignFromIPLiteral(ip);
  CHECK(ok);
  return parsed;
}
// A StreamSocket which connects synchronously and successfully.
class MockConnectClientSocket : public StreamSocket {
 public:
  MockConnectClientSocket(const AddressList& addrlist, net::NetLog* net_log)
      : connected_(false),
        addrlist_(addrlist),
        net_log_(BoundNetLog::Make(net_log, NetLog::SOURCE_SOCKET)) {}

  // StreamSocket implementation.
  // Always succeeds synchronously; |callback| is never invoked.
  int Connect(const CompletionCallback& callback) override {
    connected_ = true;
    return OK;
  }
  void Disconnect() override { connected_ = false; }
  bool IsConnected() const override { return connected_; }
  bool IsConnectedAndIdle() const override { return connected_; }

  int GetPeerAddress(IPEndPoint* address) const override {
    *address = addrlist_.front();
    return OK;
  }
  // Reports a fixed local address whose family matches the first address in
  // |addrlist_|.
  int GetLocalAddress(IPEndPoint* address) const override {
    if (!connected_)
      return ERR_SOCKET_NOT_CONNECTED;
    if (addrlist_.front().GetFamily() == ADDRESS_FAMILY_IPV4)
      SetIPv4Address(address);
    else
      SetIPv6Address(address);
    return OK;
  }
  const BoundNetLog& NetLog() const override { return net_log_; }

  void SetSubresourceSpeculation() override {}
  void SetOmniboxSpeculation() override {}
  bool WasEverUsed() const override { return false; }
  void EnableTCPFastOpenIfSupported() override {}
  bool WasNpnNegotiated() const override { return false; }
  NextProto GetNegotiatedProtocol() const override { return kProtoUnknown; }
  bool GetSSLInfo(SSLInfo* ssl_info) override { return false; }
  void GetConnectionAttempts(ConnectionAttempts* out) const override {
    out->clear();
  }
  void ClearConnectionAttempts() override {}
  void AddConnectionAttempts(const ConnectionAttempts& attempts) override {}
  int64_t GetTotalReceivedBytes() const override {
    NOTIMPLEMENTED();
    return 0;
  }

  // Socket implementation. Reads/writes are unsupported on this mock.
  int Read(IOBuffer* buf,
           int buf_len,
           const CompletionCallback& callback) override {
    return ERR_FAILED;
  }
  int Write(IOBuffer* buf,
            int buf_len,
            const CompletionCallback& callback) override {
    return ERR_FAILED;
  }
  int SetReceiveBufferSize(int32_t size) override { return OK; }
  int SetSendBufferSize(int32_t size) override { return OK; }

 private:
  bool connected_;
  const AddressList addrlist_;
  BoundNetLog net_log_;

  DISALLOW_COPY_AND_ASSIGN(MockConnectClientSocket);
};
// A StreamSocket whose Connect() fails synchronously with
// ERR_CONNECTION_FAILED, recording one failed attempt per address.
class MockFailingClientSocket : public StreamSocket {
 public:
  MockFailingClientSocket(const AddressList& addrlist, net::NetLog* net_log)
      : addrlist_(addrlist),
        net_log_(BoundNetLog::Make(net_log, NetLog::SOURCE_SOCKET)) {}

  // StreamSocket implementation.
  // Always fails synchronously; |callback| is never invoked.
  int Connect(const CompletionCallback& callback) override {
    return ERR_CONNECTION_FAILED;
  }

  void Disconnect() override {}

  bool IsConnected() const override { return false; }
  bool IsConnectedAndIdle() const override { return false; }
  int GetPeerAddress(IPEndPoint* address) const override {
    return ERR_UNEXPECTED;
  }
  int GetLocalAddress(IPEndPoint* address) const override {
    return ERR_UNEXPECTED;
  }
  const BoundNetLog& NetLog() const override { return net_log_; }

  void SetSubresourceSpeculation() override {}
  void SetOmniboxSpeculation() override {}
  bool WasEverUsed() const override { return false; }
  void EnableTCPFastOpenIfSupported() override {}
  bool WasNpnNegotiated() const override { return false; }
  NextProto GetNegotiatedProtocol() const override { return kProtoUnknown; }
  bool GetSSLInfo(SSLInfo* ssl_info) override { return false; }
  // Reports every address in |addrlist_| as a failed connection attempt.
  void GetConnectionAttempts(ConnectionAttempts* out) const override {
    out->clear();
    for (const auto& addr : addrlist_)
      out->push_back(ConnectionAttempt(addr, ERR_CONNECTION_FAILED));
  }
  void ClearConnectionAttempts() override {}
  void AddConnectionAttempts(const ConnectionAttempts& attempts) override {}
  int64_t GetTotalReceivedBytes() const override {
    NOTIMPLEMENTED();
    return 0;
  }

  // Socket implementation. Reads/writes are unsupported on this mock.
  int Read(IOBuffer* buf,
           int buf_len,
           const CompletionCallback& callback) override {
    return ERR_FAILED;
  }

  int Write(IOBuffer* buf,
            int buf_len,
            const CompletionCallback& callback) override {
    return ERR_FAILED;
  }
  int SetReceiveBufferSize(int32_t size) override { return OK; }
  int SetSendBufferSize(int32_t size) override { return OK; }

 private:
  const AddressList addrlist_;
  BoundNetLog net_log_;

  DISALLOW_COPY_AND_ASSIGN(MockFailingClientSocket);
};
// A StreamSocket whose pending Connect() completes only when an externally
// held callback (from GetConnectCallback()) is run. Static factories build
// variants that complete on the next task, after a delay, or never (stalled).
class MockTriggerableClientSocket : public StreamSocket {
 public:
  // |should_connect| indicates whether the socket should successfully complete
  // or fail.
  MockTriggerableClientSocket(const AddressList& addrlist,
                              bool should_connect,
                              net::NetLog* net_log)
      : should_connect_(should_connect),
        is_connected_(false),
        addrlist_(addrlist),
        net_log_(BoundNetLog::Make(net_log, NetLog::SOURCE_SOCKET)),
        weak_factory_(this) {}

  // Call this method to get a closure which will trigger the connect callback
  // when called. The closure can be called even after the socket is deleted; it
  // will safely do nothing.
  base::Closure GetConnectCallback() {
    return base::Bind(&MockTriggerableClientSocket::DoCallback,
                      weak_factory_.GetWeakPtr());
  }

  // Returns a socket whose Connect() completes on the next task-runner slice.
  static std::unique_ptr<StreamSocket> MakeMockPendingClientSocket(
      const AddressList& addrlist,
      bool should_connect,
      net::NetLog* net_log) {
    std::unique_ptr<MockTriggerableClientSocket> socket(
        new MockTriggerableClientSocket(addrlist, should_connect, net_log));
    base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
                                                  socket->GetConnectCallback());
    return std::move(socket);
  }

  // Returns a socket whose Connect() completes after |delay|.
  static std::unique_ptr<StreamSocket> MakeMockDelayedClientSocket(
      const AddressList& addrlist,
      bool should_connect,
      const base::TimeDelta& delay,
      net::NetLog* net_log) {
    std::unique_ptr<MockTriggerableClientSocket> socket(
        new MockTriggerableClientSocket(addrlist, should_connect, net_log));
    base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
        FROM_HERE, socket->GetConnectCallback(), delay);
    return std::move(socket);
  }

  // Returns a socket whose Connect() never completes (the trigger closure is
  // discarded). If |failing|, one failed attempt is pre-recorded.
  static std::unique_ptr<StreamSocket> MakeMockStalledClientSocket(
      const AddressList& addrlist,
      net::NetLog* net_log,
      bool failing) {
    std::unique_ptr<MockTriggerableClientSocket> socket(
        new MockTriggerableClientSocket(addrlist, true, net_log));
    if (failing) {
      DCHECK_LE(1u, addrlist.size());
      ConnectionAttempts attempts;
      attempts.push_back(ConnectionAttempt(addrlist[0], ERR_CONNECTION_FAILED));
      socket->AddConnectionAttempts(attempts);
    }
    return std::move(socket);
  }

  // StreamSocket implementation.
  // Always returns ERR_IO_PENDING; |callback| fires when the trigger closure
  // obtained via GetConnectCallback() is run.
  int Connect(const CompletionCallback& callback) override {
    DCHECK(callback_.is_null());
    callback_ = callback;
    return ERR_IO_PENDING;
  }

  void Disconnect() override {}

  bool IsConnected() const override { return is_connected_; }
  bool IsConnectedAndIdle() const override { return is_connected_; }
  int GetPeerAddress(IPEndPoint* address) const override {
    *address = addrlist_.front();
    return OK;
  }
  // Reports a fixed local address whose family matches the first address in
  // |addrlist_|.
  int GetLocalAddress(IPEndPoint* address) const override {
    if (!is_connected_)
      return ERR_SOCKET_NOT_CONNECTED;
    if (addrlist_.front().GetFamily() == ADDRESS_FAMILY_IPV4)
      SetIPv4Address(address);
    else
      SetIPv6Address(address);
    return OK;
  }
  const BoundNetLog& NetLog() const override { return net_log_; }

  void SetSubresourceSpeculation() override {}
  void SetOmniboxSpeculation() override {}
  bool WasEverUsed() const override { return false; }
  void EnableTCPFastOpenIfSupported() override {}
  bool WasNpnNegotiated() const override { return false; }
  NextProto GetNegotiatedProtocol() const override { return kProtoUnknown; }
  bool GetSSLInfo(SSLInfo* ssl_info) override { return false; }
  void GetConnectionAttempts(ConnectionAttempts* out) const override {
    *out = connection_attempts_;
  }
  void ClearConnectionAttempts() override { connection_attempts_.clear(); }
  // Note: new attempts are prepended, not appended.
  void AddConnectionAttempts(const ConnectionAttempts& attempts) override {
    connection_attempts_.insert(connection_attempts_.begin(), attempts.begin(),
                                attempts.end());
  }
  int64_t GetTotalReceivedBytes() const override {
    NOTIMPLEMENTED();
    return 0;
  }

  // Socket implementation. Reads/writes are unsupported on this mock.
  int Read(IOBuffer* buf,
           int buf_len,
           const CompletionCallback& callback) override {
    return ERR_FAILED;
  }

  int Write(IOBuffer* buf,
            int buf_len,
            const CompletionCallback& callback) override {
    return ERR_FAILED;
  }
  int SetReceiveBufferSize(int32_t size) override { return OK; }
  int SetSendBufferSize(int32_t size) override { return OK; }

 private:
  // Completes the pending Connect() with OK or ERR_CONNECTION_FAILED,
  // depending on |should_connect_|.
  void DoCallback() {
    is_connected_ = should_connect_;
    callback_.Run(is_connected_ ? OK : ERR_CONNECTION_FAILED);
  }

  bool should_connect_;
  bool is_connected_;
  const AddressList addrlist_;
  BoundNetLog net_log_;
  CompletionCallback callback_;
  ConnectionAttempts connection_attempts_;
  base::WeakPtrFactory<MockTriggerableClientSocket> weak_factory_;

  DISALLOW_COPY_AND_ASSIGN(MockTriggerableClientSocket);
};
} // namespace
// Asserts that |handle| reports reused-socket load timing: a valid socket log
// id, no connect times, and only connection-level timings populated.
void TestLoadTimingInfoConnectedReused(const ClientSocketHandle& handle) {
  LoadTimingInfo load_timing_info;
  // Only pass true in as |is_reused|, as in general, HttpStream types should
  // have stricter concepts of reuse than socket pools.
  EXPECT_TRUE(handle.GetLoadTimingInfo(true, &load_timing_info));

  EXPECT_TRUE(load_timing_info.socket_reused);
  EXPECT_NE(NetLog::Source::kInvalidId, load_timing_info.socket_log_id);

  ExpectConnectTimingHasNoTimes(load_timing_info.connect_timing);
  ExpectLoadTimingHasOnlyConnectionTimes(load_timing_info);
}
// Asserts that |handle| reports fresh-connection load timing: not reused, a
// valid socket log id, and DNS/connect times populated.
void TestLoadTimingInfoConnectedNotReused(const ClientSocketHandle& handle) {
  EXPECT_FALSE(handle.is_reused());

  LoadTimingInfo load_timing_info;
  EXPECT_TRUE(handle.GetLoadTimingInfo(false, &load_timing_info));

  EXPECT_FALSE(load_timing_info.socket_reused);
  EXPECT_NE(NetLog::Source::kInvalidId, load_timing_info.socket_log_id);

  ExpectConnectTimingHasTimes(load_timing_info.connect_timing,
                              CONNECT_TIMING_HAS_DNS_TIMES);
  ExpectLoadTimingHasOnlyConnectionTimes(load_timing_info);

  // NOTE(review): also runs the reused-path assertions on the same handle,
  // i.e. the handle must satisfy reused semantics when queried with
  // is_reused=true as well — confirm this cross-check is intentional.
  TestLoadTimingInfoConnectedReused(handle);
}
// Overwrites |address| with the fixed IPv4 endpoint 1.1.1.1:80 used by
// the transport socket pool tests.
void SetIPv4Address(IPEndPoint* address) {
  const IPEndPoint v4_endpoint(ParseIP("1.1.1.1"), 80);
  *address = v4_endpoint;
}

// Overwrites |address| with the fixed IPv6 endpoint [1:abcd::3:4:ff]:80
// used by the transport socket pool tests.
void SetIPv6Address(IPEndPoint* address) {
  const IPEndPoint v6_endpoint(ParseIP("1:abcd::3:4:ff"), 80);
  *address = v6_endpoint;
}
// By default every allocated socket is a MOCK_CLIENT_SOCKET, no per-call
// type list is installed, and delayed sockets wait the pool's maximum
// connect-retry interval.
MockTransportClientSocketFactory::MockTransportClientSocketFactory(
NetLog* net_log)
: net_log_(net_log),
allocation_count_(0),
client_socket_type_(MOCK_CLIENT_SOCKET),
client_socket_types_(NULL),
client_socket_index_(0),
client_socket_index_max_(0),
delay_(base::TimeDelta::FromMilliseconds(
ClientSocketPool::kMaxConnectRetryIntervalMs)) {}
MockTransportClientSocketFactory::~MockTransportClientSocketFactory() {}
// Datagram sockets are not supported by this mock factory; reaching this
// method is a test bug.
std::unique_ptr<DatagramClientSocket>
MockTransportClientSocketFactory::CreateDatagramClientSocket(
    DatagramSocket::BindType bind_type,
    const RandIntCallback& rand_int_cb,
    NetLog* net_log,
    const NetLog::Source& source) {
  NOTREACHED();
  return nullptr;
}
// Allocates a StreamSocket whose behavior is chosen by the configured
// ClientSocketType: either the next entry of the per-call list installed
// via set_client_socket_types(), or the single default type.
std::unique_ptr<StreamSocket>
MockTransportClientSocketFactory::CreateTransportClientSocket(
const AddressList& addresses,
std::unique_ptr<SocketPerformanceWatcher> /* socket_performance_watcher */,
NetLog* /* net_log */,
const NetLog::Source& /* source */) {
allocation_count_++;
// Consume the next type from the list while entries remain; afterwards
// fall back to the single default type.
ClientSocketType type = client_socket_type_;
if (client_socket_types_ && client_socket_index_ < client_socket_index_max_) {
type = client_socket_types_[client_socket_index_++];
}
switch (type) {
case MOCK_CLIENT_SOCKET:
return std::unique_ptr<StreamSocket>(
new MockConnectClientSocket(addresses, net_log_));
case MOCK_FAILING_CLIENT_SOCKET:
return std::unique_ptr<StreamSocket>(
new MockFailingClientSocket(addresses, net_log_));
case MOCK_PENDING_CLIENT_SOCKET:
return MockTriggerableClientSocket::MakeMockPendingClientSocket(
addresses, true, net_log_);
case MOCK_PENDING_FAILING_CLIENT_SOCKET:
return MockTriggerableClientSocket::MakeMockPendingClientSocket(
addresses, false, net_log_);
case MOCK_DELAYED_CLIENT_SOCKET:
return MockTriggerableClientSocket::MakeMockDelayedClientSocket(
addresses, true, delay_, net_log_);
case MOCK_DELAYED_FAILING_CLIENT_SOCKET:
return MockTriggerableClientSocket::MakeMockDelayedClientSocket(
addresses, false, delay_, net_log_);
case MOCK_STALLED_CLIENT_SOCKET:
return MockTriggerableClientSocket::MakeMockStalledClientSocket(
addresses, net_log_, false);
case MOCK_STALLED_FAILING_CLIENT_SOCKET:
return MockTriggerableClientSocket::MakeMockStalledClientSocket(
addresses, net_log_, true);
case MOCK_TRIGGERABLE_CLIENT_SOCKET: {
std::unique_ptr<MockTriggerableClientSocket> rv(
new MockTriggerableClientSocket(addresses, true, net_log_));
triggerable_sockets_.push(rv->GetConnectCallback());
// run_loop_quit_closure_ behaves like a condition variable. It will
// wake up WaitForTriggerableSocketCreation() if it is sleeping. We
// don't need to worry about atomicity because this code is
// single-threaded.
if (!run_loop_quit_closure_.is_null())
run_loop_quit_closure_.Run();
return std::move(rv);
}
default:
NOTREACHED();
return std::unique_ptr<StreamSocket>(
new MockConnectClientSocket(addresses, net_log_));
}
}
// SSL is not supported by this mock factory; returns a null socket.
std::unique_ptr<SSLClientSocket>
MockTransportClientSocketFactory::CreateSSLClientSocket(
std::unique_ptr<ClientSocketHandle> transport_socket,
const HostPortPair& host_and_port,
const SSLConfig& ssl_config,
const SSLClientSocketContext& context) {
NOTIMPLEMENTED();
return std::unique_ptr<SSLClientSocket>();
}
// No SSL session cache exists on this mock; intentionally a no-op.
void MockTransportClientSocketFactory::ClearSSLSessionCache() {
NOTIMPLEMENTED();
}
// Installs a per-allocation sequence of socket types. |type_list| is
// stored as a raw pointer (not copied), so it must outlive this factory,
// and must contain |num_types| (> 0) entries. Once the list is
// exhausted, CreateTransportClientSocket() falls back to the default
// type. Resets the consumption index.
void MockTransportClientSocketFactory::set_client_socket_types(
ClientSocketType* type_list,
int num_types) {
DCHECK_GT(num_types, 0);
client_socket_types_ = type_list;
client_socket_index_ = 0;
client_socket_index_max_ = num_types;
}
// Blocks (spinning a RunLoop) until at least one triggerable socket has
// been created, then pops and returns the closure that completes that
// socket's connect attempt.
base::Closure
MockTransportClientSocketFactory::WaitForTriggerableSocketCreation() {
while (triggerable_sockets_.empty()) {
base::RunLoop run_loop;
// CreateTransportClientSocket() runs this closure when it queues a
// new triggerable socket, waking us up.
run_loop_quit_closure_ = run_loop.QuitClosure();
run_loop.Run();
run_loop_quit_closure_.Reset();
}
base::Closure trigger = triggerable_sockets_.front();
triggerable_sockets_.pop();
return trigger;
}
} // namespace net
| axinging/chromium-crosswalk | net/socket/transport_client_socket_pool_test_util.cc | C++ | bsd-3-clause | 16,065 |
//
(function (root) {
"use strict";
if (!root.lux)
root.lux = {};
// If a file assign http as protocol (https does not work with PhantomJS)
var protocol = root.location ? (root.location.protocol === 'file:' ? 'http:' : '') : '',
end = '.js',
ostring = Object.prototype.toString,
lux = root.lux;
// Returns true when `it` is an Array.
// Uses the native ES5 predicate instead of comparing
// Object.prototype.toString output by hand: the result is equivalent
// (both key off the value's internal Array-ness) and the native check
// also recognises arrays created in other frames.
function isArray(it) {
    return Array.isArray(it);
}
// Returns lux.context.MINIFIED_MEDIA when a lux context is available,
// otherwise undefined (newPaths() treats a falsy result as "use the
// unminified media files").
function minify () {
if (root.lux.context)
return lux.context.MINIFIED_MEDIA;
}
// Returns lux.context.MEDIA_URL when a lux context is available,
// otherwise undefined (lux.config() then leaves require's baseUrl
// unset).
function baseUrl () {
if (root.lux.context)
return lux.context.MEDIA_URL;
}
// Copies every own enumerable property of `o2` onto `o1` and returns
// `o1`. A missing/falsy `o2` leaves `o1` untouched.
function extend (o1, o2) {
    if (o2) {
        Object.keys(o2).forEach(function (key) {
            o1[key] = o2[key];
        });
    }
    return o1;
}
// CDN locations for every third-party module lux knows how to load.
// Protocol-relative ("//...") URLs get a protocol prepended and the
// ".min"/".js" handling applied later, in newPaths().
function defaultPaths () {
return {
"angular": "//ajax.googleapis.com/ajax/libs/angularjs/1.3.15/angular",
"angular-animate": "//ajax.googleapis.com/ajax/libs/angularjs/1.3.15/angular-animate",
"angular-mocks": "//ajax.googleapis.com/ajax/libs/angularjs/1.3.15/angular-mocks.js",
"angular-sanitize": "//ajax.googleapis.com/ajax/libs/angularjs/1.3.15/angular-sanitize",
"angular-touch": "//cdnjs.cloudflare.com/ajax/libs/angular.js/1.3.15/angular-touch",
"angular-strap": "//cdnjs.cloudflare.com/ajax/libs/angular-strap/2.2.1/angular-strap",
"angular-strap-tpl": "//cdnjs.cloudflare.com/ajax/libs/angular-strap/2.2.4/angular-strap.tpl",
"angular-ui-router": "//cdnjs.cloudflare.com/ajax/libs/angular-ui-router/0.2.14/angular-ui-router",
"angular-pusher": "//cdn.jsdelivr.net/angular.pusher/latest/pusher-angular.min.js",
"async": "//cdnjs.cloudflare.com/ajax/libs/requirejs-async/0.1.1/async.js",
"pusher": "//js.pusher.com/2.2/pusher",
"codemirror": "//cdnjs.cloudflare.com/ajax/libs/codemirror/3.21.0/codemirror",
"codemirror-markdown": "//cdnjs.cloudflare.com/ajax/libs/codemirror/3.21.0/mode/markdown/markdown",
"codemirror-javascript": "//cdnjs.cloudflare.com/ajax/libs/codemirror/3.21.0/mode/javascript/javascript",
"codemirror-xml": "//cdnjs.cloudflare.com/ajax/libs/codemirror/3.21.0/mode/xml/xml",
"codemirror-css": "//cdnjs.cloudflare.com/ajax/libs/codemirror/3.21.0/mode/css/css",
"codemirror-htmlmixed": "//cdnjs.cloudflare.com/ajax/libs/codemirror/3.21.0/mode/htmlmixed/htmlmixed",
"crossfilter": "//cdnjs.cloudflare.com/ajax/libs/crossfilter/1.3.11/crossfilter",
"d3": "//cdnjs.cloudflare.com/ajax/libs/d3/3.5.5/d3",
"google-analytics": "//www.google-analytics.com/analytics.js",
"gridster": "//cdnjs.cloudflare.com/ajax/libs/jquery.gridster/0.5.6/jquery.gridster",
"holder": "//cdnjs.cloudflare.com/ajax/libs/holder/2.3.1/holder",
"highlight": "//cdnjs.cloudflare.com/ajax/libs/highlight.js/8.3/highlight.min.js",
"katex": "//cdnjs.cloudflare.com/ajax/libs/KaTeX/0.3.0/katex.min.js",
"leaflet": "//cdnjs.cloudflare.com/ajax/libs/leaflet/0.7.3/leaflet.js",
"lodash": "//cdnjs.cloudflare.com/ajax/libs/lodash.js/2.4.1/lodash",
"marked": "//cdnjs.cloudflare.com/ajax/libs/marked/0.3.2/marked",
"mathjax": "//cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML",
"moment": "//cdnjs.cloudflare.com/ajax/libs/moment.js/2.10.3/moment",
"restangular": "//cdnjs.cloudflare.com/ajax/libs/restangular/1.4.0/restangular",
"sockjs": "//cdnjs.cloudflare.com/ajax/libs/sockjs-client/0.3.4/sockjs.min.js",
"stats": "//cdnjs.cloudflare.com/ajax/libs/stats.js/r11/Stats",
"topojson": "//cdnjs.cloudflare.com/ajax/libs/topojson/1.6.19/topojson"
};
}
// Default shims
// require.js shim configuration for modules that are not AMD-aware:
// declares the global each one exports and its load-order dependencies.
function defaultShim () {
return {
angular: {
exports: "angular"
},
"angular-strap-tpl": {
deps: ["angular", "angular-strap"]
},
"google-analytics": {
// ga's global name may be overridden on the page.
exports: root.GoogleAnalyticsObject || "ga"
},
highlight: {
exports: "hljs"
},
lux: {
deps: ["angular"]
},
"ui-bootstrap": {
deps: ["angular"]
},
"codemirror": {
exports: "CodeMirror"
},
"codemirror-markdown": {
deps: ["codemirror"]
},
"codemirror-xml": {
deps: ["codemirror"]
},
"codemirror-javascript": {
deps: ["codemirror"]
},
"codemirror-css": {
deps: ["codemirror"]
},
"codemirror-htmlmixed": {
// htmlmixed is a composite mode: it needs all three sub-modes.
deps: ["codemirror", "codemirror-xml", "codemirror-javascript", "codemirror-css"],
},
restangular: {
deps: ["angular"]
},
crossfilter: {
exports: "crossfilter"
},
trianglify: {
deps: ["d3"],
exports: "Trianglify"
},
mathjax: {
exports: "MathJax"
}
};
}
// Merges the default paths with cfg.paths and normalises each entry:
// strips a configured local prefix, adds angular/d3 shim dependencies
// by naming convention, applies the ".min" suffix, keeps query strings
// intact, prepends the page protocol to protocol-relative URLs, and
// finally strips any explicit ".js" extension (require.js is expected
// to add it back when loading).
function newPaths (cfg) {
var all = {},
min = minify() ? '.min' : '',
prefix = root.local_require_prefix,
paths = extend(defaultPaths(), cfg.paths);
for(var name in paths) {
if(paths.hasOwnProperty(name)) {
var path = paths[name];
if (prefix && path.substring(0, prefix.length) === prefix)
path = path.substring(prefix.length);
if (!cfg.shim[name]) {
// Add angular dependency
if (name.substring(0, 8) === "angular-")
cfg.shim[name] = {
deps: ["angular"]
};
else if (name.substring(0, 3) === "d3-")
cfg.shim[name] = {
deps: ["d3"]
};
}
if (typeof(path) !== 'string') {
// Not a plain string: don't manipulate it, use its url as-is.
path = path.url;
} else {
// Split off an optional query string before editing the path.
var params = path.split('?');
if (params.length === 2) {
path = params[0];
params = params[1];
} else
params = '';
if (path.substring(path.length-3) !== end)
path += min;
if (params) {
if (path.substring(path.length-3) !== end)
path += end;
path += '?' + params;
}
// Add protocol
if (path.substring(0, 2) === '//' && protocol)
path = protocol + path;
if (path.substring(path.length-3) === end)
path = path.substring(0, path.length-3);
}
all[name] = path;
}
}
return all;
}
// require.config override
// Fills in baseUrl from the lux context (when available), merges the
// default shims and normalised paths into cfg, then delegates to
// require.config().
lux.config = function (cfg) {
if(!cfg.baseUrl) {
var url = baseUrl();
if (url !== undefined) cfg.baseUrl = url;
}
cfg.shim = extend(defaultShim(), cfg.shim);
cfg.paths = newPaths(cfg);
require.config(cfg);
};
}(this));
lux.config({});
// Twitter live-feed example: a directive that streams tweets from a
// websocket into the "tweets" template below.
require(['angular'], function (angular) {
angular.module('twitter-example', ['templates-tweets'])
.directive('twitter', ['$rootScope', '$log', function (root, log) {
// Returns the cached websocket handle for |url|, creating and
// caching one on the root scope if needed (one socket per URL,
// shared across directive instances).
function connectSock(scope, url) {
if (!root.websockets) root.websockets = {};
var hnd = root.websockets[url];
if (!hnd)
root.websockets[url] = hnd = createSocket(url);
return hnd;
}
// Opens a WebSocket to |url| and returns {sock, listeners}; every
// incoming JSON message is decorated with a numeric timestamp and a
// permalink URL, then fanned out to all registered listeners.
function createSocket (url) {
var sock = new WebSocket(url),
listeners = [];
sock.onopen = function() {
log.info('New connection with ' + url);
};
sock.onmessage = function (e) {
var msg = angular.fromJson(e.data);
msg.timestamp = +msg.timestamp;
msg.url = 'https://twitter.com/' + msg.user.screen_name + '/status/' + msg.id_str;
angular.forEach(listeners, function (listener) {
listener(sock, msg);
});
};
return {
sock: sock,
listeners: listeners
};
}
// Closure which handles incoming messages from the server:
// appends the tweet and triggers a digest cycle.
function tweetArrived (scope) {
return function (sock, msg) {
scope.messages.push(msg);
scope.$apply();
};
}
return {
restrict: 'AE',
templateUrl: 'tweets/templates/tweets.tpl.html',
link: function (scope, element, attrs) {
// The twitter attribute carries a JSON options object; only
// options.url (the websocket endpoint) is used.
var options = attrs.twitter;
if (options) options = angular.fromJson(options);
scope.messages = [];
if (options && options.url) {
var hnd = connectSock(scope, options.url);
hnd.listeners.push(tweetArrived(scope));
} else
log.error('Twitter directive improperly configured, no url found');
}
};
}]);
// Pre-compiled template module: registers the tweet-list markup in the
// $templateCache under the directive's templateUrl.
angular.module('templates-tweets', ['tweets/templates/tweets.tpl.html']);
angular.module("tweets/templates/tweets.tpl.html", []).run(["$templateCache", function($templateCache) {
$templateCache.put("tweets/templates/tweets.tpl.html",
"<div class=\"media\" ng-repeat=\"msg in messages | orderBy: ['-timestamp']\">\n" +
"    <div class=\"media-left\">\n" +
"        <a ng-href=\"{{ msg.url }}\">\n" +
"        <img class=\"media-object\" ng-src=\"{{msg.user.profile_image_url_https}}\"\n" +
"             alt=\"{{msg.user.name}}\" class=\"img-thumbnail\">\n" +
"        </a>\n" +
"    </div>\n" +
"    <div class=\"media-body\">\n" +
"        <p class='list-group-item-text message'>{{msg.text}}</p>\n" +
"    </div>\n" +
"</div>\n" +
"");
}]);
//
// Angular bootstrap
angular.bootstrap(document, ['twitter-example']);
});
| dejlek/pulsar | examples/tweets/assets/tweets.js | JavaScript | bsd-3-clause | 10,267 |
//===- X86AvoidStoreForwardingBlockis.cpp - Avoid HW Store Forward Block --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// If a load follows a store and reloads data that the store has written to
// memory, Intel microarchitectures can in many cases forward the data directly
// from the store to the load, This "store forwarding" saves cycles by enabling
// the load to directly obtain the data instead of accessing the data from
// cache or memory.
// A "store forward block" occurs in cases that a store cannot be forwarded to
// the load. The most typical case of store forward block on Intel Core
// microarchitecture that a small store cannot be forwarded to a large load.
// The estimated penalty for a store forward block is ~13 cycles.
//
// This pass tries to recognize and handle cases where "store forward block"
// is created by the compiler when lowering memcpy calls to a sequence
// of a load and a store.
//
// The pass currently only handles cases where memcpy is lowered to
// XMM/YMM registers, it tries to break the memcpy into smaller copies.
// breaking the memcpy should be possible since there is no atomicity
// guarantee for loads and stores to XMM/YMM.
//
// It could be better for performance to solve the problem by loading
// to XMM/YMM then inserting the partial store before storing back from XMM/YMM
// to memory, but this will result in a more conservative optimization since it
// requires we prove that all memory accesses between the blocking store and the
// load must alias/don't alias before we can move the store, whereas the
// transformation done here is correct regardless to other memory accesses.
//===----------------------------------------------------------------------===//
#include "X86InstrInfo.h"
#include "X86Subtarget.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCInstrDesc.h"
using namespace llvm;
#define DEBUG_TYPE "x86-avoid-SFB"
// Escape hatch: disables the fixup without changing the pass pipeline.
static cl::opt<bool> DisableX86AvoidStoreForwardBlocks(
"x86-disable-avoid-SFB", cl::Hidden,
cl::desc("X86: Disable Store Forwarding Blocks fixup."), cl::init(false));
// Size of the backward-scan window used by findPotentialBlockers().
static cl::opt<unsigned> X86AvoidSFBInspectionLimit(
"x86-sfb-inspection-limit",
cl::desc("X86: Number of instructions backward to "
"inspect for store forwarding blocks."),
cl::init(20), cl::Hidden);
namespace {
// Maps a blocking store's displacement to the (smallest) store size
// recorded at that displacement.
using DisplacementSizeMap = std::map<int64_t, unsigned>;
class X86AvoidSFBPass : public MachineFunctionPass {
public:
static char ID;
X86AvoidSFBPass() : MachineFunctionPass(ID) { }
StringRef getPassName() const override {
return "X86 Avoid Store Forwarding Blocks";
}
bool runOnMachineFunction(MachineFunction &MF) override;
void getAnalysisUsage(AnalysisUsage &AU) const override {
MachineFunctionPass::getAnalysisUsage(AU);
// Alias analysis is needed to prove the memcpy's source and
// destination don't overlap (see alias()).
AU.addRequired<AAResultsWrapperPass>();
}
private:
MachineRegisterInfo *MRI = nullptr;
const X86InstrInfo *TII = nullptr;
const X86RegisterInfo *TRI = nullptr;
// Load/store pairs that look like a lowered memcpy which may be hit by
// a store-forwarding block; filled by findPotentiallylBlockedCopies().
SmallVector<std::pair<MachineInstr *, MachineInstr *>, 2>
BlockedLoadsStoresPairs;
// NOTE(review): appears to collect instructions queued for deletion;
// confirm against runOnMachineFunction (not visible in this chunk).
SmallVector<MachineInstr *, 2> ForRemoval;
AliasAnalysis *AA = nullptr;
/// Returns couples of Load then Store to memory which look
/// like a memcpy.
void findPotentiallylBlockedCopies(MachineFunction &MF);
/// Break the memcpy's load and store into smaller copies
/// such that each memory load that was blocked by a smaller store
/// would now be copied separately.
void breakBlockedCopies(MachineInstr *LoadInst, MachineInstr *StoreInst,
const DisplacementSizeMap &BlockingStoresDispSizeMap);
/// Break a copy of size Size to smaller copies.
void buildCopies(int Size, MachineInstr *LoadInst, int64_t LdDispImm,
MachineInstr *StoreInst, int64_t StDispImm,
int64_t LMMOffset, int64_t SMMOffset);
/// Emit a single narrow load/store pair of Size bytes.
void buildCopy(MachineInstr *LoadInst, unsigned NLoadOpcode, int64_t LoadDisp,
MachineInstr *StoreInst, unsigned NStoreOpcode,
int64_t StoreDisp, unsigned Size, int64_t LMMOffset,
int64_t SMMOffset);
bool alias(const MachineMemOperand &Op1, const MachineMemOperand &Op2) const;
unsigned getRegSizeInBytes(MachineInstr *Inst);
};
} // end anonymous namespace
char X86AvoidSFBPass::ID = 0;

// Pass registration. The human-readable description previously read
// "Machine code sinking" — a copy-paste from MachineSink.cpp — which
// contradicted getPassName(); use the pass's real name instead.
INITIALIZE_PASS_BEGIN(X86AvoidSFBPass, DEBUG_TYPE,
                      "X86 Avoid Store Forwarding Blocks", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(X86AvoidSFBPass, DEBUG_TYPE,
                    "X86 Avoid Store Forwarding Blocks", false, false)

// Factory used by the X86 target to add this pass to the pipeline.
FunctionPass *llvm::createX86AvoidStoreForwardingBlocks() {
  return new X86AvoidSFBPass();
}
// True for the 128-bit (XMM) vector load opcodes this pass handles:
// SSE, AVX and AVX-512VL forms, aligned and unaligned.
static bool isXMMLoadOpcode(unsigned Opcode) {
return Opcode == X86::MOVUPSrm || Opcode == X86::MOVAPSrm ||
Opcode == X86::VMOVUPSrm || Opcode == X86::VMOVAPSrm ||
Opcode == X86::VMOVUPDrm || Opcode == X86::VMOVAPDrm ||
Opcode == X86::VMOVDQUrm || Opcode == X86::VMOVDQArm ||
Opcode == X86::VMOVUPSZ128rm || Opcode == X86::VMOVAPSZ128rm ||
Opcode == X86::VMOVUPDZ128rm || Opcode == X86::VMOVAPDZ128rm ||
Opcode == X86::VMOVDQU64Z128rm || Opcode == X86::VMOVDQA64Z128rm ||
Opcode == X86::VMOVDQU32Z128rm || Opcode == X86::VMOVDQA32Z128rm;
}
// True for the 256-bit (YMM) vector load opcodes this pass handles:
// AVX and AVX-512VL forms, aligned and unaligned.
static bool isYMMLoadOpcode(unsigned Opcode) {
return Opcode == X86::VMOVUPSYrm || Opcode == X86::VMOVAPSYrm ||
Opcode == X86::VMOVUPDYrm || Opcode == X86::VMOVAPDYrm ||
Opcode == X86::VMOVDQUYrm || Opcode == X86::VMOVDQAYrm ||
Opcode == X86::VMOVUPSZ256rm || Opcode == X86::VMOVAPSZ256rm ||
Opcode == X86::VMOVUPDZ256rm || Opcode == X86::VMOVAPDZ256rm ||
Opcode == X86::VMOVDQU64Z256rm || Opcode == X86::VMOVDQA64Z256rm ||
Opcode == X86::VMOVDQU32Z256rm || Opcode == X86::VMOVDQA32Z256rm;
}
// A load that may be half of a compiler-lowered memcpy is any XMM/YMM
// vector load.
static bool isPotentialBlockedMemCpyLd(unsigned Opcode) {
return isXMMLoadOpcode(Opcode) || isYMMLoadOpcode(Opcode);
}
// True when (LdOpcode, StOpcode) form a matching vector load/store pair
// of the same width and element type — the shape produced when a memcpy
// is lowered through an XMM/YMM register. Each load accepts both the
// aligned and unaligned store variant.
static bool isPotentialBlockedMemCpyPair(int LdOpcode, int StOpcode) {
switch (LdOpcode) {
case X86::MOVUPSrm:
case X86::MOVAPSrm:
return StOpcode == X86::MOVUPSmr || StOpcode == X86::MOVAPSmr;
case X86::VMOVUPSrm:
case X86::VMOVAPSrm:
return StOpcode == X86::VMOVUPSmr || StOpcode == X86::VMOVAPSmr;
case X86::VMOVUPDrm:
case X86::VMOVAPDrm:
return StOpcode == X86::VMOVUPDmr || StOpcode == X86::VMOVAPDmr;
case X86::VMOVDQUrm:
case X86::VMOVDQArm:
return StOpcode == X86::VMOVDQUmr || StOpcode == X86::VMOVDQAmr;
case X86::VMOVUPSZ128rm:
case X86::VMOVAPSZ128rm:
return StOpcode == X86::VMOVUPSZ128mr || StOpcode == X86::VMOVAPSZ128mr;
case X86::VMOVUPDZ128rm:
case X86::VMOVAPDZ128rm:
return StOpcode == X86::VMOVUPDZ128mr || StOpcode == X86::VMOVAPDZ128mr;
case X86::VMOVUPSYrm:
case X86::VMOVAPSYrm:
return StOpcode == X86::VMOVUPSYmr || StOpcode == X86::VMOVAPSYmr;
case X86::VMOVUPDYrm:
case X86::VMOVAPDYrm:
return StOpcode == X86::VMOVUPDYmr || StOpcode == X86::VMOVAPDYmr;
case X86::VMOVDQUYrm:
case X86::VMOVDQAYrm:
return StOpcode == X86::VMOVDQUYmr || StOpcode == X86::VMOVDQAYmr;
case X86::VMOVUPSZ256rm:
case X86::VMOVAPSZ256rm:
return StOpcode == X86::VMOVUPSZ256mr || StOpcode == X86::VMOVAPSZ256mr;
case X86::VMOVUPDZ256rm:
case X86::VMOVAPDZ256rm:
return StOpcode == X86::VMOVUPDZ256mr || StOpcode == X86::VMOVAPDZ256mr;
case X86::VMOVDQU64Z128rm:
case X86::VMOVDQA64Z128rm:
return StOpcode == X86::VMOVDQU64Z128mr || StOpcode == X86::VMOVDQA64Z128mr;
case X86::VMOVDQU32Z128rm:
case X86::VMOVDQA32Z128rm:
return StOpcode == X86::VMOVDQU32Z128mr || StOpcode == X86::VMOVDQA32Z128mr;
case X86::VMOVDQU64Z256rm:
case X86::VMOVDQA64Z256rm:
return StOpcode == X86::VMOVDQU64Z256mr || StOpcode == X86::VMOVDQA64Z256mr;
case X86::VMOVDQU32Z256rm:
case X86::VMOVDQA32Z256rm:
return StOpcode == X86::VMOVDQU32Z256mr || StOpcode == X86::VMOVDQA32Z256mr;
default:
return false;
}
}
// True if Opcode is a store that could block forwarding into a load with
// LoadOpcode: any scalar GPR store (8/16/32/64-bit, register or
// immediate), and — when the load is 256-bit — any 128-bit vector store
// as well.
static bool isPotentialBlockingStoreInst(int Opcode, int LoadOpcode) {
bool PBlock = false;
PBlock |= Opcode == X86::MOV64mr || Opcode == X86::MOV64mi32 ||
Opcode == X86::MOV32mr || Opcode == X86::MOV32mi ||
Opcode == X86::MOV16mr || Opcode == X86::MOV16mi ||
Opcode == X86::MOV8mr || Opcode == X86::MOV8mi;
if (isYMMLoadOpcode(LoadOpcode))
PBlock |= Opcode == X86::VMOVUPSmr || Opcode == X86::VMOVAPSmr ||
Opcode == X86::VMOVUPDmr || Opcode == X86::VMOVAPDmr ||
Opcode == X86::VMOVDQUmr || Opcode == X86::VMOVDQAmr ||
Opcode == X86::VMOVUPSZ128mr || Opcode == X86::VMOVAPSZ128mr ||
Opcode == X86::VMOVUPDZ128mr || Opcode == X86::VMOVAPDZ128mr ||
Opcode == X86::VMOVDQU64Z128mr ||
Opcode == X86::VMOVDQA64Z128mr ||
Opcode == X86::VMOVDQU32Z128mr || Opcode == X86::VMOVDQA32Z128mr;
return PBlock;
}
// Sizes, in bytes, of the moves used to rebuild a blocked copy out of
// smaller pieces (one XMM width plus the four scalar GPR widths).
static const int MOV128SZ = 16;
static const int MOV64SZ = 8;
static const int MOV32SZ = 4;
static const int MOV16SZ = 2;
static const int MOV8SZ = 1;
// Maps a 256-bit (YMM) load opcode to the corresponding 128-bit (XMM)
// load. Aligned forms map to unaligned counterparts: the 16-byte halves
// of a 32-byte access are not guaranteed to be 16-byte aligned.
static unsigned getYMMtoXMMLoadOpcode(unsigned LoadOpcode) {
switch (LoadOpcode) {
case X86::VMOVUPSYrm:
case X86::VMOVAPSYrm:
return X86::VMOVUPSrm;
case X86::VMOVUPDYrm:
case X86::VMOVAPDYrm:
return X86::VMOVUPDrm;
case X86::VMOVDQUYrm:
case X86::VMOVDQAYrm:
return X86::VMOVDQUrm;
case X86::VMOVUPSZ256rm:
case X86::VMOVAPSZ256rm:
return X86::VMOVUPSZ128rm;
case X86::VMOVUPDZ256rm:
case X86::VMOVAPDZ256rm:
return X86::VMOVUPDZ128rm;
case X86::VMOVDQU64Z256rm:
case X86::VMOVDQA64Z256rm:
return X86::VMOVDQU64Z128rm;
case X86::VMOVDQU32Z256rm:
case X86::VMOVDQA32Z256rm:
return X86::VMOVDQU32Z128rm;
default:
llvm_unreachable("Unexpected Load Instruction Opcode");
}
// Not reached; silences "missing return" warnings after the
// unreachable above.
return 0;
}
// Maps a 256-bit (YMM) store opcode to the corresponding 128-bit (XMM)
// store, mirroring getYMMtoXMMLoadOpcode(). Aligned forms map to
// unaligned counterparts: the 16-byte halves of a 32-byte access are
// not guaranteed to be 16-byte aligned.
static unsigned getYMMtoXMMStoreOpcode(unsigned StoreOpcode) {
  switch (StoreOpcode) {
  case X86::VMOVUPSYmr:
  case X86::VMOVAPSYmr:
    return X86::VMOVUPSmr;
  case X86::VMOVUPDYmr:
  case X86::VMOVAPDYmr:
    return X86::VMOVUPDmr;
  case X86::VMOVDQUYmr:
  case X86::VMOVDQAYmr:
    return X86::VMOVDQUmr;
  case X86::VMOVUPSZ256mr:
  case X86::VMOVAPSZ256mr:
    return X86::VMOVUPSZ128mr;
  case X86::VMOVUPDZ256mr:
  case X86::VMOVAPDZ256mr:
    return X86::VMOVUPDZ128mr;
  case X86::VMOVDQU64Z256mr:
  case X86::VMOVDQA64Z256mr:
    return X86::VMOVDQU64Z128mr;
  case X86::VMOVDQU32Z256mr:
  case X86::VMOVDQA32Z256mr:
    return X86::VMOVDQU32Z128mr;
  default:
    // Was "Unexpected Load Instruction Opcode" — a copy-paste from the
    // load mapper; this function handles stores.
    llvm_unreachable("Unexpected Store Instruction Opcode");
  }
  // Not reached; silences "missing return" warnings.
  return 0;
}
// Returns the index of MI's first memory-addressing operand; the
// X86::Addr* enumerators are offsets relative to this index. Asserts
// that MI actually has a memory operand.
static int getAddrOffset(MachineInstr *MI) {
const MCInstrDesc &Descl = MI->getDesc();
int AddrOffset = X86II::getMemoryOperandNo(Descl.TSFlags);
assert(AddrOffset != -1 && "Expected Memory Operand");
AddrOffset += X86II::getOperandBias(Descl);
return AddrOffset;
}
// Base-register (or frame-index) operand of MI's memory reference.
static MachineOperand &getBaseOperand(MachineInstr *MI) {
int AddrOffset = getAddrOffset(MI);
return MI->getOperand(AddrOffset + X86::AddrBaseReg);
}
// Displacement operand of MI's memory reference.
static MachineOperand &getDispOperand(MachineInstr *MI) {
int AddrOffset = getAddrOffset(MI);
return MI->getOperand(AddrOffset + X86::AddrDisp);
}
// Relevant addressing modes contain only base register and immediate
// displacement or frameindex and immediate displacement.
// TODO: Consider expanding to other addressing modes in the future
static bool isRelevantAddressingMode(MachineInstr *MI) {
int AddrOffset = getAddrOffset(MI);
MachineOperand &Base = getBaseOperand(MI);
MachineOperand &Disp = getDispOperand(MI);
MachineOperand &Scale = MI->getOperand(AddrOffset + X86::AddrScaleAmt);
MachineOperand &Index = MI->getOperand(AddrOffset + X86::AddrIndexReg);
MachineOperand &Segment = MI->getOperand(AddrOffset + X86::AddrSegmentReg);
// Base must be a real register or a frame index.
if (!((Base.isReg() && Base.getReg() != X86::NoRegister) || Base.isFI()))
return false;
if (!Disp.isImm())
return false;
// No scaled index register and no segment override allowed.
if (Scale.getImm() != 1)
return false;
if (!(Index.isReg() && Index.getReg() == X86::NoRegister))
return false;
if (!(Segment.isReg() && Segment.getReg() == X86::NoRegister))
return false;
return true;
}
// Collect potentially blocking stores.
// Limit the number of instructions backwards we want to inspect
// since the effect of store block won't be visible if the store
// and load instructions have enough instructions in between to
// keep the core busy.
static SmallVector<MachineInstr *, 2>
findPotentialBlockers(MachineInstr *LoadInst) {
SmallVector<MachineInstr *, 2> PotentialBlockers;
unsigned BlockCount = 0;
const unsigned InspectionLimit = X86AvoidSFBInspectionLimit;
for (auto PBInst = std::next(MachineBasicBlock::reverse_iterator(LoadInst)),
E = LoadInst->getParent()->rend();
PBInst != E; ++PBInst) {
// Meta instructions (debug info etc.) don't occupy the pipeline, so
// they don't count against the inspection window.
if (PBInst->isMetaInstruction())
continue;
BlockCount++;
if (BlockCount >= InspectionLimit)
break;
MachineInstr &MI = *PBInst;
// A call ends the scan: anything before it is far enough away.
if (MI.getDesc().isCall())
return PotentialBlockers;
PotentialBlockers.push_back(&MI);
}
// If we didn't get to the instructions limit try predecessing blocks.
// Ideally we should traverse the predecessor blocks in depth with some
// coloring algorithm, but for now let's just look at the first order
// predecessors.
if (BlockCount < InspectionLimit) {
MachineBasicBlock *MBB = LoadInst->getParent();
int LimitLeft = InspectionLimit - BlockCount;
for (MachineBasicBlock::pred_iterator PB = MBB->pred_begin(),
PE = MBB->pred_end();
PB != PE; ++PB) {
MachineBasicBlock *PMBB = *PB;
int PredCount = 0;
for (MachineBasicBlock::reverse_iterator PBInst = PMBB->rbegin(),
PME = PMBB->rend();
PBInst != PME; ++PBInst) {
if (PBInst->isMetaInstruction())
continue;
PredCount++;
if (PredCount >= LimitLeft)
break;
if (PBInst->getDesc().isCall())
break;
PotentialBlockers.push_back(&*PBInst);
}
}
}
return PotentialBlockers;
}
// Emits one narrow load (NLoadOpcode) into a fresh virtual register and
// a matching store (NStoreOpcode) of Size bytes at the given
// displacements, inserted before the original wide instructions.
// LMMOffset/SMMOffset are the offsets applied to the cloned load/store
// memory operands.
void X86AvoidSFBPass::buildCopy(MachineInstr *LoadInst, unsigned NLoadOpcode,
int64_t LoadDisp, MachineInstr *StoreInst,
unsigned NStoreOpcode, int64_t StoreDisp,
unsigned Size, int64_t LMMOffset,
int64_t SMMOffset) {
MachineOperand &LoadBase = getBaseOperand(LoadInst);
MachineOperand &StoreBase = getBaseOperand(StoreInst);
MachineBasicBlock *MBB = LoadInst->getParent();
MachineMemOperand *LMMO = *LoadInst->memoperands_begin();
MachineMemOperand *SMMO = *StoreInst->memoperands_begin();
Register Reg1 = MRI->createVirtualRegister(
TII->getRegClass(TII->get(NLoadOpcode), 0, TRI, *(MBB->getParent())));
// Address operands: base, scale 1, no index, displacement, no segment.
MachineInstr *NewLoad =
BuildMI(*MBB, LoadInst, LoadInst->getDebugLoc(), TII->get(NLoadOpcode),
Reg1)
.add(LoadBase)
.addImm(1)
.addReg(X86::NoRegister)
.addImm(LoadDisp)
.addReg(X86::NoRegister)
.addMemOperand(
MBB->getParent()->getMachineMemOperand(LMMO, LMMOffset, Size));
// The base register is still read by later copies and the original
// instructions, so it must not be marked killed here.
if (LoadBase.isReg())
getBaseOperand(NewLoad).setIsKill(false);
LLVM_DEBUG(NewLoad->dump());
// If the load and store are consecutive, use the loadInst location to
// reduce register pressure.
MachineInstr *StInst = StoreInst;
auto PrevInstrIt = skipDebugInstructionsBackward(
std::prev(MachineBasicBlock::instr_iterator(StoreInst)),
MBB->instr_begin());
if (PrevInstrIt.getNodePtr() == LoadInst)
StInst = LoadInst;
MachineInstr *NewStore =
BuildMI(*MBB, StInst, StInst->getDebugLoc(), TII->get(NStoreOpcode))
.add(StoreBase)
.addImm(1)
.addReg(X86::NoRegister)
.addImm(StoreDisp)
.addReg(X86::NoRegister)
.addReg(Reg1)
.addMemOperand(
MBB->getParent()->getMachineMemOperand(SMMO, SMMOffset, Size));
if (StoreBase.isReg())
getBaseOperand(NewStore).setIsKill(false);
// Propagate the kill flag of the original stored value onto the new
// store's source register.
MachineOperand &StoreSrcVReg = StoreInst->getOperand(X86::AddrNumOperands);
assert(StoreSrcVReg.isReg() && "Expected virtual register");
NewStore->getOperand(X86::AddrNumOperands).setIsKill(StoreSrcVReg.isKill());
LLVM_DEBUG(NewStore->dump());
}
// Greedily covers Size bytes with the widest available moves: 16-byte
// XMM pieces (only when the original load was a YMM load), then 8-, 4-,
// 2- and 1-byte GPR moves, advancing the displacements and
// memory-operand offsets after each piece.
void X86AvoidSFBPass::buildCopies(int Size, MachineInstr *LoadInst,
int64_t LdDispImm, MachineInstr *StoreInst,
int64_t StDispImm, int64_t LMMOffset,
int64_t SMMOffset) {
int LdDisp = LdDispImm;
int StDisp = StDispImm;
while (Size > 0) {
if ((Size - MOV128SZ >= 0) && isYMMLoadOpcode(LoadInst->getOpcode())) {
Size = Size - MOV128SZ;
buildCopy(LoadInst, getYMMtoXMMLoadOpcode(LoadInst->getOpcode()), LdDisp,
StoreInst, getYMMtoXMMStoreOpcode(StoreInst->getOpcode()),
StDisp, MOV128SZ, LMMOffset, SMMOffset);
LdDisp += MOV128SZ;
StDisp += MOV128SZ;
LMMOffset += MOV128SZ;
SMMOffset += MOV128SZ;
continue;
}
if (Size - MOV64SZ >= 0) {
Size = Size - MOV64SZ;
buildCopy(LoadInst, X86::MOV64rm, LdDisp, StoreInst, X86::MOV64mr, StDisp,
MOV64SZ, LMMOffset, SMMOffset);
LdDisp += MOV64SZ;
StDisp += MOV64SZ;
LMMOffset += MOV64SZ;
SMMOffset += MOV64SZ;
continue;
}
if (Size - MOV32SZ >= 0) {
Size = Size - MOV32SZ;
buildCopy(LoadInst, X86::MOV32rm, LdDisp, StoreInst, X86::MOV32mr, StDisp,
MOV32SZ, LMMOffset, SMMOffset);
LdDisp += MOV32SZ;
StDisp += MOV32SZ;
LMMOffset += MOV32SZ;
SMMOffset += MOV32SZ;
continue;
}
if (Size - MOV16SZ >= 0) {
Size = Size - MOV16SZ;
buildCopy(LoadInst, X86::MOV16rm, LdDisp, StoreInst, X86::MOV16mr, StDisp,
MOV16SZ, LMMOffset, SMMOffset);
LdDisp += MOV16SZ;
StDisp += MOV16SZ;
LMMOffset += MOV16SZ;
SMMOffset += MOV16SZ;
continue;
}
if (Size - MOV8SZ >= 0) {
Size = Size - MOV8SZ;
buildCopy(LoadInst, X86::MOV8rm, LdDisp, StoreInst, X86::MOV8mr, StDisp,
MOV8SZ, LMMOffset, SMMOffset);
LdDisp += MOV8SZ;
StDisp += MOV8SZ;
LMMOffset += MOV8SZ;
SMMOffset += MOV8SZ;
continue;
}
}
// Every byte must be covered exactly (MOV8SZ == 1 guarantees progress).
assert(Size == 0 && "Wrong size division");
}
// Moves the base-register kill flags of the original wide load/store
// onto the last of the narrow copies emitted just before them, so the
// flags remain accurate for the new instruction sequence.
static void updateKillStatus(MachineInstr *LoadInst, MachineInstr *StoreInst) {
MachineOperand &LoadBase = getBaseOperand(LoadInst);
MachineOperand &StoreBase = getBaseOperand(StoreInst);
auto StorePrevNonDbgInstr = skipDebugInstructionsBackward(
std::prev(MachineBasicBlock::instr_iterator(StoreInst)),
LoadInst->getParent()->instr_begin()).getNodePtr();
if (LoadBase.isReg()) {
MachineInstr *LastLoad = LoadInst->getPrevNode();
// If the original load and store to xmm/ymm were consecutive
// then the partial copies were also created in
// a consecutive order to reduce register pressure,
// and the location of the last load is before the last store.
if (StorePrevNonDbgInstr == LoadInst)
LastLoad = LoadInst->getPrevNode()->getPrevNode();
getBaseOperand(LastLoad).setIsKill(LoadBase.isKill());
}
if (StoreBase.isReg()) {
MachineInstr *StInst = StoreInst;
if (StorePrevNonDbgInstr == LoadInst)
StInst = LoadInst;
getBaseOperand(StInst->getPrevNode()).setIsKill(StoreBase.isKill());
}
}
// Returns true when the two memory operands may alias. Conservatively
// answers true when either operand has no IR value to query; otherwise
// asks AliasAnalysis about the overlapping extents starting at the
// smaller offset.
bool X86AvoidSFBPass::alias(const MachineMemOperand &Op1,
const MachineMemOperand &Op2) const {
if (!Op1.getValue() || !Op2.getValue())
return true;
int64_t MinOffset = std::min(Op1.getOffset(), Op2.getOffset());
int64_t Overlapa = Op1.getSize() + Op1.getOffset() - MinOffset;
int64_t Overlapb = Op2.getSize() + Op2.getOffset() - MinOffset;
AliasResult AAResult =
AA->alias(MemoryLocation(Op1.getValue(), Overlapa, Op1.getAAInfo()),
MemoryLocation(Op2.getValue(), Overlapb, Op2.getAAInfo()));
return AAResult != NoAlias;
}
// Scans the function for XMM/YMM loads whose single non-debug use is a
// matching store in the same basic block, with simple addressing on
// both sides and provably non-overlapping memory — i.e. a lowered
// memcpy — and records each pair in BlockedLoadsStoresPairs.
void X86AvoidSFBPass::findPotentiallylBlockedCopies(MachineFunction &MF) {
for (auto &MBB : MF)
for (auto &MI : MBB) {
if (!isPotentialBlockedMemCpyLd(MI.getOpcode()))
continue;
int DefVR = MI.getOperand(0).getReg();
// Only handle loads whose value feeds exactly one (store) use.
if (!MRI->hasOneNonDBGUse(DefVR))
continue;
for (auto UI = MRI->use_nodbg_begin(DefVR), UE = MRI->use_nodbg_end();
UI != UE;) {
MachineOperand &StoreMO = *UI++;
MachineInstr &StoreMI = *StoreMO.getParent();
// Skip cases where the memcpy may overlap.
if (StoreMI.getParent() == MI.getParent() &&
isPotentialBlockedMemCpyPair(MI.getOpcode(), StoreMI.getOpcode()) &&
isRelevantAddressingMode(&MI) &&
isRelevantAddressingMode(&StoreMI)) {
assert(MI.hasOneMemOperand() &&
"Expected one memory operand for load instruction");
assert(StoreMI.hasOneMemOperand() &&
"Expected one memory operand for store instruction");
if (!alias(**MI.memoperands_begin(), **StoreMI.memoperands_begin()))
BlockedLoadsStoresPairs.push_back(std::make_pair(&MI, &StoreMI));
}
}
}
}
// Size in bytes of the register class defined by LoadInst's opcode
// (16 for XMM loads, 32 for YMM loads).
unsigned X86AvoidSFBPass::getRegSizeInBytes(MachineInstr *LoadInst) {
auto TRC = TII->getRegClass(TII->get(LoadInst->getOpcode()), 0, TRI,
*LoadInst->getParent()->getParent());
return TRI->getRegSizeInBits(*TRC) / 8;
}
/// Rebuilds the blocked wide copy as a sequence of narrower copies.
/// Walks the blocking stores in ascending displacement order; for each
/// one it first copies the bytes up to the store's displacement, then
/// copies the store-sized region itself, so no narrow load spans a
/// blocking store. A final copy covers the tail after the last blocker.
void X86AvoidSFBPass::breakBlockedCopies(
    MachineInstr *LoadInst, MachineInstr *StoreInst,
    const DisplacementSizeMap &BlockingStoresDispSizeMap) {
  int64_t LdDispImm = getDispOperand(LoadInst).getImm();
  int64_t StDispImm = getDispOperand(StoreInst).getImm();
  // Offsets into the original load/store memory operands; they advance
  // in lock-step and therefore stay equal throughout.
  int64_t LMMOffset = 0;
  int64_t SMMOffset = 0;
  int64_t LdDisp1 = LdDispImm;
  int64_t LdDisp2 = 0;
  int64_t StDisp1 = StDispImm;
  int64_t StDisp2 = 0;
  unsigned Size1 = 0;
  unsigned Size2 = 0;
  // Constant distance between the store's and the load's displacement.
  int64_t LdStDelta = StDispImm - LdDispImm;
  for (auto DispSizePair : BlockingStoresDispSizeMap) {
    LdDisp2 = DispSizePair.first;
    StDisp2 = DispSizePair.first + LdStDelta;
    Size2 = DispSizePair.second;
    // Avoid copying overlapping areas.
    if (LdDisp2 < LdDisp1) {
      int OverlapDelta = LdDisp1 - LdDisp2;
      LdDisp2 += OverlapDelta;
      StDisp2 += OverlapDelta;
      Size2 -= OverlapDelta;
    }
    Size1 = LdDisp2 - LdDisp1;
    // Build a copy for the point until the current blocking store's
    // displacement.
    buildCopies(Size1, LoadInst, LdDisp1, StoreInst, StDisp1, LMMOffset,
                SMMOffset);
    // Build a copy for the current blocking store.
    buildCopies(Size2, LoadInst, LdDisp2, StoreInst, StDisp2, LMMOffset + Size1,
                SMMOffset + Size1);
    LdDisp1 = LdDisp2 + Size2;
    StDisp1 = StDisp2 + Size2;
    LMMOffset += Size1 + Size2;
    SMMOffset += Size1 + Size2;
  }
  // Copy the remaining tail of the wide access. The store-side
  // memory-operand offset is SMMOffset (the original code passed
  // LMMOffset twice; the two counters are always equal here, but using
  // SMMOffset keeps this call consistent with the ones above).
  unsigned Size3 = (LdDispImm + getRegSizeInBytes(LoadInst)) - LdDisp1;
  buildCopies(Size3, LoadInst, LdDisp1, StoreInst, StDisp1, LMMOffset,
              SMMOffset);
}
// True when the load and the store address memory through the same base
// operand: either the same register, or the same frame index.
static bool hasSameBaseOpValue(MachineInstr *LoadInst,
                               MachineInstr *StoreInst) {
  MachineOperand &LoadBase = getBaseOperand(LoadInst);
  MachineOperand &StoreBase = getBaseOperand(StoreInst);
  // Mixed register/frame-index bases can never match.
  if (LoadBase.isReg() != StoreBase.isReg())
    return false;
  return LoadBase.isReg() ? LoadBase.getReg() == StoreBase.getReg()
                          : LoadBase.getIndex() == StoreBase.getIndex();
}
// A store blocks the load iff it starts at or after the load's start and its
// start fits inside [LoadDispImm, LoadDispImm + LoadSize - StoreSize], i.e.
// the store writes entirely within the bytes the load reads.
// NOTE(review): LoadSize - StoreSize is unsigned; if StoreSize > LoadSize the
// subtraction wraps to a huge value and the upper bound is nearly always
// satisfied. Callers appear to guarantee StoreSize <= LoadSize — confirm.
static bool isBlockingStore(int64_t LoadDispImm, unsigned LoadSize,
                            int64_t StoreDispImm, unsigned StoreSize) {
  if (StoreDispImm < LoadDispImm)
    return false;
  return StoreDispImm <= LoadDispImm + (LoadSize - StoreSize);
}
// Keep track of all stores blocking a load: for each displacement remember
// only the smallest store that starts there (the smallest store is the one
// that must be copied separately to avoid the forwarding block).
static void
updateBlockingStoresDispSizeMap(DisplacementSizeMap &BlockingStoresDispSizeMap,
                                int64_t DispImm, unsigned Size) {
  // Single lookup instead of the previous count() + two operator[] calls:
  // insert() either creates the entry or hands back the existing one.
  auto Res = BlockingStoresDispSizeMap.insert(std::make_pair(DispImm, Size));
  if (!Res.second && Res.first->second > Size)
    Res.first->second = Size; // keep the smallest store at this displacement
}
// Remove blocking stores contained in each other.
// The map iterates in ascending displacement order, so a monotonic stack
// suffices: when the current store ends at or before a previously kept
// store's end, the previous (containing) store is dropped.
static void
removeRedundantBlockingStores(DisplacementSizeMap &BlockingStoresDispSizeMap) {
  if (BlockingStoresDispSizeMap.size() <= 1)
    return;
  SmallVector<std::pair<int64_t, unsigned>, 0> Kept;
  for (const auto &DispSize : BlockingStoresDispSizeMap) {
    int64_t CurrEnd = DispSize.first + DispSize.second;
    // Pop every kept store whose end is at or beyond the current store's
    // end: it contains the current (later-starting) store.
    while (!Kept.empty() &&
           CurrEnd <= Kept.back().first + Kept.back().second)
      Kept.pop_back();
    Kept.push_back(DispSize);
  }
  // Rebuild the map from the surviving entries.
  BlockingStoresDispSizeMap.clear();
  for (const auto &DispSize : Kept)
    BlockingStoresDispSizeMap.insert(DispSize);
}
// Pass entry point: find load/store pairs that look like a memcpy and whose
// load would hit a store-forwarding block, then break each one into smaller
// copies. Returns true iff the function was modified.
bool X86AvoidSFBPass::runOnMachineFunction(MachineFunction &MF) {
  bool Changed = false;
  // Only applies to 64-bit subtargets; can be disabled by flag or optnone.
  if (DisableX86AvoidStoreForwardBlocks || skipFunction(MF.getFunction()) ||
      !MF.getSubtarget<X86Subtarget>().is64Bit())
    return false;
  MRI = &MF.getRegInfo();
  assert(MRI->isSSA() && "Expected MIR to be in SSA form");
  TII = MF.getSubtarget<X86Subtarget>().getInstrInfo();
  TRI = MF.getSubtarget<X86Subtarget>().getRegisterInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  LLVM_DEBUG(dbgs() << "Start X86AvoidStoreForwardBlocks\n";);
  // Look for a load then a store to XMM/YMM which look like a memcpy
  findPotentiallylBlockedCopies(MF);
  for (auto LoadStoreInstPair : BlockedLoadsStoresPairs) {
    MachineInstr *LoadInst = LoadStoreInstPair.first;
    int64_t LdDispImm = getDispOperand(LoadInst).getImm();
    DisplacementSizeMap BlockingStoresDispSizeMap;
    SmallVector<MachineInstr *, 2> PotentialBlockers =
        findPotentialBlockers(LoadInst);
    // Collect every store that would block forwarding into this load.
    for (auto PBInst : PotentialBlockers) {
      if (!isPotentialBlockingStoreInst(PBInst->getOpcode(),
                                        LoadInst->getOpcode()) ||
          !isRelevantAddressingMode(PBInst))
        continue;
      int64_t PBstDispImm = getDispOperand(PBInst).getImm();
      assert(PBInst->hasOneMemOperand() && "Expected One Memory Operand");
      unsigned PBstSize = (*PBInst->memoperands_begin())->getSize();
      // This check doesn't cover all cases, but it will suffice for now.
      // TODO: take branch probability into consideration, if the blocking
      // store is in an unreached block, breaking the memcopy could lose
      // performance.
      if (hasSameBaseOpValue(LoadInst, PBInst) &&
          isBlockingStore(LdDispImm, getRegSizeInBytes(LoadInst), PBstDispImm,
                          PBstSize))
        updateBlockingStoresDispSizeMap(BlockingStoresDispSizeMap, PBstDispImm,
                                        PBstSize);
    }
    if (BlockingStoresDispSizeMap.empty())
      continue;
    // We found a store forward block, break the memcpy's load and store
    // into smaller copies such that each smaller store that was causing
    // a store block would now be copied separately.
    MachineInstr *StoreInst = LoadStoreInstPair.second;
    LLVM_DEBUG(dbgs() << "Blocked load and store instructions: \n");
    LLVM_DEBUG(LoadInst->dump());
    LLVM_DEBUG(StoreInst->dump());
    LLVM_DEBUG(dbgs() << "Replaced with:\n");
    removeRedundantBlockingStores(BlockingStoresDispSizeMap);
    breakBlockedCopies(LoadInst, StoreInst, BlockingStoresDispSizeMap);
    updateKillStatus(LoadInst, StoreInst);
    ForRemoval.push_back(LoadInst);
    ForRemoval.push_back(StoreInst);
    // Fix: the original never set Changed, so the pass reported "no
    // modification" to the pass manager even after rewriting instructions.
    Changed = true;
  }
  for (auto RemovedInst : ForRemoval) {
    RemovedInst->eraseFromParent();
  }
  ForRemoval.clear();
  BlockedLoadsStoresPairs.clear();
  LLVM_DEBUG(dbgs() << "End X86AvoidStoreForwardBlocks\n";);
  return Changed;
}
| endlessm/chromium-browser | third_party/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp | C++ | bsd-3-clause | 28,773 |
/**
* Copyright (C) 2016 Turi
* All rights reserved.
*
* This software may be modified and distributed under the terms
* of the BSD license. See the LICENSE file for details.
*/
/*
* Copyright (c) 2009 Carnegie Mellon University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*
* For more about this software visit:
*
* http://www.graphlab.ml.cmu.edu
*
*/
#ifndef GRAPHLAB_RPC_CIRCULAR_IOVEC_BUFFER_HPP
#define GRAPHLAB_RPC_CIRCULAR_IOVEC_BUFFER_HPP
#include <algorithm>
#include <cstdlib>
#include <vector>

#include <limits.h>
#include <sys/socket.h>
namespace graphlab{
namespace dc_impl {
/**
* \ingroup rpc
* \internal
 * A circular buffer which maintains a parallel sequence of iovecs.
 * One sequence holds the basic iovecs;
 * the other sequence stores the original unmodified pointers.
 * This is minimally checked: the length must be a power of 2.
*/
/**
 * A circular buffer of iovec entries kept in two parallel arrays.
 *
 * v holds the original, unmodified iovecs (so the base pointers can be
 * free()d once fully sent); parallel_v holds working copies whose
 * iov_base/iov_len are advanced by sent() as bytes go out on the wire.
 *
 * Capacity is always a power of 2 so wrap-around is a mask:
 * (idx + 1) & (capacity - 1).
 */
struct circular_iovec_buffer {
  /// Creates a buffer with room for at least \p len entries; the request is
  /// rounded up to a power of 2 (default 4096). Fixes the original bug of
  /// ignoring \p len and always allocating exactly 4096 slots.
  inline circular_iovec_buffer(size_t len = 4096) {
    size_t cap = 4;
    while (cap < len) cap *= 2; // round up to a power of 2
    v.resize(cap);
    parallel_v.resize(cap);
    head = 0;
    tail = 0;
    numel = 0;
  }

  /// True when no entries are buffered.
  inline bool empty() const {
    return numel == 0;
  }

  /// Number of buffered entries.
  size_t size() const {
    return numel;
  }

  /// Grows capacity to a power of 2 strictly greater than _n (no-op when _n
  /// already fits), preserving logical order when the live region wraps.
  void reserve(size_t _n) {
    if (_n <= v.size()) return;
    size_t originalsize = v.size();
    size_t n = v.size();
    // capacity must remain a power of 2
    while (n <= _n) n *= 2;
    v.resize(n);
    parallel_v.resize(n);
    if (head >= tail && numel > 0) {
      // The live region wraps around the old end. Relocate the prefix
      // [0, tail) to just past the old end so the region is contiguous
      // under the new (at least doubled) capacity.
      size_t newtail = originalsize;
      for (size_t i = 0;i < tail; ++i) {
        v[newtail] = v[i];
        parallel_v[newtail] = parallel_v[i];
        ++newtail;
      }
      tail = newtail;
    }
  }

  /// Appends the first \p nwrite entries of \p other, growing if needed.
  inline void write(const std::vector<iovec>& other, size_t nwrite) {
    reserve(numel + nwrite);
    for (size_t i = 0;i < nwrite; ++i) {
      v[tail] = other[i];
      parallel_v[tail] = other[i];
      tail = (tail + 1) & (v.size() - 1);
    }
    numel += nwrite;
  }

  /**
   * Writes an entry into the buffer, resizing the buffer if necessary.
   * This buffer will take over all iovec pointers and free them when done.
   */
  inline void write(const iovec &entry) {
    if (numel == v.size()) {
      reserve(2 * numel);
    }
    v[tail] = entry;
    parallel_v[tail] = entry;
    tail = (tail + 1) & (v.size() - 1); ++numel;
  }

  /**
   * Writes an entry into the buffer, resizing the buffer if necessary.
   * This buffer will take over all iovec pointers and free them when done.
   * This version of write allows the iovec that is sent to be different from
   * the iovec that is freed (for instance, what is sent could be a subarray
   * of what is to be freed).
   */
  inline void write(const iovec &entry, const iovec& actual_ptr_entry) {
    if (numel == v.size()) {
      reserve(2 * numel);
    }
    v[tail] = actual_ptr_entry; // original pointer, freed later
    parallel_v[tail] = entry;   // what actually gets sent
    tail = (tail + 1) & (v.size() - 1); ++numel;
  }

  /**
   * Erases a single iovec from the head, freeing its original base pointer.
   */
  inline void erase_from_head_and_free() {
    free(v[head].iov_base);
    head = (head + 1) & (v.size() - 1);
    --numel;
  }

  /**
   * Fills a msghdr describing the contiguous unsent region starting at
   * head, capped at IOV_MAX entries (the per-sendmsg limit).
   */
  void fill_msghdr(struct msghdr& data) {
    data.msg_iov = &(parallel_v[head]);
    if (head < tail) {
      data.msg_iovlen = tail - head;
    }
    else {
      // Region wraps: only the segment up to the array end is contiguous.
      // The wrapped prefix is picked up by a later call.
      data.msg_iovlen = v.size() - head;
    }
    data.msg_iovlen = std::min<size_t>(IOV_MAX, data.msg_iovlen);
  }

  /**
   * Advances the head as if \p len bytes were sent, freeing every iovec
   * that becomes fully consumed. \p len must not exceed the total number
   * of buffered bytes.
   */
  void sent(size_t len) {
    while(len > 0) {
      size_t curv_sent_len = std::min(len, parallel_v[head].iov_len);
      parallel_v[head].iov_len -= curv_sent_len;
      parallel_v[head].iov_base = (char*)(parallel_v[head].iov_base) + curv_sent_len;
      len -= curv_sent_len;
      if (parallel_v[head].iov_len == 0) {
        erase_from_head_and_free();
      }
    }
  }

  std::vector<struct iovec> v;          ///< original iovecs; freed once sent
  std::vector<struct iovec> parallel_v; ///< working copies advanced by sent()
  size_t head;   ///< index of the oldest unsent entry
  size_t tail;   ///< index one past the newest entry
  size_t numel;  ///< number of live entries
};
}
}
#endif
| TobyRoseman/SFrame | oss_src/rpc/circular_iovec_buffer.hpp | C++ | bsd-3-clause | 4,545 |
<?php
// Name: /alert/confirm/index.php
// Author: Richard Allan richard@sheffieldhallam.org.uk
// Version: 0.5 beta
// Date: 6th Jan 2005
// Description: This file contains ALERT class.
// This is the page users come to when they click the link in their
// confirmation email after joining the site.
// What happens? They will come here with t=23-adsf7897fd78d9sfsd200501021500
// where the value of 't' is a form of their registration token.
// This token is a salted version of their email address concatenated
// with the time the alert was created.
// We check this exists in the database and if so we run the confirm
// function of class ALERT to set the field confirmed in the table
// alerts to true.
// We then print a nice welcome message.
// This depends on there being page definitions in metadata.php
// FUNCTIONS
// confirm_success() Displays a page with a success confirmation message
// confirm_error() Displays a page with an error message
// INITIALISATION
include_once "../../../includes/easyparliament/init.php";
include_once "../../../includes/easyparliament/member.php";
include_once INCLUDESPATH . '../../../phplib/crosssell.php';
// Instantiate an instance of ALERT
$ALERT = new ALERT;
// 't' is the confirmation token from the emailed link; ALERT::confirm()
// looks it up and, on a match, marks the alert as confirmed.
$success = $ALERT->confirm( get_http_var('t') );
if ($success) {
    confirm_success($ALERT);
} else {
    confirm_error();
}
// FUNCTION: confirm_success
// Renders the "alert confirmed" page: lists the alert's criteria and
// optionally appends a cross-sell advert chosen from the alert's email
// address and any "speaker:" term in its criteria.
function confirm_success ($ALERT) {
    global $PAGE, $this_page, $THEUSER;
    $this_page = 'alertconfirmsucceeded';
    // Human-readable description of what this alert matches.
    $criteria = $ALERT->criteria_pretty(true);
    $email = $ALERT->email();
    $extra = null;
    $PAGE->page_start();
    $PAGE->stripe_start();
?>
<p>Your alert has been confirmed.</p>
<p>You will now receive email alerts for the following criteria:</p>
<ul><?=$criteria?></ul>
<p>This is normally the day after, but could conceivably be later due to issues at our or aph.gov.au's end.</p>
<?php
    // Pick an advert id; strstr() passes the "speaker:..." tail of the raw
    // criteria so person-specific adverts can be selected.
    $extra = alert_confirmation_advert(array('email'=>$email, 'pid'=>strstr($ALERT->criteria(),'speaker:')));
    if ($extra)
        // page_end() receives "advert=<id>"; its exact handling is defined
        // elsewhere — NOTE(review): confirm against $PAGE->page_end().
        $extra = "advert=$extra";
    $PAGE->stripe_end();
    $PAGE->page_end($extra);
}
// FUNCTION: confirm_error
// Renders a friendly failure page shown when ALERT::confirm() could not
// match the supplied token (e.g. the emailed link was truncated).
function confirm_error() {
    // Friendly error, not a normal one!
    global $PAGE, $this_page;
    $this_page = 'alertconfirmfailed';
    $PAGE->page_start();
    $PAGE->stripe_start();
?>
<p>The link you followed to reach this page appears to be incomplete.</p>
<p>If you clicked a link in your confirmation email you may need to manually copy and paste the entire link to the 'Location' bar of the web browser and try again.</p>
<p>If you still get this message, please do <a href="mailto:<?php echo CONTACTEMAIL; ?>">email us</a> and let us know, and we'll help out!</p>
<?php
    $PAGE->stripe_end();
    $PAGE->page_end();
}
?>
| NathanaelB/twfy | www/docs/alert/confirm/index.php | PHP | bsd-3-clause | 2,728 |
//===-- AMDGPUMachineFunctionInfo.cpp ---------------------------------------=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "AMDGPUMachineFunction.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUPerfHintAnalysis.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
using namespace llvm;
// Initializes per-function AMDGPU state from the IR function: performance
// hints come from the string attributes "amdgpu-memory-bound" and
// "amdgpu-wave-limiter"; for kernel calling conventions the explicit kernel
// argument size and max alignment are computed from the subtarget.
AMDGPUMachineFunction::AMDGPUMachineFunction(const MachineFunction &MF) :
  MachineFunctionInfo(),
  LocalMemoryObjects(),
  ExplicitKernArgSize(0),
  LDSSize(0),
  Mode(MF.getFunction(), MF.getSubtarget<GCNSubtarget>()),
  IsEntryFunction(AMDGPU::isEntryFunctionCC(MF.getFunction().getCallingConv())),
  NoSignedZerosFPMath(MF.getTarget().Options.NoSignedZerosFPMath),
  MemoryBound(false),
  WaveLimiter(false) {
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(MF);

  // FIXME: Should initialize KernArgSize based on ExplicitKernelArgOffset,
  // except reserved size is not correctly aligned.
  const Function &F = MF.getFunction();

  // Both hint attributes are string attributes holding "true"/"false".
  Attribute MemBoundAttr = F.getFnAttribute("amdgpu-memory-bound");
  MemoryBound = MemBoundAttr.isStringAttribute() &&
                MemBoundAttr.getValueAsString() == "true";

  Attribute WaveLimitAttr = F.getFnAttribute("amdgpu-wave-limiter");
  WaveLimiter = WaveLimitAttr.isStringAttribute() &&
                WaveLimitAttr.getValueAsString() == "true";

  CallingConv::ID CC = F.getCallingConv();
  // Explicit kernel arguments only exist for kernel entry points.
  if (CC == CallingConv::AMDGPU_KERNEL || CC == CallingConv::SPIR_KERNEL)
    ExplicitKernArgSize = ST.getExplicitKernArgSize(F, MaxKernArgAlign);
}
// Assigns (or returns the previously assigned) byte offset for GV within
// this function's LDS allocation, bumping LDSSize past the new object.
unsigned AMDGPUMachineFunction::allocateLDSGlobal(const DataLayout &DL,
                                                  const GlobalValue &GV) {
  // Reuse the cached offset if this global was already placed.
  auto Slot = LocalMemoryObjects.insert(std::make_pair(&GV, 0));
  if (!Slot.second)
    return Slot.first->second;

  // Explicit alignment wins; otherwise fall back to the ABI alignment of
  // the global's value type.
  unsigned Align = GV.getAlignment();
  if (!Align)
    Align = DL.getABITypeAlignment(GV.getValueType());

  /// TODO: We should sort these to minimize wasted space due to alignment
  /// padding. Currently the padding is decided by the first encountered use
  /// during lowering.
  LDSSize = alignTo(LDSSize, Align);
  unsigned Offset = LDSSize;
  Slot.first->second = Offset;
  LDSSize += DL.getTypeAllocSize(GV.getValueType());
  return Offset;
}
| endlessm/chromium-browser | third_party/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp | C++ | bsd-3-clause | 2,450 |
// Copyright 2014 Google Inc. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd
package com.google.u2f.key.messages;
import java.util.Arrays;
/**
 * A U2F authentication response message.
 *
 * <p>Instances are value objects: the signature array is defensively copied
 * on the way in and on the way out, so callers cannot mutate internal state
 * (Effective Java, Item 50).
 */
public class AuthenticateResponse extends U2FResponse {
  private final byte userPresence;
  private final int counter;
  private final byte[] signature;

  /**
   * @param userPresence user-presence byte reported by the token
   * @param counter big-endian counter value from the token
   * @param signature ECDSA (P-256) signature bytes; defensively copied
   */
  public AuthenticateResponse(byte userPresence, int counter, byte[] signature) {
    super();
    this.userPresence = userPresence;
    this.counter = counter;
    // Defensive copy: without it a caller could mutate the array after
    // construction and silently change this response's signature.
    this.signature = Arrays.copyOf(signature, signature.length);
  }

  /**
   * Bit 0 is set to 1, which means that user presence was verified. (This
   * version of the protocol doesn't specify a way to request authentication
   * responses without requiring user presence.) A different value of Bit 0, as
   * well as Bits 1 through 7, are reserved for future use. The values of Bit 1
   * through 7 SHOULD be 0.
   */
  public byte getUserPresence() {
    return userPresence;
  }

  /**
   * This is the big-endian representation of a counter value that the U2F token
   * increments every time it performs an authentication operation.
   */
  public int getCounter() {
    return counter;
  }

  /** Returns a copy of the ECDSA signature (on P-256). */
  public byte[] getSignature() {
    // Copy so the internal array stays immutable.
    return Arrays.copyOf(signature, signature.length);
  }

  @Override
  public int hashCode() {
    final int prime = 31;
    int result = 1;
    result = prime * result + counter;
    result = prime * result + Arrays.hashCode(signature);
    result = prime * result + userPresence;
    return result;
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj)
      return true;
    if (obj == null || getClass() != obj.getClass())
      return false;
    AuthenticateResponse other = (AuthenticateResponse) obj;
    return counter == other.counter
        && userPresence == other.userPresence
        && Arrays.equals(signature, other.signature);
  }
}
| jshufelt/u2f-ref-code | u2f-ref-code/java/src/com/google/u2f/key/messages/AuthenticateResponse.java | Java | bsd-3-clause | 2,086 |
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Start the inspector test; Protocol is used below to issue
// Runtime.evaluate commands against the inspected context.
const {session, contextGroup, Protocol} =
    InspectorTest.start('Tests Runtime.RemoteObject.');
// Logs the expression together with the effective returnByValue /
// generatePreview flags (defaulting to false), then forwards the options
// unchanged to Runtime.evaluate and returns its promise.
function evaluate(options) {
  const byValue = options.returnByValue || false;
  const preview = options.generatePreview || false;
  InspectorTest.log(
      `'${options.expression}', returnByValue: ${byValue}, ` +
      `generatePreview: ${preview}`);
  return Protocol.Runtime.evaluate(options);
}
InspectorTest.runAsyncTestSuite([
async function testNull() {
InspectorTest.logMessage((await evaluate({
expression: 'null'
})).result);
InspectorTest.logMessage((await evaluate({
expression: 'null',
returnByValue: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: 'null',
generatePreview: true
})).result);
},
async function testBoolean() {
InspectorTest.logMessage((await evaluate({
expression: 'true'
})).result);
InspectorTest.logMessage((await evaluate({
expression: 'false'
})).result);
InspectorTest.logMessage((await evaluate({
expression: 'true',
returnByValue: true,
})).result);
InspectorTest.logMessage((await evaluate({
expression: 'true',
generatePreview: true,
})).result);
},
async function testNumber() {
InspectorTest.logMessage((await evaluate({
expression: '0 / {}'
})).result);
InspectorTest.logMessage((await evaluate({
expression: '-0'
})).result);
InspectorTest.logMessage((await evaluate({
expression: '0'
})).result);
InspectorTest.logMessage((await evaluate({
expression: '1/0'
})).result);
InspectorTest.logMessage((await evaluate({
expression: '-1/0'
})).result);
InspectorTest.logMessage((await evaluate({
expression: '2.3456'
})).result);
InspectorTest.logMessage((await evaluate({
expression: '2.3456',
returnByValue: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: '1/0',
returnByValue: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: '({a: 1/0})',
returnByValue: true
})).result);
},
async function testUndefined() {
InspectorTest.logMessage((await evaluate({
expression: 'undefined'
})).result);
InspectorTest.logMessage((await evaluate({
expression: 'undefined',
returnByValue: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: '({a : undefined})',
returnByValue: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: '([1, undefined])',
returnByValue: true
})).result);
},
async function testString() {
InspectorTest.logMessage((await evaluate({
expression: '\'Hello!\''
})).result);
InspectorTest.logMessage((await evaluate({
expression: '\'Hello!\'',
returnByValue: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: '\'Hello!\'',
generatePreview: true
})).result);
},
async function testSymbol() {
InspectorTest.logMessage((await evaluate({
expression: 'Symbol()',
generatePreview: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: 'Symbol(42)',
generatePreview: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: `Symbol('abc')`,
generatePreview: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: `Symbol('abc')`,
returnByValue: true
})));
},
async function testReturnByValue() {
InspectorTest.log('Empty object');
InspectorTest.logMessage((await evaluate({
expression: '({})', returnByValue: true
})).result);
InspectorTest.log('Object with properties');
InspectorTest.logMessage((await evaluate({
expression: '({a:1, b:2})', returnByValue: true
})).result);
InspectorTest.log('Object with cycle');
InspectorTest.logMessage((await evaluate({
expression: 'a = {};a.a = a; a', returnByValue: true
})).error);
InspectorTest.log('Function () => 42');
InspectorTest.logMessage((await evaluate({
expression: '() => 42', returnByValue: true
})).result);
InspectorTest.log('Symbol(42)');
InspectorTest.logMessage((await evaluate({
expression: 'Symbol(42)', returnByValue: true
})).error);
InspectorTest.log('Error object');
InspectorTest.logMessage((await evaluate({
expression: 'new Error()', returnByValue: true
})).result);
},
async function testFunction() {
InspectorTest.logMessage((await evaluate({
expression: '(() => 42)'
})).result);
InspectorTest.logMessage((await evaluate({
expression: '(function() { return 42 })'
})).result);
InspectorTest.logMessage((await evaluate({
expression: '(function name() { return 42 })'
})).result);
InspectorTest.logMessage((await evaluate({
expression: '(async function asyncName() { return 42 })'
})).result);
InspectorTest.logMessage((await evaluate({
expression: '(async () => 42)'
})).result);
InspectorTest.logMessage((await evaluate({
expression: '(function (a) { return a; }).bind(null, 42)'
})).result);
InspectorTest.logMessage((await evaluate({
expression: 'a = (function() { return 42 }); a.b = 2; a',
generatePreview: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: '(function() { return 42 })',
returnByValue: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: 'a = (function() { return 42 }); a.b = 2; a',
returnByValue: true
})).result);
},
async function testBigInt() {
InspectorTest.logMessage((await evaluate({
expression: '1n'
})).result);
InspectorTest.logMessage((await evaluate({
expression: '-5n'
})).result);
InspectorTest.logMessage((await evaluate({
expression: '1234567890123456789012345678901234567890n'
})).result);
InspectorTest.logMessage((await evaluate({
expression: '-5n',
returnByValue: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: '-5n',
generatePreview: true
})).result);
},
async function testRegExp() {
InspectorTest.logMessage((await evaluate({
expression: '/\w+/g'
})).result);
InspectorTest.logMessage((await evaluate({
expression: '/\w+/i'
})).result);
InspectorTest.logMessage((await evaluate({
expression: '/\w+/m'
})).result);
InspectorTest.logMessage((await evaluate({
expression: '/\w+/s'
})).result);
InspectorTest.logMessage((await evaluate({
expression: '/\w+/u'
})).result);
InspectorTest.logMessage((await evaluate({
expression: '/\w+/y'
})).result);
InspectorTest.logMessage((await evaluate({
expression: '/\w+/gimsuy'
})).result);
InspectorTest.logMessage((await evaluate({
expression: `new RegExp('\\w+', 'g')`,
})).result);
InspectorTest.logMessage((await evaluate({
expression: `var re = new RegExp('\\w+', 'g');
re.prop = 32;
re`,
generatePreview: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: `var re = new RegExp('\\w+', 'g');
re.prop = 32;
re`,
returnByValue: true
})).result);
},
async function testDate() {
let result = (await evaluate({
expression: `new Date('May 18, 1991 03:24:00')`,
generatePreview: true
})).result;
if (result.result.description === new Date('May 18, 1991 03:24:00') + '')
result.result.description = '<expected description>';
if (result.result.preview.description === new Date('May 18, 1991 03:24:00') + '')
result.result.preview.description = '<expected description>';
InspectorTest.logMessage(result);
result = (await evaluate({
expression: `new Date(2018, 9, 31)`,
generatePreview: true
})).result;
if (result.result.description === new Date(2018, 9, 31) + '')
result.result.description = '<expected description>';
if (result.result.preview.description === new Date(2018, 9, 31) + '')
result.result.preview.description = '<expected description>';
InspectorTest.logMessage(result);
result = (await evaluate({
expression: `a = new Date(2018, 9, 31); a.b = 2; a`,
generatePreview: true
})).result;
if (result.result.description === new Date(2018, 9, 31) + '')
result.result.description = '<expected description>';
if (result.result.preview.description === new Date(2018, 9, 31) + '')
result.result.preview.description = '<expected description>';
InspectorTest.logMessage(result);
},
async function testMap() {
InspectorTest.logMessage((await evaluate({
expression: 'new Map()',
generatePreview: true,
})).result);
InspectorTest.logMessage((await evaluate({
expression: 'new Map([[1,2]])',
generatePreview: true,
})).result);
InspectorTest.logMessage((await evaluate({
expression: 'a = new Map(); a.set(a, a); a',
generatePreview: true,
})).result);
InspectorTest.logMessage((await evaluate({
expression: `new Map([['a','b']])`
})).result);
InspectorTest.logMessage((await evaluate({
expression: `({ a: new Map([['a','b']]) })`,
generatePreview: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: `m = new Map([['a', {b: 2}]])
m.d = 42;
m`,
generatePreview: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: `m = new Map([['a', {b: 2}]])
m.d = 42;
m`,
returnByValue: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: `new Map([['a', {b: 2}]]).values()`
})).result);
InspectorTest.logMessage((await evaluate({
expression: `new Map([['a', {b: 2}]]).values()`,
generatePreview: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: `it = new Map([['a', {b: 2}]]).values(); it.next(); it`,
generatePreview: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: `new Map([['a', {b: 2}]]).values()`,
returnByValue: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: `new Map([['a', {b: 2}]]).entries()`
})).result);
InspectorTest.logMessage((await evaluate({
expression: `new Map([['a', {b: 2}]]).entries()`,
generatePreview: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: `it = new Map([['a', {b: 2}]]).entries(); it.next(); it`,
generatePreview: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: `new Map([['a', {b: 2}]]).entries()`,
returnByValue: true
})).result);
},
async function testSet() {
InspectorTest.logMessage((await evaluate({
expression: 'new Set([1])',
generatePreview: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: 'new Set([1])',
returnByValue: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: 'new Set([1,2,3,4,5,6,7])',
generatePreview: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: 'new Set([1,2,3]).values()',
generatePreview: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: 'it = new Set([1,2,3]).values(); it.next(); it',
generatePreview: true
})).result);
},
async function testWeakMap() {
InspectorTest.logMessage((await evaluate({
expression: 'new WeakMap()',
generatePreview: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: 'new WeakMap([[this, 1]])',
generatePreview: true
})).result);
},
async function testWeakSet() {
InspectorTest.logMessage((await evaluate({
expression: 'new WeakSet()',
generatePreview: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: 'new WeakSet([this])',
generatePreview: true
})).result);
},
async function testGenerator() {
InspectorTest.logMessage((await evaluate({
expression: 'g = (function*(){ yield 42; })(); g.a = 2; g',
generatePreview: true
})).result);
},
async function testError() {
InspectorTest.logMessage((await evaluate({
expression: 'new Error()'
})).result);
InspectorTest.logMessage((await evaluate({
expression: `new Error('abc')`
})).result);
InspectorTest.logMessage((await evaluate({
expression: `new Error('at\\nat')`
})).result);
InspectorTest.logMessage((await evaluate({
expression: `new Error('preview')`,
returnByValue: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: `new Error('preview')`,
generatePreview: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: `({a: new Error('preview')})`,
generatePreview: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: `a = new Error('preview and a'); a.a = 123; a`,
generatePreview: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: `a = new Error('preview and a'); a.a = 123; a`,
returnByValue: true
})).result);
},
async function testCustomError() {
InspectorTest.logMessage((await evaluate({
expression: `class CustomError extends Error {}; a = new CustomError(); delete a.stack; a`
})).result);
},
async function testCustomErrorWithMessage() {
InspectorTest.logMessage((await evaluate( {
expression: `class CustomMsgError extends Error {}; a = new CustomMsgError(); delete a.stack; a.message = 'foobar'; a`
})).result);
},
async function testProxy() {
InspectorTest.logMessage((await evaluate({
expression: 'new Proxy({}, {})'
})).result);
InspectorTest.logMessage((await evaluate({
expression: 'new Proxy(new Error(), {})'
})).result);
InspectorTest.logMessage((await evaluate({
expression: 'new Proxy({c: 3}, {d: 4})',
returnByValue: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: 'new Proxy({a: 1}, {b: 2})',
generatePreview: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: '({e: new Proxy({a: 1}, {b: 2})})',
generatePreview: true
})).result);
},
async function testPromise() {
InspectorTest.logMessage((await evaluate({
expression: 'Promise.resolve(42)'
})).result);
InspectorTest.logMessage((await evaluate({
expression: 'Promise.reject(42)'
})).result);
InspectorTest.logMessage((await evaluate({
expression: '(async function(){})()'
})).result);
InspectorTest.logMessage((await evaluate({
expression: `Promise.resolve('a'.repeat(101))`,
generatePreview: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: 'Promise.reject(42)',
generatePreview: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: 'new Promise(resolve => this.resolve = resolve)',
generatePreview: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: 'a = Promise.resolve(42); a.b = 2; a',
returnByValue: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: '({a: Promise.resolve(42)})',
generatePreview: true
})).result);
},
async function testTypedArray() {
InspectorTest.logMessage((await evaluate({
expression: 'a = new Uint8Array(2); a.b = 2; a',
generatePreview: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: 'new Int32Array(101)',
generatePreview: true
})).result);
},
async function testArrayBuffer() {
InspectorTest.logMessage((await evaluate({
expression: 'new Uint8Array().buffer',
generatePreview: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: 'new Int32Array(100).buffer',
generatePreview: true
})).result);
},
async function testDataView() {
InspectorTest.logMessage((await evaluate({
expression: 'new DataView(new ArrayBuffer(16))',
generatePreview: true
})).result);
InspectorTest.logMessage((await evaluate({
expression: 'new DataView(new ArrayBuffer(16), 12, 4)',
generatePreview: true
})).result);
},
async function testArray() {
InspectorTest.logMessage((await evaluate({
expression: '[]'
})).result);
InspectorTest.logMessage((await evaluate({
expression: '[1,2,3]'
})).result);
},
async function testArrayLike() {
InspectorTest.logMessage((await evaluate({
expression: '({length: 5, splice: () => []})'
})).result);
InspectorTest.logMessage((await evaluate({
expression: `new (class Foo{constructor() {
this.length = 5;
this.splice = () => [];
}})`
})).result);
InspectorTest.logMessage((await evaluate({
expression: '({length: -5, splice: () => []})'
})).result);
},
  // General object formatting across the three serialization modes (default,
  // returnByValue, generatePreview): literals, named/anonymous constructors,
  // accessor properties, primitive-valued properties (incl. -0, NaN,
  // Infinity, BigInt, Symbol, RegExp), functions, nested objects, and string
  // length truncation. The first six log only `.result`; the rest log the
  // full protocol response.
  async function testOtherObjects() {
    InspectorTest.logMessage((await evaluate({
      expression: '({a: 1, b:2})'
    })).result);
    InspectorTest.logMessage((await evaluate({
      expression: '({a: 1, b:2})',
      returnByValue: true
    })).result);
    InspectorTest.logMessage((await evaluate({
      expression: '({a: 1, b:2})',
      generatePreview: true
    })).result);
    InspectorTest.logMessage((await evaluate({
      expression: 'new (function Foo() { this.a = 5; })'
    })).result);
    InspectorTest.logMessage((await evaluate({
      expression: 'new (function Foo() { this.a = [1,2,3]; })',
      returnByValue: true
    })).result);
    InspectorTest.logMessage((await evaluate({
      expression: 'new (class Bar {})'
    })).result);
    // Native accessors created by the inspector test harness.
    InspectorTest.logMessage((await evaluate({
      expression: 'inspector.createObjectWithAccessor(\'title\', true)',
      generatePreview: true
    })));
    InspectorTest.logMessage((await evaluate({
      expression: 'inspector.createObjectWithAccessor(\'title\', false)',
      generatePreview: true
    })));
    // TODO(kozyatinskiy): fix this one.
    InspectorTest.logMessage((await evaluate({
      expression: 'inspector.createObjectWithAccessor(\'title\', true)',
      returnByValue: true
    })));
    InspectorTest.logMessage((await evaluate({
      expression: '({get a() { return 42; }})',
      generatePreview: true
    })));
    InspectorTest.logMessage((await evaluate({
      expression: '({set a(v) {}})',
      generatePreview: true
    })));
    InspectorTest.logMessage((await evaluate({
      expression: '({a: () => 42})',
      generatePreview: true
    })));
    InspectorTest.logMessage((await evaluate({
      expression: '({a: null})',
      generatePreview: true
    })));
    InspectorTest.logMessage((await evaluate({
      expression: '({a: true})',
      generatePreview: true
    })));
    // Special numeric values that JSON cannot represent directly.
    InspectorTest.logMessage((await evaluate({
      expression: '({a1: -Infinity, a2: +Infinity, a3: -0, a4: NaN, a5: 1.23})',
      generatePreview: true
    })));
    InspectorTest.logMessage((await evaluate({
      expression: '({a1: 1234567890123456789012345678901234567890n})',
      generatePreview: true
    })));
    InspectorTest.logMessage((await evaluate({
      expression: '({a1: Symbol(42)})',
      generatePreview: true
    })));
    InspectorTest.logMessage((await evaluate({
      expression: '({a1: /abc/i})',
      generatePreview: true
    })));
    InspectorTest.logMessage((await evaluate({
      expression: '({a1: () => 42, a2: async () => 42})',
      generatePreview: true
    })));
    InspectorTest.logMessage((await evaluate({
      expression: '({a1: ({}), a2: new (class Bar{})})',
      generatePreview: true
    })));
    // 100 vs 101 chars — presumably straddles the preview string-length cap.
    InspectorTest.logMessage((await evaluate({
      expression: `({a1: 'a'.repeat(100), a2: 'a'.repeat(101)})`,
      generatePreview: true
    })));
    InspectorTest.logMessage((await evaluate({
      expression: `({a1: 1, a2: 2, a3: 3, a4:4, a5:5, a6: 6})`,
      generatePreview: true
    })));
    InspectorTest.logMessage((await evaluate({
      expression: `([1,2,3])`,
      generatePreview: true
    })));
  },
async function testArray2() {
InspectorTest.logMessage((await evaluate({
expression: `([1,2,3])`
})));
InspectorTest.logMessage((await evaluate({
expression: `([1,2,3])`,
returnByValue: true
})));
InspectorTest.logMessage((await evaluate({
expression: `([1,2,3])`,
generatePreview: true
})));
InspectorTest.logMessage((await evaluate({
expression: `({a: [1,2,3]})`,
generatePreview: true
})));
}
]);
| youtube/cobalt | third_party/v8/test/inspector/runtime/remote-object.js | JavaScript | bsd-3-clause | 21,188 |
#!/usr/bin/python
#----------------------------------------------------------------------
# This module is designed to live inside the "lldb" python package
# in the "lldb.macosx" package. To use this in the embedded python
# interpreter using "lldb" just import it:
#
# (lldb) script import lldb.macosx.heap
#----------------------------------------------------------------------
from __future__ import print_function
import lldb
import optparse
import os
import os.path
import re
import shlex
import string
import sys
import tempfile
import lldb.utils.symbolication
g_libheap_dylib_dir = None
g_libheap_dylib_dict = dict()
def get_iterate_memory_expr(
        options,
        process,
        user_init_code,
        user_return_code):
    """Build the C/C++ expression source that enumerates target memory.

    The returned string is evaluated inside the debugged process by the
    callers (ptr_refs/cstr_refs/etc.). USER_INIT_CODE must define a
    ``callback_baton_t`` type with a ``range_callback_t callback`` member and
    an instance named ``baton``; USER_RETURN_CODE produces the expression's
    final value. Depending on OPTIONS, ranges are gathered from all
    writable+readable VM regions, or from the malloc zones, thread stacks
    (via get_thread_stack_ranges_struct) and segments
    (via get_sections_ranges_struct).
    """
    # Common typedefs every variant of the expression relies on.
    expr = '''
typedef unsigned natural_t;
typedef uintptr_t vm_size_t;
typedef uintptr_t vm_address_t;
typedef natural_t task_t;
typedef int kern_return_t;
#define KERN_SUCCESS 0
typedef void (*range_callback_t)(task_t task, void *baton, unsigned type, uintptr_t ptr_addr, uintptr_t ptr_size);
'''
    if options.search_vm_regions:
        # Walk every VM region with mach_vm_region_recurse and report each
        # readable+writable region to the callback with type 64 (vm_region).
        expr += '''
typedef int vm_prot_t;
typedef unsigned int vm_inherit_t;
typedef unsigned long long memory_object_offset_t;
typedef unsigned int boolean_t;
typedef int vm_behavior_t;
typedef uint32_t vm32_object_id_t;
typedef natural_t mach_msg_type_number_t;
typedef uint64_t mach_vm_address_t;
typedef uint64_t mach_vm_offset_t;
typedef uint64_t mach_vm_size_t;
typedef uint64_t vm_map_offset_t;
typedef uint64_t vm_map_address_t;
typedef uint64_t vm_map_size_t;
#define VM_PROT_NONE ((vm_prot_t) 0x00)
#define VM_PROT_READ ((vm_prot_t) 0x01)
#define VM_PROT_WRITE ((vm_prot_t) 0x02)
#define VM_PROT_EXECUTE ((vm_prot_t) 0x04)
typedef struct vm_region_submap_short_info_data_64_t {
    vm_prot_t protection;
    vm_prot_t max_protection;
    vm_inherit_t inheritance;
    memory_object_offset_t offset; // offset into object/map
    unsigned int user_tag; // user tag on map entry
    unsigned int ref_count; // obj/map mappers, etc
    unsigned short shadow_depth; // only for obj
    unsigned char external_pager; // only for obj
    unsigned char share_mode; // see enumeration
    boolean_t is_submap; // submap vs obj
    vm_behavior_t behavior; // access behavior hint
    vm32_object_id_t object_id; // obj/map name, not a handle
    unsigned short user_wired_count;
} vm_region_submap_short_info_data_64_t;
#define VM_REGION_SUBMAP_SHORT_INFO_COUNT_64 ((mach_msg_type_number_t)(sizeof(vm_region_submap_short_info_data_64_t)/sizeof(int)))'''
        if user_init_code:
            expr += user_init_code
        expr += '''
task_t task = (task_t)mach_task_self();
mach_vm_address_t vm_region_base_addr;
mach_vm_size_t vm_region_size;
natural_t vm_region_depth;
vm_region_submap_short_info_data_64_t vm_region_info;
kern_return_t err;
for (vm_region_base_addr = 0, vm_region_size = 1; vm_region_size != 0; vm_region_base_addr += vm_region_size)
{
    mach_msg_type_number_t vm_region_info_size = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
    err = (kern_return_t)mach_vm_region_recurse (task,
                                                 &vm_region_base_addr,
                                                 &vm_region_size,
                                                 &vm_region_depth,
                                                 &vm_region_info,
                                                 &vm_region_info_size);
    if (err)
        break;
    // Check all read + write regions. This will cover the thread stacks
    // and any regions of memory like __DATA segments, that might contain
    // data we are looking for
    if (vm_region_info.protection & VM_PROT_WRITE &&
        vm_region_info.protection & VM_PROT_READ)
    {
        baton.callback (task,
                        &baton,
                        64,
                        vm_region_base_addr,
                        vm_region_size);
    }
}'''
    else:
        # Stack/segment range tables must be declared before the user's
        # baton so the callback snippets below can reference them.
        if options.search_stack:
            expr += get_thread_stack_ranges_struct(process)
        if options.search_segments:
            expr += get_sections_ranges_struct(process)
        if user_init_code:
            expr += user_init_code
        if options.search_heap:
            # Enumerate every malloc zone's in-use blocks through the zone
            # introspection API; ranges are reported with type 1.
            expr += '''
#define MALLOC_PTR_IN_USE_RANGE_TYPE 1
typedef struct vm_range_t {
    vm_address_t address;
    vm_size_t size;
} vm_range_t;
typedef kern_return_t (*memory_reader_t)(task_t task, vm_address_t remote_address, vm_size_t size, void **local_memory);
typedef void (*vm_range_recorder_t)(task_t task, void *baton, unsigned type, vm_range_t *range, unsigned size);
typedef struct malloc_introspection_t {
    kern_return_t (*enumerator)(task_t task, void *, unsigned type_mask, vm_address_t zone_address, memory_reader_t reader, vm_range_recorder_t recorder); /* enumerates all the malloc pointers in use */
} malloc_introspection_t;
typedef struct malloc_zone_t {
    void *reserved1[12];
    struct malloc_introspection_t *introspect;
} malloc_zone_t;
memory_reader_t task_peek = [](task_t task, vm_address_t remote_address, vm_size_t size, void **local_memory) -> kern_return_t {
    *local_memory = (void*) remote_address;
    return KERN_SUCCESS;
};
vm_address_t *zones = 0;
unsigned int num_zones = 0;task_t task = 0;
kern_return_t err = (kern_return_t)malloc_get_all_zones (task, task_peek, &zones, &num_zones);
if (KERN_SUCCESS == err)
{
    for (unsigned int i=0; i<num_zones; ++i)
    {
        const malloc_zone_t *zone = (const malloc_zone_t *)zones[i];
        if (zone && zone->introspect)
            zone->introspect->enumerator (task,
                                          &baton,
                                          MALLOC_PTR_IN_USE_RANGE_TYPE,
                                          (vm_address_t)zone,
                                          task_peek,
                                          [] (task_t task, void *baton, unsigned type, vm_range_t *ranges, unsigned size) -> void
                                          {
                                              range_callback_t callback = ((callback_baton_t *)baton)->callback;
                                              for (unsigned i=0; i<size; ++i)
                                              {
                                                  callback (task, baton, type, ranges[i].address, ranges[i].size);
                                              }
                                          });
    }
}'''
        if options.search_stack:
            # Thread stacks report type 8, red zones type 16.
            expr += '''
#ifdef NUM_STACKS
// Call the callback for the thread stack ranges
for (uint32_t i=0; i<NUM_STACKS; ++i) {
    range_callback(task, &baton, 8, stacks[i].base, stacks[i].size);
    if (STACK_RED_ZONE_SIZE > 0) {
        range_callback(task, &baton, 16, stacks[i].base - STACK_RED_ZONE_SIZE, STACK_RED_ZONE_SIZE);
    }
}
#endif'''
        if options.search_segments:
            # Segment ranges report type 32.
            expr += '''
#ifdef NUM_SEGMENTS
// Call the callback for all segments
for (uint32_t i=0; i<NUM_SEGMENTS; ++i)
    range_callback(task, &baton, 32, segments[i].base, segments[i].size);
#endif'''
    if user_return_code:
        expr += "\n%s" % (user_return_code,)
    return expr
def get_member_types_for_offset(value_type, offset, member_list):
    """Append to MEMBER_LIST the chain of members of VALUE_TYPE whose storage
    contains byte OFFSET, recursing into the matching member's own type so the
    caller can build a dotted member path.

    NOTE(review): if the first field starts at or before OFFSET, only the
    direct fields are scanned; base classes are NOT searched in that case,
    even when no field covers OFFSET — confirm this asymmetry is intentional.
    """
    member = value_type.GetFieldAtIndex(0)
    search_bases = False
    if member:
        if member.GetOffsetInBytes() <= offset:
            # OFFSET lands within the direct-field area: find the field whose
            # [start, start + size) interval covers it.
            for field_idx in range(value_type.GetNumberOfFields()):
                member = value_type.GetFieldAtIndex(field_idx)
                member_byte_offset = member.GetOffsetInBytes()
                member_end_byte_offset = member_byte_offset + member.type.size
                if member_byte_offset <= offset and offset < member_end_byte_offset:
                    member_list.append(member)
                    # Recurse with OFFSET rebased to the member's start.
                    get_member_types_for_offset(
                        member.type, offset - member_byte_offset, member_list)
                    return
        else:
            search_bases = True
    else:
        search_bases = True
    if search_bases:
        # OFFSET precedes the first field (or there are no fields): the byte
        # must live in a direct or virtual base-class subobject.
        for field_idx in range(value_type.GetNumberOfDirectBaseClasses()):
            member = value_type.GetDirectBaseClassAtIndex(field_idx)
            member_byte_offset = member.GetOffsetInBytes()
            member_end_byte_offset = member_byte_offset + member.type.size
            if member_byte_offset <= offset and offset < member_end_byte_offset:
                member_list.append(member)
                get_member_types_for_offset(
                    member.type, offset - member_byte_offset, member_list)
                return
        for field_idx in range(value_type.GetNumberOfVirtualBaseClasses()):
            member = value_type.GetVirtualBaseClassAtIndex(field_idx)
            member_byte_offset = member.GetOffsetInBytes()
            member_end_byte_offset = member_byte_offset + member.type.size
            if member_byte_offset <= offset and offset < member_end_byte_offset:
                member_list.append(member)
                get_member_types_for_offset(
                    member.type, offset - member_byte_offset, member_list)
                return
def append_regex_callback(option, opt, value, parser):
    """optparse callback for --omit-ivar-regex.

    Compiles VALUE as a regular expression and appends it to the option's
    ivar_regex_blacklist list; an invalid pattern is reported and skipped.

    Catches only re.error (instead of the original bare ``except:``) so that
    KeyboardInterrupt/SystemExit and programming errors such as a missing
    parser attribute are no longer silently swallowed.
    """
    try:
        ivar_regex = re.compile(value)
        parser.values.ivar_regex_blacklist.append(ivar_regex)
    except re.error:
        print('error: an exception was thrown when compiling the ivar regular expression for "%s"' % value)
def add_common_options(parser):
    """Install the command-line options shared by the heap search commands
    (ptr_refs, cstr_refs, ...) on PARSER.

    The options are kept in a declarative table of (flags, keywords) pairs and
    registered in one pass; flags, destinations, defaults and help strings are
    identical to the hand-written add_option calls this replaces.
    """
    option_table = [
        (('-v', '--verbose'),
         dict(action='store_true', dest='verbose', default=False,
              help='display verbose debug info')),
        (('-t', '--type'),
         dict(action='store_true', dest='print_type', default=False,
              help='print the full value of the type for each matching malloc block')),
        (('-o', '--po'),
         dict(action='store_true', dest='print_object_description', default=False,
              help='print the object descriptions for any matches')),
        (('-z', '--size'),
         dict(action='store_true', dest='show_size', default=False,
              help='print the allocation size in bytes')),
        (('-r', '--range'),
         dict(action='store_true', dest='show_range', default=False,
              help='print the allocation address range instead of just the allocation base address')),
        (('-m', '--memory'),
         dict(action='store_true', dest='memory', default=False,
              help='dump the memory for each matching block')),
        (('-f', '--format'),
         dict(type='string', dest='format', default=None,
              help='the format to use when dumping memory if --memory is specified')),
        (('-I', '--omit-ivar-regex'),
         dict(type='string', action='callback', callback=append_regex_callback,
              dest='ivar_regex_blacklist', default=[],
              help='specify one or more regular expressions used to backlist any matches that are in ivars')),
        (('-s', '--stack'),
         dict(action='store_true', dest='stack', default=False,
              help='gets the stack that allocated each malloc block if MallocStackLogging is enabled')),
        (('-S', '--stack-history'),
         dict(action='store_true', dest='stack_history', default=False,
              help='gets the stack history for all allocations whose start address matches each malloc block if MallocStackLogging is enabled')),
        (('-F', '--max-frames'),
         dict(type='int', dest='max_frames', default=128,
              help='the maximum number of stack frames to print when using the --stack or --stack-history options (default=128)')),
        (('-H', '--max-history'),
         dict(type='int', dest='max_history', default=16,
              help='the maximum number of stack history backtraces to print for each allocation when using the --stack-history option (default=16)')),
        (('-M', '--max-matches'),
         dict(type='int', dest='max_matches', default=32,
              help='the maximum number of matches to print')),
        (('-O', '--offset'),
         dict(type='int', dest='offset', default=-1,
              help='the matching data must be at this offset')),
        (('--ignore-stack',),
         dict(action='store_false', dest='search_stack', default=True,
              help="Don't search the stack when enumerating memory")),
        (('--ignore-heap',),
         dict(action='store_false', dest='search_heap', default=True,
              help="Don't search the heap allocations when enumerating memory")),
        (('--ignore-segments',),
         dict(action='store_false', dest='search_segments', default=True,
              help="Don't search readable executable segments enumerating memory")),
        (('-V', '--vm-regions'),
         dict(action='store_true', dest='search_vm_regions', default=False,
              help='Check all VM regions instead of searching the heap, stack and segments')),
    ]
    for flags, keywords in option_table:
        parser.add_option(*flags, **keywords)
def type_flags_to_string(type_flags):
    """Return a short human-readable name for a range-callback type bitmask.

    Zero means 'free'; otherwise the bits are tested in the same priority
    order as the original if/elif chain (malloc before free before generic,
    etc.). An unrecognized mask falls back to its hex representation.
    """
    if type_flags == 0:
        return 'free'
    for mask, label in ((2, 'malloc'),
                        (4, 'free'),
                        (1, 'generic'),
                        (8, 'stack'),
                        (16, 'stack (red zone)'),
                        (32, 'segment'),
                        (64, 'vm_region')):
        if type_flags & mask:
            return label
    return hex(type_flags)
def find_variable_containing_address(verbose, frame, match_addr):
    """Return the first variable in FRAME whose storage contains MATCH_ADDR.

    A variable matches when its load address equals MATCH_ADDR exactly, or
    when MATCH_ADDR falls inside [load_addr, load_addr + byte_size) for a
    variable of non-zero size. Returns None when nothing in the frame covers
    the address. When VERBOSE is true, each candidate and the match are
    printed.
    """
    for var in frame.GetVariables(True, True, True, True):
        var_addr = var.GetLoadAddress()
        if var_addr == lldb.LLDB_INVALID_ADDRESS:
            continue
        byte_size = var.GetType().GetByteSize()
        if verbose:
            print('frame #%u: [%#x - %#x) %s' % (frame.GetFrameID(), var.load_addr, var.load_addr + byte_size, var.name))
        exact_hit = var_addr == match_addr
        range_hit = byte_size > 0 and var_addr <= match_addr < var_addr + byte_size
        if exact_hit or range_hit:
            if verbose:
                print('match')
            return var
    return None
def find_frame_for_stack_address(process, addr):
    """Return the frame, across all threads of PROCESS, whose canonical frame
    address (CFA) lies closest above ADDR — i.e. the frame whose stack storage
    most plausibly contains ADDR. Returns None if no frame's CFA exceeds ADDR.
    """
    best_frame = None
    best_delta = sys.maxsize
    for thread in process:
        for frame in thread:
            cfa = frame.GetCFA()
            # Only frames whose CFA is strictly above the address qualify.
            if addr >= cfa:
                continue
            delta = cfa - addr
            if delta < best_delta:
                best_delta = delta
                best_frame = frame
    return best_frame
def type_flags_to_description(
        process,
        type_flags,
        ptr_addr,
        ptr_size,
        offset,
        match_addr):
    """Return a one-line description of a matched memory range.

    TYPE_FLAGS is the range-callback type bitmask (0/4 free, 2/1 malloc,
    8 stack, 16 stack red zone, 32 segment, 64 vm_region); PTR_ADDR/PTR_SIZE
    describe the containing block and OFFSET/MATCH_ADDR the hit inside it.
    For stack hits the owning frame and, when possible, the containing local
    variable are appended.

    Consistency fix: the stack branch previously duplicated the variable
    containment scan inline; it now calls find_variable_containing_address
    (with verbose=False), which performs the identical search.
    """
    show_offset = False
    if type_flags == 0 or type_flags & 4:
        type_str = 'free(%#x)' % (ptr_addr,)
    elif type_flags & 2 or type_flags & 1:
        type_str = 'malloc(%6u) -> %#x' % (ptr_size, ptr_addr)
        show_offset = True
    elif type_flags & 8:
        type_str = 'stack'
        frame = find_frame_for_stack_address(process, match_addr)
        if frame:
            type_str += ' in frame #%u of thread #%u: tid %#x' % (frame.GetFrameID(
            ), frame.GetThread().GetIndexID(), frame.GetThread().GetThreadID())
            # Same scan the old inline loop performed: exact-address match or
            # match_addr within [var_addr, var_addr + byte_size).
            matching_var = find_variable_containing_address(
                False, frame, match_addr)
            if matching_var:
                type_str += ' in variable at %#x:\n %s' % (
                    matching_var.GetLoadAddress(), matching_var)
    elif type_flags & 16:
        type_str = 'stack (red zone)'
    elif type_flags & 32:
        sb_addr = process.GetTarget().ResolveLoadAddress(ptr_addr + offset)
        type_str = 'segment [%#x - %#x), %s + %u, %s' % (
            ptr_addr, ptr_addr + ptr_size, sb_addr.section.name, sb_addr.offset, sb_addr)
    elif type_flags & 64:
        sb_addr = process.GetTarget().ResolveLoadAddress(ptr_addr + offset)
        type_str = 'vm_region [%#x - %#x), %s + %u, %s' % (
            ptr_addr, ptr_addr + ptr_size, sb_addr.section.name, sb_addr.offset, sb_addr)
    else:
        type_str = '%#x' % (ptr_addr,)
        show_offset = True
    if show_offset and offset != 0:
        type_str += ' + %-6u' % (offset,)
    return type_str
def dump_stack_history_entry(options, result, stack_history_entry, idx):
    """Append one symbolicated allocation backtrace to RESULT.

    STACK_HISTORY_ENTRY is an lldb.value view of a $malloc_stack_entry /
    malloc_stack_entry struct (address, type_flags, zero-terminated frames
    array). IDX labels the entry in the output; a zero address means an
    empty slot and produces no output.
    """
    address = int(stack_history_entry.address)
    if address:
        type_flags = int(stack_history_entry.type_flags)
        symbolicator = lldb.utils.symbolication.Symbolicator()
        symbolicator.target = lldb.debugger.GetSelectedTarget()
        type_str = type_flags_to_string(type_flags)
        result.AppendMessage(
            'stack[%u]: addr = 0x%x, type=%s, frames:' %
            (idx, address, type_str))
        # frame_idx counts printed lines (symbolication can expand one PC to
        # several inlined frames); idx is reused here as the raw PC index.
        frame_idx = 0
        idx = 0
        pc = int(stack_history_entry.frames[idx])
        while pc != 0:
            if pc >= 0x1000:
                frames = symbolicator.symbolicate(pc)
                if frames:
                    for frame in frames:
                        result.AppendMessage(
                            ' [%u] %s' %
                            (frame_idx, frame))
                        frame_idx += 1
                else:
                    # No symbol info: fall back to the bare PC value.
                    result.AppendMessage(' [%u] 0x%x' % (frame_idx, pc))
                    frame_idx += 1
                idx = idx + 1
                pc = int(stack_history_entry.frames[idx])
            else:
                # PCs below 0x1000 are treated as terminators.
                pc = 0
        if idx >= options.max_frames:
            result.AppendMessage(
                'warning: the max number of stack frames (%u) was reached, use the "--max-frames=<COUNT>" option to see more frames' %
                (options.max_frames))
        result.AppendMessage('')
def dump_stack_history_entries(options, result, addr, history):
    """Fetch and print MallocStackLogging backtraces for allocation ADDR.

    When HISTORY is truthy, every recorded alloc/free event for ADDR is
    enumerated via __mach_stack_logging_enumerate_records (up to
    options.max_history entries); otherwise only the current allocation
    backtrace is fetched via __mach_stack_logging_get_frames. Both paths
    evaluate an Objective-C++ expression in the target and hand each entry
    to dump_stack_history_entry.
    """
    # malloc_stack_entry *get_stack_history_for_address (const void * addr)
    expr_prefix = '''
typedef int kern_return_t;
typedef struct $malloc_stack_entry {
    uint64_t address;
    uint64_t argument;
    uint32_t type_flags;
    uint32_t num_frames;
    uint64_t frames[512];
    kern_return_t err;
} $malloc_stack_entry;
'''
    # Single-backtrace variant: fills one $malloc_stack_entry for ADDR.
    single_expr = '''
#define MAX_FRAMES %u
typedef unsigned task_t;
$malloc_stack_entry stack;
stack.address = 0x%x;
stack.type_flags = 2;
stack.num_frames = 0;
stack.frames[0] = 0;
uint32_t max_stack_frames = MAX_FRAMES;
stack.err = (kern_return_t)__mach_stack_logging_get_frames (
    (task_t)mach_task_self(),
    stack.address,
    &stack.frames[0],
    max_stack_frames,
    &stack.num_frames);
if (stack.num_frames < MAX_FRAMES)
    stack.frames[stack.num_frames] = 0;
else
    stack.frames[MAX_FRAMES-1] = 0;
stack''' % (options.max_frames, addr)
    # Full-history variant: a callback lambda collects up to MAX_HISTORY
    # records into a $malloc_stack_history struct.
    history_expr = '''
typedef int kern_return_t;
typedef unsigned task_t;
#define MAX_FRAMES %u
#define MAX_HISTORY %u
typedef struct mach_stack_logging_record_t {
    uint32_t type_flags;
    uint64_t stack_identifier;
    uint64_t argument;
    uint64_t address;
} mach_stack_logging_record_t;
typedef void (*enumerate_callback_t)(mach_stack_logging_record_t, void *);
typedef struct malloc_stack_entry {
    uint64_t address;
    uint64_t argument;
    uint32_t type_flags;
    uint32_t num_frames;
    uint64_t frames[MAX_FRAMES];
    kern_return_t frames_err;
} malloc_stack_entry;
typedef struct $malloc_stack_history {
    task_t task;
    unsigned idx;
    malloc_stack_entry entries[MAX_HISTORY];
} $malloc_stack_history;
$malloc_stack_history lldb_info = { (task_t)mach_task_self(), 0 };
uint32_t max_stack_frames = MAX_FRAMES;
enumerate_callback_t callback = [] (mach_stack_logging_record_t stack_record, void *baton) -> void {
    $malloc_stack_history *lldb_info = ($malloc_stack_history *)baton;
    if (lldb_info->idx < MAX_HISTORY) {
        malloc_stack_entry *stack_entry = &(lldb_info->entries[lldb_info->idx]);
        stack_entry->address = stack_record.address;
        stack_entry->type_flags = stack_record.type_flags;
        stack_entry->argument = stack_record.argument;
        stack_entry->num_frames = 0;
        stack_entry->frames[0] = 0;
        stack_entry->frames_err = (kern_return_t)__mach_stack_logging_frames_for_uniqued_stack (
            lldb_info->task,
            stack_record.stack_identifier,
            stack_entry->frames,
            (uint32_t)MAX_FRAMES,
            &stack_entry->num_frames);
        // Terminate the frames with zero if there is room
        if (stack_entry->num_frames < MAX_FRAMES)
            stack_entry->frames[stack_entry->num_frames] = 0;
    }
    ++lldb_info->idx;
};
(kern_return_t)__mach_stack_logging_enumerate_records (lldb_info.task, (uint64_t)0x%x, callback, &lldb_info);
lldb_info''' % (options.max_frames, options.max_history, addr)
    frame = lldb.debugger.GetSelectedTarget().GetProcess(
    ).GetSelectedThread().GetSelectedFrame()
    if history:
        expr = history_expr
    else:
        expr = single_expr
    expr_options = lldb.SBExpressionOptions()
    expr_options.SetIgnoreBreakpoints(True)
    expr_options.SetTimeoutInMicroSeconds(5 * 1000 * 1000)  # 5 second timeout
    expr_options.SetTryAllThreads(True)
    expr_options.SetLanguage(lldb.eLanguageTypeObjC_plus_plus)
    expr_options.SetPrefix(expr_prefix)
    expr_sbvalue = frame.EvaluateExpression(expr, expr_options)
    if options.verbose:
        print("expression:")
        print(expr)
        print("expression result:")
        print(expr_sbvalue)
    if expr_sbvalue.error.Success():
        if history:
            malloc_stack_history = lldb.value(expr_sbvalue)
            num_stacks = int(malloc_stack_history.idx)
            # idx counts every record seen, which may exceed the stored cap.
            if num_stacks <= options.max_history:
                i_max = num_stacks
            else:
                i_max = options.max_history
            for i in range(i_max):
                stack_history_entry = malloc_stack_history.entries[i]
                dump_stack_history_entry(
                    options, result, stack_history_entry, i)
            if num_stacks > options.max_history:
                result.AppendMessage(
                    'warning: the max number of stacks (%u) was reached, use the "--max-history=%u" option to see all of the stacks' %
                    (options.max_history, num_stacks))
        else:
            stack_history_entry = lldb.value(expr_sbvalue)
            dump_stack_history_entry(options, result, stack_history_entry, 0)
    else:
        result.AppendMessage(
            'error: expression failed "%s" => %s' %
            (expr, expr_sbvalue.error))
def display_match_results(
        process,
        result,
        options,
        arg_str_description,
        expr,
        print_no_matches,
        expr_prefix=None):
    """Evaluate EXPR in the target and print every $malloc_match it returns.

    EXPR must evaluate to an array of $malloc_match structs terminated by a
    zero addr. Each match is described (type, size, dynamic type, containing
    ivar path, optional memory dump and stack history) and appended to
    RESULT, subject to the filters in OPTIONS (offset, ivar regex blacklist,
    max matches). Returns the number of entries consumed, or 0 on error.

    Bug fix: the AUTORELEASE! page signature check compared a bytearray
    against a str literal, which is always False on Python 3; it now
    compares bytes to a bytes literal (content-equal on Python 2 as well).
    """
    frame = lldb.debugger.GetSelectedTarget().GetProcess(
    ).GetSelectedThread().GetSelectedFrame()
    if not frame:
        result.AppendMessage('error: invalid frame')
        return 0
    expr_options = lldb.SBExpressionOptions()
    expr_options.SetIgnoreBreakpoints(True)
    expr_options.SetFetchDynamicValue(lldb.eNoDynamicValues)
    expr_options.SetTimeoutInMicroSeconds(
        30 * 1000 * 1000)  # 30 second timeout
    expr_options.SetTryAllThreads(False)
    expr_options.SetLanguage(lldb.eLanguageTypeObjC_plus_plus)
    if expr_prefix:
        expr_options.SetPrefix(expr_prefix)
    expr_sbvalue = frame.EvaluateExpression(expr, expr_options)
    if options.verbose:
        print("expression:")
        print(expr)
        print("expression result:")
        print(expr_sbvalue)
    if expr_sbvalue.error.Success():
        match_value = lldb.value(expr_sbvalue)
        i = 0
        match_idx = 0
        while True:
            print_entry = True
            match_entry = match_value[i]
            i += 1
            if i > options.max_matches:
                result.AppendMessage(
                    'warning: the max number of matches (%u) was reached, use the --max-matches option to get more results' %
                    (options.max_matches))
                break
            malloc_addr = match_entry.addr.sbvalue.unsigned
            # A zero addr terminates the matches array.
            if malloc_addr == 0:
                break
            malloc_size = int(match_entry.size)
            offset = int(match_entry.offset)
            if options.offset >= 0 and options.offset != offset:
                print_entry = False
            else:
                match_addr = malloc_addr + offset
                type_flags = int(match_entry.type)
                #result.AppendMessage (hex(malloc_addr + offset))
                if type_flags == 64:
                    # vm_region hit: retry the lookup against heap, stack and
                    # segments for a more precise description; restore the
                    # caller's search options afterwards.
                    search_stack_old = options.search_stack
                    search_segments_old = options.search_segments
                    search_heap_old = options.search_heap
                    search_vm_regions = options.search_vm_regions
                    options.search_stack = True
                    options.search_segments = True
                    options.search_heap = True
                    options.search_vm_regions = False
                    if malloc_info_impl(lldb.debugger, result, options, [
                            hex(malloc_addr + offset)]):
                        print_entry = False
                    options.search_stack = search_stack_old
                    options.search_segments = search_segments_old
                    options.search_heap = search_heap_old
                    options.search_vm_regions = search_vm_regions
                if print_entry:
                    description = '%#16.16x: %s' % (match_addr, type_flags_to_description(
                        process, type_flags, malloc_addr, malloc_size, offset, match_addr))
                    if options.show_size:
                        description += ' <%5u>' % (malloc_size)
                    if options.show_range:
                        description += ' [%#x - %#x)' % (
                            malloc_addr, malloc_addr + malloc_size)
                    derefed_dynamic_value = None
                    dynamic_value = match_entry.addr.sbvalue.GetDynamicValue(
                        lldb.eDynamicCanRunTarget)
                    if dynamic_value.type.name == 'void *':
                        # No dynamic type info; a page-sized pointer block may
                        # be an autorelease pool page — check its signature.
                        if options.type == 'pointer' and malloc_size == 4096:
                            error = lldb.SBError()
                            process = expr_sbvalue.GetProcess()
                            target = expr_sbvalue.GetTarget()
                            data = bytearray(
                                process.ReadMemory(
                                    malloc_addr, 16, error))
                            # Compare bytes to bytes: the old
                            # `data == '\xa1...'` was always False on py3.
                            if bytes(data) == b'\xa1\xa1\xa1\xa1AUTORELEASE!':
                                ptr_size = target.addr_size
                                thread = process.ReadUnsignedFromMemory(
                                    malloc_addr + 16 + ptr_size, ptr_size, error)
                                # 4 bytes 0xa1a1a1a1
                                # 12 bytes 'AUTORELEASE!'
                                # ptr bytes autorelease insertion point
                                # ptr bytes pthread_t
                                # ptr bytes next colder page
                                # ptr bytes next hotter page
                                # 4 bytes this page's depth in the list
                                # 4 bytes high-water mark
                                description += ' AUTORELEASE! for pthread_t %#x' % (
                                    thread)
                        # else:
                        #     description += 'malloc(%u)' % (malloc_size)
                    # else:
                    #     description += 'malloc(%u)' % (malloc_size)
                    else:
                        derefed_dynamic_value = dynamic_value.deref
                        if derefed_dynamic_value:
                            derefed_dynamic_type = derefed_dynamic_value.type
                            derefed_dynamic_type_size = derefed_dynamic_type.size
                            derefed_dynamic_type_name = derefed_dynamic_type.name
                            description += ' '
                            description += derefed_dynamic_type_name
                            if offset < derefed_dynamic_type_size:
                                # Resolve the dotted member path that contains
                                # the matching offset, and apply the ivar
                                # regex blacklist to it.
                                member_list = list()
                                get_member_types_for_offset(
                                    derefed_dynamic_type, offset, member_list)
                                if member_list:
                                    member_path = ''
                                    for member in member_list:
                                        member_name = member.name
                                        if member_name:
                                            if member_path:
                                                member_path += '.'
                                            member_path += member_name
                                    if member_path:
                                        if options.ivar_regex_blacklist:
                                            for ivar_regex in options.ivar_regex_blacklist:
                                                if ivar_regex.match(
                                                        member_path):
                                                    print_entry = False
                                        description += '.%s' % (member_path)
                            else:
                                description += '%u bytes after %s' % (
                                    offset - derefed_dynamic_type_size, derefed_dynamic_type_name)
                        else:
                            # strip the "*" from the end of the name since we
                            # were unable to dereference this
                            description += dynamic_value.type.name[0:-1]
                if print_entry:
                    match_idx += 1
                    result_output = ''
                    if description:
                        result_output += description
                    if options.print_type and derefed_dynamic_value:
                        result_output += ' %s' % (derefed_dynamic_value)
                    if options.print_object_description and dynamic_value:
                        desc = dynamic_value.GetObjectDescription()
                        if desc:
                            result_output += '\n%s' % (desc)
                    if result_output:
                        result.AppendMessage(result_output)
                    if options.memory:
                        cmd_result = lldb.SBCommandReturnObject()
                        if options.format is None:
                            memory_command = "memory read --force 0x%x 0x%x" % (
                                malloc_addr, malloc_addr + malloc_size)
                        else:
                            memory_command = "memory read --force -f %s 0x%x 0x%x" % (
                                options.format, malloc_addr, malloc_addr + malloc_size)
                        if options.verbose:
                            result.AppendMessage(memory_command)
                        lldb.debugger.GetCommandInterpreter().HandleCommand(memory_command, cmd_result)
                        result.AppendMessage(cmd_result.GetOutput())
                    if options.stack_history:
                        dump_stack_history_entries(options, result, malloc_addr, 1)
                    elif options.stack:
                        dump_stack_history_entries(options, result, malloc_addr, 0)
        return i
    else:
        result.AppendMessage(str(expr_sbvalue.error))
    return 0
def get_ptr_refs_options():
    """Build and return the optparse parser for the ptr_refs command,
    including all of the common heap-search options."""
    ptr_refs_parser = optparse.OptionParser(
        description='''Searches all allocations on the heap for pointer values on
darwin user space programs. Any matches that were found will dump the malloc
blocks that contain the pointers and might be able to print what kind of
objects the pointers are contained in using dynamic type information in the
program.''',
        prog='ptr_refs',
        usage="usage: %prog [options] <EXPR> [EXPR ...]")
    add_common_options(ptr_refs_parser)
    return ptr_refs_parser
def find_variable(debugger, command, result, dict):
    """LLDB command: for each hex ADDR argument, search every frame of every
    thread for a local variable whose storage contains that address, and
    print the first one found."""
    parser = optparse.OptionParser(
        description='''Searches for a local variable in all frames that contains a hex ADDR.''',
        prog='find_variable',
        usage="usage: %prog [options] <ADDR> [ADDR ...]")
    parser.add_option(
        '-v',
        '--verbose',
        action='store_true',
        dest='verbose',
        help='display verbose debug info',
        default=False)
    try:
        (options, args) = parser.parse_args(shlex.split(command))
    except:
        # optparse may raise on bad input; mirror the original silent return.
        return
    process = debugger.GetSelectedTarget().GetProcess()
    if not process:
        result.AppendMessage('error: invalid process')
        return
    for arg in args:
        var_addr = int(arg, 16)
        print("Finding a variable with address %#x..." % (var_addr), file=result)
        found = False
        for thread in process:
            if found:
                break
            for frame in thread:
                var = find_variable_containing_address(
                    options.verbose, frame, var_addr)
                if var:
                    print(var)
                    found = True
                    break
def ptr_refs(debugger, command, result, dict):
    """LLDB command: search the target's memory (heap/stack/segments or VM
    regions, per the options) for pointer-sized words equal to each EXPR
    argument, and describe every malloc block that contains one.
    """
    command_args = shlex.split(command)
    parser = get_ptr_refs_options()
    try:
        (options, args) = parser.parse_args(command_args)
    except:
        return
    process = debugger.GetSelectedTarget().GetProcess()
    if not process:
        result.AppendMessage('error: invalid process')
        return
    frame = process.GetSelectedThread().GetSelectedFrame()
    if not frame:
        result.AppendMessage('error: invalid frame')
        return
    options.type = 'pointer'
    if options.format is None:
        options.format = "A"  # 'A' is "address" format
    if args:
        # When we initialize the expression, we must define any types that
        # we will need when looking at every allocation. We must also define
        # a type named callback_baton_t and make an instance named "baton"
        # and initialize it how ever we want to. The address of "baton" will
        # be passed into our range callback. callback_baton_t must contain
        # a member named "callback" whose type is "range_callback_t". This
        # will be used by our zone callbacks to call the range callback for
        # each malloc range.
        expr_prefix = '''
struct $malloc_match {
    void *addr;
    uintptr_t size;
    uintptr_t offset;
    uintptr_t type;
};
'''
        # The range callback scans each reported range for pointer-sized
        # words equal to baton.ptr and records up to MAX_MATCHES hits.
        user_init_code_format = '''
#define MAX_MATCHES %u
typedef struct callback_baton_t {
    range_callback_t callback;
    unsigned num_matches;
    $malloc_match matches[MAX_MATCHES];
    void *ptr;
} callback_baton_t;
range_callback_t range_callback = [](task_t task, void *baton, unsigned type, uintptr_t ptr_addr, uintptr_t ptr_size) -> void {
    callback_baton_t *lldb_info = (callback_baton_t *)baton;
    typedef void* T;
    const unsigned size = sizeof(T);
    T *array = (T*)ptr_addr;
    for (unsigned idx = 0; ((idx + 1) * sizeof(T)) <= ptr_size; ++idx) {
        if (array[idx] == lldb_info->ptr) {
            if (lldb_info->num_matches < MAX_MATCHES) {
                lldb_info->matches[lldb_info->num_matches].addr = (void*)ptr_addr;
                lldb_info->matches[lldb_info->num_matches].size = ptr_size;
                lldb_info->matches[lldb_info->num_matches].offset = idx*sizeof(T);
                lldb_info->matches[lldb_info->num_matches].type = type;
                ++lldb_info->num_matches;
            }
        }
    }
};
callback_baton_t baton = { range_callback, 0, {0}, (void *)%s };
'''
        # We must also define a snippet of code to be run that returns
        # the result of the expression we run.
        # Here we return NULL if our pointer was not found in any malloc blocks,
        # and we return the address of the matches array so we can then access
        # the matching results
        user_return_code = '''if (baton.num_matches < MAX_MATCHES)
    baton.matches[baton.num_matches].addr = 0; // Terminate the matches array
baton.matches'''
        # Iterate through all of our pointer expressions and display the
        # results
        for ptr_expr in args:
            user_init_code = user_init_code_format % (
                options.max_matches, ptr_expr)
            expr = get_iterate_memory_expr(
                options, process, user_init_code, user_return_code)
            arg_str_description = 'malloc block containing pointer %s' % ptr_expr
            display_match_results(
                process,
                result,
                options,
                arg_str_description,
                expr,
                True,
                expr_prefix)
    else:
        result.AppendMessage('error: no pointer arguments were given')
def get_cstr_refs_options():
    """Build and return the optparse parser for the ``cstr_refs`` command.

    The parser carries the command's prog name, usage and description so the
    same text serves both "--help" output and the registered LLDB help.
    """
    parser = optparse.OptionParser(
        description='''Searches all allocations on the heap for C string values on
darwin user space programs. Any matches that were found will dump the malloc
blocks that contain the C strings and might be able to print what kind of
objects the pointers are contained in using dynamic type information in the
program.''',
        prog='cstr_refs',
        usage="usage: %prog [options] <CSTR> [CSTR ...]")
    # Shared options (format, max matches, etc.) used by all heap commands.
    add_common_options(parser)
    return parser
def cstr_refs(debugger, command, result, dict):
    """LLDB command: search every malloc block on the heap for the given
    C string argument(s) and dump the blocks that contain a match.

    debugger -- lldb.SBDebugger issuing the command
    command  -- raw command line; tokenized with shlex, parsed with optparse
    result   -- lldb.SBCommandReturnObject used for output and errors
    dict     -- LLDB session dictionary (unused)
    """
    command_args = shlex.split(command)
    parser = get_cstr_refs_options()
    try:
        (options, args) = parser.parse_args(command_args)
    except:
        # optparse prints its own diagnostic and raises SystemExit
        # (e.g. for --help or a bad option), so just bail out quietly.
        return
    process = debugger.GetSelectedTarget().GetProcess()
    if not process:
        result.AppendMessage('error: invalid process')
        return
    frame = process.GetSelectedThread().GetSelectedFrame()
    if not frame:
        result.AppendMessage('error: invalid frame')
        return
    # Tell display_match_results() how to interpret and render each hit.
    options.type = 'cstr'
    if options.format is None:
        options.format = "Y"  # 'Y' is "bytes with ASCII" format
    if args:
        # When we initialize the expression, we must define any types that
        # we will need when looking at every allocation. We must also define
        # a type named callback_baton_t and make an instance named "baton"
        # and initialize it how ever we want to. The address of "baton" will
        # be passed into our range callback. callback_baton_t must contain
        # a member named "callback" whose type is "range_callback_t". This
        # will be used by our zone callbacks to call the range callback for
        # each malloc range.
        expr_prefix = '''
struct $malloc_match {
    void *addr;
    uintptr_t size;
    uintptr_t offset;
    uintptr_t type;
};
'''
        # C++ snippet run in the inferior: memcmp()s every window of each
        # malloc block against the target string. %u is max matches, %s is
        # the C string literal being searched for.
        user_init_code_format = '''
#define MAX_MATCHES %u
typedef struct callback_baton_t {
    range_callback_t callback;
    unsigned num_matches;
    $malloc_match matches[MAX_MATCHES];
    const char *cstr;
    unsigned cstr_len;
} callback_baton_t;
range_callback_t range_callback = [](task_t task, void *baton, unsigned type, uintptr_t ptr_addr, uintptr_t ptr_size) -> void {
    callback_baton_t *lldb_info = (callback_baton_t *)baton;
    if (lldb_info->cstr_len < ptr_size) {
        const char *begin = (const char *)ptr_addr;
        const char *end = begin + ptr_size - lldb_info->cstr_len;
        for (const char *s = begin; s < end; ++s) {
            if ((int)memcmp(s, lldb_info->cstr, lldb_info->cstr_len) == 0) {
                if (lldb_info->num_matches < MAX_MATCHES) {
                    lldb_info->matches[lldb_info->num_matches].addr = (void*)ptr_addr;
                    lldb_info->matches[lldb_info->num_matches].size = ptr_size;
                    lldb_info->matches[lldb_info->num_matches].offset = s - begin;
                    lldb_info->matches[lldb_info->num_matches].type = type;
                    ++lldb_info->num_matches;
                }
            }
        }
    }
};
const char *cstr = "%s";
callback_baton_t baton = { range_callback, 0, {0}, cstr, (unsigned)strlen(cstr) };'''
        # We must also define a snippet of code to be run that returns
        # the result of the expression we run.
        # Here we return NULL if our pointer was not found in any malloc blocks,
        # and we return the address of the matches array so we can then access
        # the matching results
        user_return_code = '''if (baton.num_matches < MAX_MATCHES)
    baton.matches[baton.num_matches].addr = 0; // Terminate the matches array
baton.matches'''
        # Iterate through all of our pointer expressions and display the
        # results
        for cstr in args:
            user_init_code = user_init_code_format % (
                options.max_matches, cstr)
            expr = get_iterate_memory_expr(
                options, process, user_init_code, user_return_code)
            arg_str_description = 'malloc block containing "%s"' % cstr
            display_match_results(
                process,
                result,
                options,
                arg_str_description,
                expr,
                True,
                expr_prefix)
    else:
        result.AppendMessage(
            'error: command takes one or more C string arguments')
def get_malloc_info_options():
    """Build and return the optparse parser for the ``malloc_info`` command."""
    parser = optparse.OptionParser(
        description='''Searches the heap a malloc block that contains the addresses
specified as one or more address expressions. Any matches that were found will
dump the malloc blocks that match or contain the specified address. The matching
blocks might be able to show what kind of objects they are using dynamic type
information in the program.''',
        prog='malloc_info',
        usage="usage: %prog [options] <EXPR> [EXPR ...]")
    # Shared options (format, max matches, etc.) used by all heap commands.
    add_common_options(parser)
    return parser
def malloc_info(debugger, command, result, dict):
    """LLDB command entry point for ``malloc_info``: parse the command line
    and delegate the actual lookup to malloc_info_impl()."""
    argv = shlex.split(command)
    parser = get_malloc_info_options()
    try:
        options, args = parser.parse_args(argv)
    except:
        # optparse already reported the problem (it raises SystemExit).
        return
    malloc_info_impl(debugger, result, options, args)
def malloc_info_impl(debugger, result, options, args):
    """For each address expression in args, find the malloc block that
    contains that address and display it.

    Returns the total number of matching blocks, or 0 when no arguments were
    given. (The early-exit error paths return None; callers treat any falsy
    result the same way.)
    """
    # We are specifically looking for something on the heap only
    options.type = 'malloc_info'
    process = debugger.GetSelectedTarget().GetProcess()
    if not process:
        result.AppendMessage('error: invalid process')
        return
    frame = process.GetSelectedThread().GetSelectedFrame()
    if not frame:
        result.AppendMessage('error: invalid frame')
        return
    # Result-record type shared with the expression snippets below.
    expr_prefix = '''
struct $malloc_match {
    void *addr;
    uintptr_t size;
    uintptr_t offset;
    uintptr_t type;
};
'''
    # C++ snippet run in the inferior: records at most one malloc block whose
    # [base, base+size) range contains the pointer substituted for %s.
    user_init_code_format = '''
typedef struct callback_baton_t {
    range_callback_t callback;
    unsigned num_matches;
    $malloc_match matches[2]; // Two items so they can be NULL terminated
    void *ptr;
} callback_baton_t;
range_callback_t range_callback = [](task_t task, void *baton, unsigned type, uintptr_t ptr_addr, uintptr_t ptr_size) -> void {
    callback_baton_t *lldb_info = (callback_baton_t *)baton;
    if (lldb_info->num_matches == 0) {
        uint8_t *p = (uint8_t *)lldb_info->ptr;
        uint8_t *lo = (uint8_t *)ptr_addr;
        uint8_t *hi = lo + ptr_size;
        if (lo <= p && p < hi) {
            lldb_info->matches[lldb_info->num_matches].addr = (void*)ptr_addr;
            lldb_info->matches[lldb_info->num_matches].size = ptr_size;
            lldb_info->matches[lldb_info->num_matches].offset = p - lo;
            lldb_info->matches[lldb_info->num_matches].type = type;
            lldb_info->num_matches = 1;
        }
    }
};
callback_baton_t baton = { range_callback, 0, {0}, (void *)%s };
baton.matches[0].addr = 0;
baton.matches[1].addr = 0;'''
    if args:
        total_matches = 0
        for ptr_expr in args:
            user_init_code = user_init_code_format % (ptr_expr)
            # 'baton.matches' is the return snippet: a NULL-terminated array.
            expr = get_iterate_memory_expr(
                options, process, user_init_code, 'baton.matches')
            arg_str_description = 'malloc block that contains %s' % ptr_expr
            total_matches += display_match_results(
                process, result, options, arg_str_description, expr, True, expr_prefix)
        return total_matches
    else:
        result.AppendMessage(
            'error: command takes one or more pointer expressions')
        return 0
def get_thread_stack_ranges_struct(process):
    '''Create code that defines a structure that represents threads stack bounds
    for all threads. It returns a static sized array initialized with all of
    the tid, base, size structs for all the threads. Returns an empty string
    when there is no process or no thread has a non-empty stack extent.'''
    stacks = []
    if process:
        for thread in process:
            # Seed the extent with the innermost frame's SP, then widen it
            # over every frame in the thread.
            extents = [thread.frame[0].sp]
            for frame in thread.frames:
                extents.append(frame.sp)
            lo = min(extents)
            hi = max(extents)
            if lo < hi:
                stacks.append({
                    'tid': thread.GetThreadID(),
                    'base': lo,
                    'size': hi - lo,
                    'index': len(stacks),
                })
    if not stacks:
        return ''
    header = '''
#define NUM_STACKS %u
#define STACK_RED_ZONE_SIZE %u
typedef struct thread_stack_t { uint64_t tid, base, size; } thread_stack_t;
thread_stack_t stacks[NUM_STACKS];''' % (len(stacks), process.target.GetStackRedZoneSize())
    entries = ''.join('''
stacks[%(index)u].tid = 0x%(tid)x;
stacks[%(index)u].base = 0x%(base)x;
stacks[%(index)u].size = 0x%(size)x;''' % stack for stack in stacks)
    return header + entries
def get_sections_ranges_struct(process):
    '''Create code that defines a structure that represents all segments that
    can contain data for all images in "target". It returns a static sized
    array initialized with all of base, size structs for all the threads.
    Returns an empty string when no data-bearing sections are found.'''
    target = process.target
    # Sections that cannot contain heap pointers and are skipped.
    uninteresting = ('__TEXT', '__LINKEDIT', '__PAGEZERO')
    segments = []
    for module in target.modules:
        for sect_idx in range(module.GetNumSections()):
            section = module.GetSectionAtIndex(sect_idx)
            if not section:
                break
            if section.name in uninteresting:
                continue
            base = section.GetLoadAddress(target)
            size = section.GetByteSize()
            if base != lldb.LLDB_INVALID_ADDRESS and size > 0:
                segments.append({'base': base, 'size': size})
    if not segments:
        return ''
    result = '''
#define NUM_SEGMENTS %u
typedef struct segment_range_t { uint64_t base; uint32_t size; } segment_range_t;
segment_range_t segments[NUM_SEGMENTS];''' % (len(segments),)
    for idx, segment in enumerate(segments):
        segment['index'] = idx
        result += '''
segments[%(index)u].base = 0x%(base)x;
segments[%(index)u].size = 0x%(size)x;''' % segment
    return result
def section_ptr_refs(debugger, command, result, dict):
    """LLDB command: search the contents of named sections (not the heap)
    for pointer values given as expressions.

    At least one --section option is required; each named section of every
    loaded module is scanned for each pointer expression.
    """
    command_args = shlex.split(command)
    usage = "usage: %prog [options] <EXPR> [EXPR ...]"
    description = '''Searches section contents for pointer values in darwin user space programs.'''
    parser = optparse.OptionParser(
        description=description,
        prog='section_ptr_refs',
        usage=usage)
    add_common_options(parser)
    parser.add_option(
        '--section',
        action='append',
        type='string',
        dest='section_names',
        help='section name to search',
        default=list())
    try:
        (options, args) = parser.parse_args(command_args)
    except:
        # optparse prints its own diagnostic and raises SystemExit.
        return
    options.type = 'pointer'
    sections = list()
    section_modules = list()  # module owning the section at the same index
    if not options.section_names:
        result.AppendMessage(
            'error: at least one section must be specified with the --section option')
        return
    target = debugger.GetSelectedTarget()
    # Collect every matching section across every loaded module.
    for module in target.modules:
        for section_name in options.section_names:
            section = module.section[section_name]
            if section:
                sections.append(section)
                section_modules.append(module)
    if sections:
        # The search helper lives in a dylib that must be loaded into the
        # inferior first. (Variable name 'dylid_load_err' is a historic typo.)
        dylid_load_err = load_dylib()
        if dylid_load_err:
            result.AppendMessage(dylid_load_err)
            return
        frame = target.GetProcess().GetSelectedThread().GetSelectedFrame()
        for expr_str in args:
            for (idx, section) in enumerate(sections):
                # The "llu" suffixes make the substituted literals unsigned
                # long long in the evaluated C expression.
                expr = 'find_pointer_in_memory(0x%xllu, %ullu, (void *)%s)' % (
                    section.addr.load_addr, section.size, expr_str)
                arg_str_description = 'section %s.%s containing "%s"' % (
                    section_modules[idx].file.fullpath, section.name, expr_str)
                num_matches = display_match_results(
                    target.GetProcess(), result, options, arg_str_description, expr, False)
                # Deduct the hits from the overall match budget and stop
                # altogether once it is exhausted.
                if num_matches:
                    if num_matches < options.max_matches:
                        options.max_matches = options.max_matches - num_matches
                    else:
                        options.max_matches = 0
                    if options.max_matches == 0:
                        return
    else:
        result.AppendMessage(
            'error: no sections were found that match any of %s' %
            (', '.join(
                options.section_names)))
def get_objc_refs_options():
    """Build and return the optparse parser for the ``objc_refs`` command."""
    parser = optparse.OptionParser(
        description='''Searches all allocations on the heap for instances of
objective C classes, or any classes that inherit from the specified classes
in darwin user space programs. Any matches that were found will dump the malloc
blocks that contain the C strings and might be able to print what kind of
objects the pointers are contained in using dynamic type information in the
program.''',
        prog='objc_refs',
        usage="usage: %prog [options] <CLASS> [CLASS ...]")
    # Shared options (format, max matches, etc.) used by all heap commands.
    add_common_options(parser)
    return parser
def objc_refs(debugger, command, result, dict):
    """LLDB command: scan every malloc block for instances of the given
    Objective-C class(es), including instances of their subclasses.

    debugger -- lldb.SBDebugger issuing the command
    command  -- raw command line; class names are the positional arguments
    result   -- lldb.SBCommandReturnObject used for output and errors
    dict     -- LLDB session dictionary (unused)

    Fix: the no-arguments error previously said "C string arguments"
    (copied from cstr_refs); this command takes class names.
    """
    command_args = shlex.split(command)
    parser = get_objc_refs_options()
    try:
        (options, args) = parser.parse_args(command_args)
    except:
        # optparse prints its own diagnostic and raises SystemExit.
        return
    process = debugger.GetSelectedTarget().GetProcess()
    if not process:
        result.AppendMessage('error: invalid process')
        return
    frame = process.GetSelectedThread().GetSelectedFrame()
    if not frame:
        result.AppendMessage('error: invalid frame')
        return
    options.type = 'isa'
    if options.format is None:
        options.format = "A"  # 'A' is "address" format
    # First ask the runtime how many classes exist so the expression below
    # can size its Class array.
    expr_options = lldb.SBExpressionOptions()
    expr_options.SetIgnoreBreakpoints(True)
    expr_options.SetTimeoutInMicroSeconds(
        3 * 1000 * 1000)  # 3 second timeout
    expr_options.SetTryAllThreads(True)
    expr_options.SetLanguage(lldb.eLanguageTypeObjC_plus_plus)
    num_objc_classes_value = frame.EvaluateExpression(
        "(int)objc_getClassList((void *)0, (int)0)", expr_options)
    if not num_objc_classes_value.error.Success():
        result.AppendMessage('error: %s' %
                             num_objc_classes_value.error.GetCString())
        return
    num_objc_classes = num_objc_classes_value.GetValueAsUnsigned()
    if num_objc_classes == 0:
        result.AppendMessage('error: no objective C classes in program')
        return
    if args:
        # When we initialize the expression, we must define any types that
        # we will need when looking at every allocation. We must also define
        # a type named callback_baton_t and make an instance named "baton"
        # and initialize it how ever we want to. The address of "baton" will
        # be passed into our range callback. callback_baton_t must contain
        # a member named "callback" whose type is "range_callback_t". This
        # will be used by our zone callbacks to call the range callback for
        # each malloc range.
        expr_prefix = '''
struct $malloc_match {
    void *addr;
    uintptr_t size;
    uintptr_t offset;
    uintptr_t type;
};
'''
        # C++ snippet run in the inferior: treats the first word of each
        # allocation as an isa pointer, validates it against the sorted list
        # of all runtime classes (bsearch), then matches the class itself or
        # walks superclasses. Substitutions: max matches, class count, isa.
        user_init_code_format = '''
#define MAX_MATCHES %u
typedef int (*compare_callback_t)(const void *a, const void *b);
typedef struct callback_baton_t {
    range_callback_t callback;
    compare_callback_t compare_callback;
    unsigned num_matches;
    $malloc_match matches[MAX_MATCHES];
    void *isa;
    Class classes[%u];
} callback_baton_t;
compare_callback_t compare_callback = [](const void *a, const void *b) -> int {
    Class a_ptr = *(Class *)a;
    Class b_ptr = *(Class *)b;
    if (a_ptr < b_ptr) return -1;
    if (a_ptr > b_ptr) return +1;
    return 0;
};
typedef Class (*class_getSuperclass_type)(void *isa);
range_callback_t range_callback = [](task_t task, void *baton, unsigned type, uintptr_t ptr_addr, uintptr_t ptr_size) -> void {
    class_getSuperclass_type class_getSuperclass_impl = (class_getSuperclass_type)class_getSuperclass;
    callback_baton_t *lldb_info = (callback_baton_t *)baton;
    if (sizeof(Class) <= ptr_size) {
        Class *curr_class_ptr = (Class *)ptr_addr;
        Class *matching_class_ptr = (Class *)bsearch (curr_class_ptr,
                                                      (const void *)lldb_info->classes,
                                                      sizeof(lldb_info->classes)/sizeof(Class),
                                                      sizeof(Class),
                                                      lldb_info->compare_callback);
        if (matching_class_ptr) {
            bool match = false;
            if (lldb_info->isa) {
                Class isa = *curr_class_ptr;
                if (lldb_info->isa == isa)
                    match = true;
                else { // if (lldb_info->objc.match_superclasses) {
                    Class super = class_getSuperclass_impl(isa);
                    while (super) {
                        if (super == lldb_info->isa) {
                            match = true;
                            break;
                        }
                        super = class_getSuperclass_impl(super);
                    }
                }
            }
            else
                match = true;
            if (match) {
                if (lldb_info->num_matches < MAX_MATCHES) {
                    lldb_info->matches[lldb_info->num_matches].addr = (void*)ptr_addr;
                    lldb_info->matches[lldb_info->num_matches].size = ptr_size;
                    lldb_info->matches[lldb_info->num_matches].offset = 0;
                    lldb_info->matches[lldb_info->num_matches].type = type;
                    ++lldb_info->num_matches;
                }
            }
        }
    }
};
callback_baton_t baton = { range_callback, compare_callback, 0, {0}, (void *)0x%x, {0} };
int nc = (int)objc_getClassList(baton.classes, sizeof(baton.classes)/sizeof(Class));
(void)qsort (baton.classes, sizeof(baton.classes)/sizeof(Class), sizeof(Class), compare_callback);'''
        # We must also define a snippet of code to be run that returns
        # the result of the expression we run.
        # Here we return NULL if our pointer was not found in any malloc blocks,
        # and we return the address of the matches array so we can then access
        # the matching results
        user_return_code = '''if (baton.num_matches < MAX_MATCHES)
    baton.matches[baton.num_matches].addr = 0; // Terminate the matches array
baton.matches'''
        # Iterate through all of our ObjC class name arguments
        for class_name in args:
            # Resolve the class name to its isa pointer in the inferior.
            addr_expr_str = "(void *)[%s class]" % class_name
            expr_options = lldb.SBExpressionOptions()
            expr_options.SetIgnoreBreakpoints(True)
            expr_options.SetTimeoutInMicroSeconds(
                1 * 1000 * 1000)  # 1 second timeout
            expr_options.SetTryAllThreads(True)
            expr_options.SetLanguage(lldb.eLanguageTypeObjC_plus_plus)
            expr_sbvalue = frame.EvaluateExpression(
                addr_expr_str, expr_options)
            if expr_sbvalue.error.Success():
                isa = expr_sbvalue.unsigned
                if isa:
                    options.type = 'isa'
                    result.AppendMessage(
                        'Searching for all instances of classes or subclasses of "%s" (isa=0x%x)' %
                        (class_name, isa))
                    user_init_code = user_init_code_format % (
                        options.max_matches, num_objc_classes, isa)
                    expr = get_iterate_memory_expr(
                        options, process, user_init_code, user_return_code)
                    arg_str_description = 'objective C classes with isa 0x%x' % isa
                    display_match_results(
                        process,
                        result,
                        options,
                        arg_str_description,
                        expr,
                        True,
                        expr_prefix)
                else:
                    result.AppendMessage(
                        'error: Can\'t find isa for an ObjC class named "%s"' %
                        (class_name))
            else:
                result.AppendMessage(
                    'error: expression error for "%s": %s' %
                    (addr_expr_str, expr_sbvalue.error))
    else:
        result.AppendMessage(
            'error: command takes one or more Objective-C class name arguments')
if __name__ == '__main__':
    # Running standalone (outside an interactive LLDB session): create a
    # debugger so the HandleCommand calls below have something to talk to.
    lldb.debugger = lldb.SBDebugger.Create()

# Make the options so we can generate the help text for the new LLDB
# command line command prior to registering it with LLDB below. This way
# if clients in LLDB type "help malloc_info", they will see the exact same
# output as typing "malloc_info --help".
ptr_refs.__doc__ = get_ptr_refs_options().format_help()
cstr_refs.__doc__ = get_cstr_refs_options().format_help()
malloc_info.__doc__ = get_malloc_info_options().format_help()
objc_refs.__doc__ = get_objc_refs_options().format_help()
# Register each command; %s is this module's name so the registration works
# regardless of how the script was imported.
lldb.debugger.HandleCommand(
    'command script add -f %s.ptr_refs ptr_refs' %
    __name__)
lldb.debugger.HandleCommand(
    'command script add -f %s.cstr_refs cstr_refs' %
    __name__)
lldb.debugger.HandleCommand(
    'command script add -f %s.malloc_info malloc_info' %
    __name__)
lldb.debugger.HandleCommand(
    'command script add -f %s.find_variable find_variable' %
    __name__)
# lldb.debugger.HandleCommand('command script add -f %s.heap heap' % package_name)
# lldb.debugger.HandleCommand('command script add -f %s.section_ptr_refs section_ptr_refs' % package_name)
# lldb.debugger.HandleCommand('command script add -f %s.stack_ptr_refs stack_ptr_refs' % package_name)
lldb.debugger.HandleCommand(
    'command script add -f %s.objc_refs objc_refs' %
    __name__)
print('"malloc_info", "ptr_refs", "cstr_refs", "find_variable", and "objc_refs" commands have been installed, use the "--help" options on these commands for detailed help.')
| endlessm/chromium-browser | third_party/llvm/lldb/examples/darwin/heap_find/heap.py | Python | bsd-3-clause | 61,273 |
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
import 'chrome://resources/cr_elements/md_select_css.m.js';
import './print_preview_shared_css.js';
import './settings_section.js';
import {PolymerElement} from 'chrome://resources/polymer/v3_0/polymer/polymer_bundled.min.js';
import {getTemplate} from './color_settings.html.js';
import {SelectMixin} from './select_mixin.js';
import {SettingsMixin} from './settings_mixin.js';
const PrintPreviewColorSettingsElementBase =
SettingsMixin(SelectMixin(PolymerElement));
/** Dropdown for choosing color vs. black-and-white printing. */
export class PrintPreviewColorSettingsElement extends
    PrintPreviewColorSettingsElementBase {
  static get is() {
    return 'print-preview-color-settings';
  }

  static get template() {
    return getTemplate();
  }

  static get properties() {
    return {
      // Set by the parent; true while color selection should be unavailable.
      disabled: Boolean,

      // True when either `disabled` is set or the setting is policy-managed.
      disabled_: {
        type: Boolean,
        computed: 'computeDisabled_(disabled, settings.color.setByPolicy)',
      },
    };
  }

  static get observers() {
    return ['onColorSettingChange_(settings.color.value)'];
  }

  disabled: boolean;
  private disabled_: boolean;

  /**
   * Keeps the <select> in sync with the color setting:
   * true -> 'color', false -> 'bw'.
   */
  private onColorSettingChange_(newValue: boolean) {
    this.selectedValue = newValue ? 'color' : 'bw';
  }

  /**
   * @param disabled Whether color selection is disabled.
   * @param managed Whether color selection is managed.
   * @return Whether drop-down should be disabled.
   */
  private computeDisabled_(disabled: boolean, managed: boolean): boolean {
    return disabled || managed;
  }

  /** @param value The new select value. */
  onProcessSelectChange(value: string) {
    this.setSetting('color', value === 'color');
  }
}
// Teach TypeScript the tag-name -> element-class mapping for this element.
declare global {
  interface HTMLElementTagNameMap {
    'print-preview-color-settings': PrintPreviewColorSettingsElement;
  }
}

// Register the custom element with the browser.
customElements.define(
    PrintPreviewColorSettingsElement.is, PrintPreviewColorSettingsElement);
| chromium/chromium | chrome/browser/resources/print_preview/ui/color_settings.ts | TypeScript | bsd-3-clause | 1,983 |
<?php
/**
* ezcCacheStorageFileEvalArrayTest
*
* @package Cache
* @subpackage Tests
* @version 1.5
* @copyright Copyright (C) 2005-2009 eZ Systems AS. All rights reserved.
* @license http://ez.no/licenses/new_bsd New BSD License
*/
/**
* Require parent test class.
*/
require_once 'storage_test.php';
/**
* Test suite for ezcStorageFileEvalArray class.
*
* @package Cache
* @subpackage Tests
*/
class ezcCacheStorageFileEvalArrayTest extends ezcCacheStorageTest
{
    /**
     * Returns the PHPUnit test suite for this test case class.
     */
    public static function suite()
    {
        // __CLASS__ resolves to this class name, matching the old literal.
        return new PHPUnit_Framework_TestSuite( __CLASS__ );
    }
}
?>
| faclib/ezcomponents | Cache/tests/storage_file_evalarray_test.php | PHP | bsd-3-clause | 609 |
package org.apollo.game.message.impl;
import org.apollo.net.message.Message;
/**
* A {@link Message} sent to the client to open up the enter amount interface.
*
* @author Graham
*/
public final class EnterAmountMessage extends Message {
} | garyttierney/apollo | game/src/main/org/apollo/game/message/impl/EnterAmountMessage.java | Java | isc | 245 |
require 'spec_helper'

# Specs for the GitHub import label formatter: converting a raw GitHub label
# payload into GitLab Label attributes and persisting it idempotently.
describe Gitlab::GithubImport::LabelFormatter, lib: true do
  let(:project) { create(:project) }
  # Stand-in for the GitHub API label object; only #name and #color are read.
  let(:raw) { double(name: 'improvements', color: 'e6e6e6') }

  subject { described_class.new(project, raw) }

  describe '#attributes' do
    it 'returns formatted attributes' do
      # Note: the formatter prefixes the color with '#'.
      expect(subject.attributes).to eq({
        project: project,
        title: 'improvements',
        color: '#e6e6e6'
      })
    end
  end

  describe '#create!' do
    context 'when label does not exist' do
      it 'creates a new label' do
        expect { subject.create! }.to change(Label, :count).by(1)
      end
    end

    context 'when label exists' do
      it 'does not create a new label' do
        project.labels.create(name: raw.name)

        expect { subject.create! }.not_to change(Label, :count)
      end
    end
  end
end
| shinexiao/gitlabhq | spec/lib/gitlab/github_import/label_formatter_spec.rb | Ruby | mit | 851 |
// Copyright 2009 the Sputnik authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/**
* @name: S15.9.5.11_A2_T1;
* @section: 15.9.5.11;
* @assertion: The "length" property of the "getUTCFullYear" is 0;
* @description: The "length" property of the "getUTCFullYear" is 0;
*/
// Cache the function under test so each check reads naturally.
var getUTCFullYear = Date.prototype.getUTCFullYear;

if (!getUTCFullYear.hasOwnProperty("length")) {
  $ERROR('#1: The getUTCFullYear has a "length" property');
}

if (getUTCFullYear.length !== 0) {
  $ERROR('#2: The "length" property of the getUTCFullYear is 0');
}
| seraum/nectarjs | tests/ES3/Conformance/15_Native_ECMA_Script_Objects/15.9_Date_Objects/15.9.5_Properties_of_the_Date_Prototype_Object/15.9.5.11_Date.prototype.getUTCFullYear/S15.9.5.11_A2_T1.js | JavaScript | mit | 579 |
/*
Copyright (c) 2015, Marc Clifton
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or other
materials provided with the distribution.
* Neither the name of MyXaml nor the names of its contributors may be
used to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
using System;
using System.Collections.Generic;
using System.Linq;
using System.Net;
using System.Text;
using System.Threading.Tasks;
namespace Clifton.WebServer
{
/// <summary>
/// A route entry, consisting of optional session, authorization, and routing providers.
/// </summary>
	public class RouteEntry
	{
		/// <summary>Optional workflow step handling session expiration for this route.</summary>
		public Func<WorkflowContinuation<ContextWrapper>, ContextWrapper, Session, PathParams, WorkflowState> SessionExpirationHandler;

		/// <summary>Optional workflow step authorizing the request for this route.</summary>
		public Func<WorkflowContinuation<ContextWrapper>, ContextWrapper, Session, PathParams, WorkflowState> AuthorizationHandler;

		/// <summary>Workflow step that performs the actual routing of the request.</summary>
		public Func<WorkflowContinuation<ContextWrapper>, ContextWrapper, Session, PathParams, WorkflowState> RouteHandler;
	}
}
| cliftonm/WebServersSuccinctly | Examples/Chapter 9/Clifton.WebServer/RouteEntry.cs | C# | mit | 2,183 |
class Admin::PublicationsController < Admin::EditionsController
  # When a publication is created from a statistics announcement, pre-populate
  # the new edition form from that announcement.
  before_filter :pre_fill_edition_from_statistics_announcement, only: :new, if: :statistics_announcement

  private

  # Edition subclass managed by this controller.
  def edition_class
    Publication
  end

  def permitted_edition_attributes
    super << :statistics_announcement_id
  end

  # Copies title/summary/publication type/topics/release date from the
  # announcement onto the in-memory edition so the "new" form starts filled in.
  def pre_fill_edition_from_statistics_announcement
    @edition.statistics_announcement_id = statistics_announcement.id
    @edition.title = statistics_announcement.title
    @edition.summary = statistics_announcement.summary
    @edition.publication_type = statistics_announcement.publication_type
    @edition.topics = statistics_announcement.topics
    @edition.scheduled_publication = statistics_announcement.release_date
    @edition.previously_published = "false"
  end

  # Memoized lookup of the announcement named in params; nil when the
  # parameter is absent.
  def statistics_announcement
    if params[:statistics_announcement_id]
      @statistics_announcement ||= StatisticsAnnouncement.friendly.find(params[:statistics_announcement_id])
    end
  end
end
| YOTOV-LIMITED/whitehall | app/controllers/admin/publications_controller.rb | Ruby | mit | 985 |
<?php
/**
* Magento
*
* NOTICE OF LICENSE
*
* This source file is subject to the Open Software License (OSL 3.0)
* that is bundled with this package in the file LICENSE.txt.
* It is also available through the world-wide-web at this URL:
* http://opensource.org/licenses/osl-3.0.php
* If you did not receive a copy of the license and are unable to
* obtain it through the world-wide-web, please send an email
* to license@magento.com so we can send you a copy immediately.
*
* DISCLAIMER
*
* Do not edit or add to this file if you wish to upgrade Magento to newer
* versions in the future. If you wish to customize Magento for your
* needs please refer to http://www.magento.com for more information.
*
* @category Tests
* @package Tests_Functional
* @copyright Copyright (c) 2006-2016 X.commerce, Inc. and affiliates (http://www.magento.com)
* @license http://opensource.org/licenses/osl-3.0.php Open Software License (OSL 3.0)
*/
namespace Mage\Directory\Test\Block\Currency;
use Mage\CurrencySymbol\Test\Fixture\CurrencySymbolEntity;
use Magento\Mtf\Block\Block;
use Magento\Mtf\Client\Locator;
/**
* Switcher Currency Symbol.
*/
class Switcher extends Block
{
    /**
     * Currency switch locator.
     *
     * @var string
     */
    protected $currencySwitch = '#select-currency';

    /**
     * Selected Currency switch locator.
     *
     * @var string
     */
    protected $currencySwitchSelected = '#select-currency [selected="selected"]';

    /**
     * Switch currency to specified one.
     *
     * Reads the currently selected option (expected format "<symbol> - <code>")
     * and only touches the <select> when the desired currency code is not
     * already active.
     *
     * @param CurrencySymbolEntity $currencySymbol
     * @return void
     */
    public function switchCurrency(CurrencySymbolEntity $currencySymbol)
    {
        $this->waitForElementVisible($this->currencySwitch);
        $customCurrencySwitch = explode(" - ", $this->_rootElement->find($this->currencySwitchSelected)->getText());
        $currencyCode = $currencySymbol->getCode();
        // Guard against option text without a " - " separator: the selected
        // code is then unknown, so perform the switch. Previously this raised
        // an undefined-index notice before switching anyway.
        if (!isset($customCurrencySwitch[1]) || $customCurrencySwitch[1] !== $currencyCode) {
            $this->_rootElement->find($this->currencySwitch, Locator::SELECTOR_CSS, 'select')
                ->setValue($currencyCode);
        }
    }
}
| hansbonini/cloud9-magento | www/dev/tests/functional/tests/app/Mage/Directory/Test/Block/Currency/Switcher.php | PHP | mit | 2,153 |
<?php
/**
* Magento
*
* NOTICE OF LICENSE
*
* This source file is subject to the Open Software License (OSL 3.0)
* that is bundled with this package in the file LICENSE.txt.
* It is also available through the world-wide-web at this URL:
* http://opensource.org/licenses/osl-3.0.php
* If you did not receive a copy of the license and are unable to
* obtain it through the world-wide-web, please send an email
* to license@magento.com so we can send you a copy immediately.
*
* DISCLAIMER
*
* Do not edit or add to this file if you wish to upgrade Magento to newer
* versions in the future. If you wish to customize Magento for your
* needs please refer to http://www.magento.com for more information.
*
* @category Mage
* @package Mage_Dataflow
* @copyright Copyright (c) 2006-2016 X.commerce, Inc. and affiliates (http://www.magento.com)
* @license http://opensource.org/licenses/osl-3.0.php Open Software License (OSL 3.0)
*/
/**
* Import collection
*
* @category Mage
* @package Mage_Dataflow
* @author Magento Core Team <core@magentocommerce.com>
*/
class Mage_Dataflow_Model_Resource_Import_Collection extends Mage_Core_Model_Resource_Db_Collection_Abstract
{
    /**
     * Initialize the collection: bind it to the 'dataflow/import'
     * model/resource-model pair.
     */
    protected function _construct()
    {
        $this->_init('dataflow/import');
    }
}
| hansbonini/cloud9-magento | www/app/code/core/Mage/Dataflow/Model/Resource/Import/Collection.php | PHP | mit | 1,372 |
<?php
// Schema update: widen cm_actionLimit.actionType to unsigned INT and make it
// nullable. NOTE(review): presumably NULL means "applies to all action
// types" — confirm against the ActionLimit model before relying on this.
CM_Db_Db::exec('ALTER TABLE `cm_actionLimit` CHANGE `actionType` `actionType` INT UNSIGNED DEFAULT NULL');
| zazabe/cm | resources/db/update/35.php | PHP | mit | 114 |
// Spart License (zlib/png)
//
//
// Copyright (c) 2003 Jonathan de Halleux
//
// This software is provided 'as-is', without any express or implied warranty.
// In no event will the authors be held liable for any damages arising from
// the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software in a
// product, an acknowledgment in the product documentation would be
// appreciated but is not required.
//
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
//
// 3. This notice may not be removed or altered from any source distribution.
//
// Author: Jonathan de Halleuxusing System;
using System;
using System.Collections;
namespace Spart.Actions
{
/// <summary>
/// Static helper class that creates actors
/// </summary>
	public class ActionHandlers
	{
		/// <summary>
		/// Creates an action handler that appends each parse result value
		/// (<c>args.Value</c>) to the supplied <see cref="IList"/>.
		/// </summary>
		/// <param name="list">List receiving the parse results.</param>
		/// <returns>An <see cref="ActionHandler"/> suitable for parser actions.</returns>
		public static ActionHandler Append(IList list)
		{
			return delegate(Object sender, ActionEventArgs args)
			{
				list.Add(args.Value);
			};
		}

		/// <summary>
		/// Creates an action handler that throws the given exception whenever
		/// it is invoked.
		/// </summary>
		/// <param name="ex">Exception instance to throw.</param>
		/// <returns>An <see cref="ActionHandler"/> that always throws.</returns>
		public static ActionHandler Throw(Exception ex)
		{
			return delegate
			{
				throw ex;
			};
		}
	}
| darcywong00/libpalaso | Palaso/Spart/Actions/Actions.cs | C# | mit | 1,724 |
<?php
/**
* phpDocumentor
*
* PHP Version 5.4
*
* @copyright 2010-2014 Mike van Riel / Naenius (http://www.naenius.com)
* @license http://www.opensource.org/licenses/mit-license.php MIT
* @link http://phpdoc.org
*/
namespace phpDocumentor\Transformer\Router;
use phpDocumentor\Configuration;
use phpDocumentor\Transformer\Configuration\ExternalClassDocumentation;
class ExternalRouterTest extends \PHPUnit_Framework_TestCase
{
    /**
     * Without any external-documentation definitions, match() must return null.
     *
     * @covers phpDocumentor\Transformer\Router\ExternalRouter::__construct
     * @covers phpDocumentor\Transformer\Router\ExternalRouter::configure
     * @covers phpDocumentor\Transformer\Router\ExternalRouter::match
     */
    public function testIfNoUrlIsGeneratedWhenThereIsNoDefinition()
    {
        $router = new ExternalRouter(new Configuration());

        $this->assertNull($router->match('My_Space_With_Suffix'));
    }

    /**
     * A single definition whose prefix matches the FQCN should produce a URL
     * with the {CLASS} placeholder substituted.
     *
     * @covers phpDocumentor\Transformer\Router\ExternalRouter::__construct
     * @covers phpDocumentor\Transformer\Router\ExternalRouter::configure
     * @covers phpDocumentor\Transformer\Router\ExternalRouter::match
     */
    public function testIfSingleDefinitionGeneratesAnUrl()
    {
        $configuration = new Configuration();
        $definitions   = array(new ExternalClassDocumentation('My_Space', 'http://abc/{CLASS}.html'));
        $configuration->getTransformer()->setExternalClassDocumentation($definitions);
        $router = new ExternalRouter($configuration);

        $url = $router->match('My_Space_With_Suffix')->generate('My_Space_With_Suffix');

        $this->assertSame('http://abc/My_Space_With_Suffix.html', $url);
    }

    /**
     * With multiple definitions registered, the router must still resolve the
     * definition matching the given FQCN and generate its URL.
     *
     * @covers phpDocumentor\Transformer\Router\ExternalRouter::__construct
     * @covers phpDocumentor\Transformer\Router\ExternalRouter::configure
     * @covers phpDocumentor\Transformer\Router\ExternalRouter::match
     */
    public function testIfMultipleDefinitionsGenerateAnUrl()
    {
        $configuration = new Configuration();
        $definitions   = array(
            new ExternalClassDocumentation('My_Zen_Space', 'http://abc/zen/{CLASS}.html'),
            new ExternalClassDocumentation('My_Space', 'http://abc/{CLASS}.html')
        );
        $configuration->getTransformer()->setExternalClassDocumentation($definitions);
        $router = new ExternalRouter($configuration);

        $url = $router->match('My_Space_With_Suffix')->generate('My_Space_With_Suffix');

        $this->assertSame('http://abc/My_Space_With_Suffix.html', $url);
    }
}
| Maxim-Mazurok/phpDocumentor2 | tests/unit/phpDocumentor/Transformer/Router/ExternalRouterTest.php | PHP | mit | 2,689 |
<?php
/**
* Copyright © 2013-2017 Magento, Inc. All rights reserved.
* See COPYING.txt for license details.
*/
namespace Magento\TestModule4\Service\V1\Entity;
class NestedDataObjectRequest extends \Magento\Framework\Api\AbstractExtensibleObject
{
    /**#@+
     * Data-array key used for the nested request object.
     */
    const KEY_DETAILS = 'details';
    /**#@-*/

    /**
     * Retrieve the nested details object, if one has been set.
     *
     * @return \Magento\TestModule4\Service\V1\Entity\DataObjectRequest
     */
    public function getDetails()
    {
        return $this->_get(self::KEY_DETAILS);
    }

    /**
     * Store the nested details object.
     *
     * @param \Magento\TestModule4\Service\V1\Entity\DataObjectRequest $details
     * @return $this
     */
    public function setDetails(\Magento\TestModule4\Service\V1\Entity\DataObjectRequest $details = null)
    {
        return $this->setData(self::KEY_DETAILS, $details);
    }
}
| j-froehlich/magento2_wk | vendor/magento/magento2-base/dev/tests/api-functional/_files/Magento/TestModule4/Service/V1/Entity/NestedDataObjectRequest.php | PHP | mit | 714 |