code stringlengths 4 1.01M |
|---|
/*
Copyright IBM Corp. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
@import "theme.css";
/* Highlights */
.highlight .k,
.highlight .kd {
color: #263673;
}
/* Text */
body,
h1,
h2,
.rst-content .toctree-wrapper p.caption,
h3,
h4,
h5,
h6,
legend,
input {
color: #010101;
letter-spacing: 0.3px
}
p {
font-size: 100%; /* Get rid of RTD rule that assumes nobody changes their browser font size */
}
/* Links */
a {
color: #6ca158 /* theme green, also used for menu captions below */
}
a:hover {
color: #6ca158 /* intentionally the same as the non-hover color */
}
a:visited {
color: #ADAFB3
}
/* Side navigation bar */
.wy-side-nav-search {
background-color: #252627;
}
.wy-side-nav-search a.icon-home {
color: transparent;
/* background-image: url('../images/fabric1.png'); */
background-repeat: no-repeat;
background-size: auto 20px; /* keyword lowercased for consistency */
background-position: center top;
background-origin: content-box; /* fixed: 'content box' (with a space) is not a valid keyword */
height: 20px;
width: 100%
}
.wy-side-nav-search input[type=text] {
border-radius: 5px
}
.wy-menu-vertical a:hover {
background-color: #ADAFB3;
color: #FFF
}
.wy-nav-content {
/* fixed: original 'background-color: #fff max-width: 1000px;' was missing a
   semicolon, which made the whole declaration invalid and dropped BOTH properties */
background-color: #fff;
max-width: 1000px;
}
.wy-nav-side {
background-color: #252627;
}
/* Navigation headers */
/* Navigation headers / inline literals */
.rst-content tt.literal, /* duplicate 'tt.literal' selector removed */
.rst-content code.literal {
color: #d43232; /* fixed: '##d43232' is an invalid color and disabled this rule */
text-transform: none;
}
.wy-menu-vertical header,
.wy-menu-vertical p.caption {
color: #6ca158;
}
/* Code snippets */
.codesnippet-widgets {
min-width: 100%;
display: block;
background: #005CAB;
color: white;
padding: 10px 0;
margin: 0 0 -1px 0;
}
.codesnippet-widgets > span {
padding: 10px;
cursor: pointer;
}
/* Highlight the currently selected snippet tab */
.codesnippet-widgets > .current {
background: #263673;
}
/* Java snippets hidden by default; toggled by the snippet-widget script */
.codeset > .highlight-java {
display: none;
}
/* Notification boxes */
.wy-alert.wy-alert-warning .wy-alert-title,
.rst-content .wy-alert-warning.note .wy-alert-title,
.rst-content .attention .wy-alert-title,
.rst-content .caution .wy-alert-title,
.rst-content .wy-alert-warning.danger .wy-alert-title,
.rst-content .wy-alert-warning.error .wy-alert-title,
.rst-content .wy-alert-warning.hint .wy-alert-title,
.rst-content .wy-alert-warning.important .wy-alert-title,
.rst-content .wy-alert-warning.tip .wy-alert-title,
.rst-content .warning .wy-alert-title,
.rst-content .wy-alert-warning.seealso .wy-alert-title,
.rst-content .admonition-todo .wy-alert-title,
.wy-alert.wy-alert-warning .rst-content .admonition-title,
.rst-content .wy-alert.wy-alert-warning .admonition-title,
.rst-content .wy-alert-warning.note .admonition-title,
.rst-content .attention .admonition-title,
.rst-content .caution .admonition-title,
.rst-content .wy-alert-warning.danger .admonition-title,
.rst-content .wy-alert-warning.error .admonition-title,
.rst-content .wy-alert-warning.hint .admonition-title,
.rst-content .wy-alert-warning.important .admonition-title,
.rst-content .wy-alert-warning.tip .admonition-title,
.rst-content .warning .admonition-title,
.rst-content .wy-alert-warning.seealso .admonition-title,
.rst-content .admonition-todo .admonition-title {
background-color: #263673
}
.wy-alert,
.rst-content .note,
.rst-content .attention,
.rst-content .caution,
.rst-content .danger,
.rst-content .error,
.rst-content .hint,
.rst-content .important,
.rst-content .tip,
.rst-content .warning,
.rst-content .seealso,
.rst-content .admonition-todo {
background-color: #d9e5ef
}
/* Mobile view */
.wy-nav-top {
background-color: #252627;
}
.wy-nav-top a {
color: transparent;
/* background-image: url('../images/fabric1.png'); */
background-repeat: no-repeat;
background-size: auto 19px; /* keyword lowercased for consistency */
background-position: center top;
background-origin: content-box; /* fixed: 'content box' (with a space) is not a valid keyword */
}
|
/**
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.hystrix;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import rx.Observable;
import rx.Observable.OnSubscribe;
import rx.Subscriber;
import com.netflix.hystrix.exception.HystrixBadRequestException;
import com.netflix.hystrix.exception.HystrixRuntimeException;
import com.netflix.hystrix.exception.HystrixRuntimeException.FailureType;
import com.netflix.hystrix.strategy.executionhook.HystrixCommandExecutionHook;
import com.netflix.hystrix.strategy.properties.HystrixPropertiesStrategy;
/**
* Used to wrap code that will execute potentially risky functionality (typically meaning a service call over the network)
* with fault and latency tolerance, statistics and performance metrics capture, circuit breaker and bulkhead functionality.
* This command is essentially a blocking command but provides an Observable facade if used with observe()
*
* @param <R>
* the return type
*
* @ThreadSafe
*/
public abstract class HystrixCommand<R> extends AbstractCommand<R> implements HystrixExecutable<R>, HystrixInvokableInfo<R>, HystrixObservable<R> {
/**
 * Construct a {@link HystrixCommand} with defined {@link HystrixCommandGroupKey}.
 * <p>
 * The {@link HystrixCommandKey} will be derived from the implementing class name.
 *
 * @param group
 * {@link HystrixCommandGroupKey} used to group together multiple {@link HystrixCommand} objects.
 * <p>
 * The {@link HystrixCommandGroupKey} is used to represent a common relationship between commands. For example, a library or team name, the system all related commands interact with,
 * common business purpose etc.
 */
protected HystrixCommand(HystrixCommandGroupKey group) {
// all other Setter values are left unset so the defaults apply
this(new Setter(group));
}
/**
 * Construct a {@link HystrixCommand} with defined {@link HystrixCommandGroupKey} and {@link HystrixThreadPoolKey}.
 * <p>
 * The {@link HystrixCommandKey} will be derived from the implementing class name.
 *
 * @param group
 * {@link HystrixCommandGroupKey} used to group together multiple {@link HystrixCommand} objects.
 * <p>
 * The {@link HystrixCommandGroupKey} is used to represent a common relationship between commands. For example, a library or team name, the system all related commands interact with,
 * common business purpose etc.
 * @param threadPool
 * {@link HystrixThreadPoolKey} used to identify the thread pool in which a {@link HystrixCommand} executes.
 */
protected HystrixCommand(HystrixCommandGroupKey group, HystrixThreadPoolKey threadPool) {
this(new Setter(group).andThreadPoolKey(threadPool));
}
/**
 * Construct a {@link HystrixCommand} with defined {@link HystrixCommandGroupKey} and thread timeout
 * <p>
 * The {@link HystrixCommandKey} will be derived from the implementing class name.
 *
 * @param group
 * {@link HystrixCommandGroupKey} used to group together multiple {@link HystrixCommand} objects.
 * <p>
 * The {@link HystrixCommandGroupKey} is used to represent a common relationship between commands. For example, a library or team name, the system all related commands interact with,
 * common business purpose etc.
 * @param executionIsolationThreadTimeoutInMilliseconds
 * Time in milliseconds at which point the calling thread will timeout (using {@link Future#get}) and walk away from the executing thread.
 */
protected HystrixCommand(HystrixCommandGroupKey group, int executionIsolationThreadTimeoutInMilliseconds) {
this(new Setter(group).andCommandPropertiesDefaults(HystrixCommandProperties.Setter().withExecutionTimeoutInMilliseconds(executionIsolationThreadTimeoutInMilliseconds)));
}
/**
 * Construct a {@link HystrixCommand} with defined {@link HystrixCommandGroupKey}, {@link HystrixThreadPoolKey}, and thread timeout.
 * <p>
 * The {@link HystrixCommandKey} will be derived from the implementing class name.
 *
 * @param group
 * {@link HystrixCommandGroupKey} used to group together multiple {@link HystrixCommand} objects.
 * <p>
 * The {@link HystrixCommandGroupKey} is used to represent a common relationship between commands. For example, a library or team name, the system all related commands interact with,
 * common business purpose etc.
 * @param threadPool
 * {@link HystrixThreadPoolKey} used to identify the thread pool in which a {@link HystrixCommand} executes.
 * @param executionIsolationThreadTimeoutInMilliseconds
 * Time in milliseconds at which point the calling thread will timeout (using {@link Future#get}) and walk away from the executing thread.
 */
protected HystrixCommand(HystrixCommandGroupKey group, HystrixThreadPoolKey threadPool, int executionIsolationThreadTimeoutInMilliseconds) {
this(new Setter(group).andThreadPoolKey(threadPool).andCommandPropertiesDefaults(HystrixCommandProperties.Setter().withExecutionTimeoutInMilliseconds(executionIsolationThreadTimeoutInMilliseconds)));
}
/**
 * Construct a {@link HystrixCommand} with defined {@link Setter} that allows injecting property and strategy overrides and other optional arguments.
 * <p>
 * NOTE: The {@link HystrixCommandKey} is used to associate a {@link HystrixCommand} with {@link HystrixCircuitBreaker}, {@link HystrixCommandMetrics} and other objects.
 * <p>
 * Do not create multiple {@link HystrixCommand} implementations with the same {@link HystrixCommandKey} but different injected default properties as the first instantiated will win.
 * <p>
 * Properties passed in via {@link Setter#andCommandPropertiesDefaults} or {@link Setter#andThreadPoolPropertiesDefaults} are cached for the given {@link HystrixCommandKey} for the life of the JVM
 * or until {@link Hystrix#reset()} is called. Dynamic properties allow runtime changes. Read more on the <a href="https://github.com/Netflix/Hystrix/wiki/Configuration">Hystrix Wiki</a>.
 *
 * @param setter
 * Fluent interface for constructor arguments
 */
protected HystrixCommand(Setter setter) {
// use 'null' to specify use the default for the remaining (non-Setter) constructor arguments
this(setter.groupKey, setter.commandKey, setter.threadPoolKey, null, null, setter.commandPropertiesDefaults, setter.threadPoolPropertiesDefaults, null, null, null, null, null);
}
/**
 * Allow constructing a {@link HystrixCommand} with injection of most aspects of its functionality.
 * <p>
 * Some of these never have a legitimate reason for injection except in unit testing.
 * <p>
 * Most of the args will revert to a valid default if 'null' is passed in.
 */
/* package for testing */HystrixCommand(HystrixCommandGroupKey group, HystrixCommandKey key, HystrixThreadPoolKey threadPoolKey, HystrixCircuitBreaker circuitBreaker, HystrixThreadPool threadPool,
HystrixCommandProperties.Setter commandPropertiesDefaults, HystrixThreadPoolProperties.Setter threadPoolPropertiesDefaults,
HystrixCommandMetrics metrics, TryableSemaphore fallbackSemaphore, TryableSemaphore executionSemaphore,
HystrixPropertiesStrategy propertiesStrategy, HystrixCommandExecutionHook executionHook) {
super(group, key, threadPoolKey, circuitBreaker, threadPool, commandPropertiesDefaults, threadPoolPropertiesDefaults, metrics, fallbackSemaphore, executionSemaphore, propertiesStrategy, executionHook);
}
/**
 * Fluent interface for arguments to the {@link HystrixCommand} constructor.
 * <p>
 * The required arguments are set via the 'with' factory method and optional arguments via the 'and' chained methods.
 * <p>
 * Example:
 * <pre> {@code
 * Setter.withGroupKey(HystrixCommandGroupKey.Factory.asKey("GroupName"))
.andCommandKey(HystrixCommandKey.Factory.asKey("CommandName"));
 * } </pre>
 *
 * @NotThreadSafe
 */
final public static class Setter {
protected final HystrixCommandGroupKey groupKey;
protected HystrixCommandKey commandKey;
protected HystrixThreadPoolKey threadPoolKey;
protected HystrixCommandProperties.Setter commandPropertiesDefaults;
protected HystrixThreadPoolProperties.Setter threadPoolPropertiesDefaults;
/**
 * Setter factory method containing required values.
 * <p>
 * All optional arguments can be set via the chained methods.
 *
 * @param groupKey
 * {@link HystrixCommandGroupKey} used to group together multiple {@link HystrixCommand} objects.
 * <p>
 * The {@link HystrixCommandGroupKey} is used to represent a common relationship between commands. For example, a library or team name, the system all related commands interact
 * with,
 * common business purpose etc.
 */
protected Setter(HystrixCommandGroupKey groupKey) {
this.groupKey = groupKey;
}
/**
 * Setter factory method with required values.
 * <p>
 * All optional arguments can be set via the chained methods.
 *
 * @param groupKey
 * {@link HystrixCommandGroupKey} used to group together multiple {@link HystrixCommand} objects.
 * <p>
 * The {@link HystrixCommandGroupKey} is used to represent a common relationship between commands. For example, a library or team name, the system all related commands interact
 * with,
 * common business purpose etc.
 */
public static Setter withGroupKey(HystrixCommandGroupKey groupKey) {
return new Setter(groupKey);
}
/**
 * @param commandKey
 * {@link HystrixCommandKey} used to identify a {@link HystrixCommand} instance for statistics, circuit-breaker, properties, etc.
 * <p>
 * By default this will be derived from the instance class name.
 * <p>
 * NOTE: Every unique {@link HystrixCommandKey} will result in new instances of {@link HystrixCircuitBreaker}, {@link HystrixCommandMetrics} and {@link HystrixCommandProperties}.
 * Thus,
 * the number of variants should be kept to a finite and reasonable number to avoid high-memory usage or memory leaks.
 * <p>
 * Hundreds of keys is fine, tens of thousands is probably not.
 * @return Setter for fluent interface via method chaining
 */
public Setter andCommandKey(HystrixCommandKey commandKey) {
this.commandKey = commandKey;
return this;
}
/**
 * @param threadPoolKey
 * {@link HystrixThreadPoolKey} used to define which thread-pool this command should run in (when configured to run on separate threads via
 * {@link HystrixCommandProperties#executionIsolationStrategy()}).
 * <p>
 * By default this is derived from the {@link HystrixCommandGroupKey} but if injected this allows multiple commands to have the same {@link HystrixCommandGroupKey} but different
 * thread-pools.
 * @return Setter for fluent interface via method chaining
 */
public Setter andThreadPoolKey(HystrixThreadPoolKey threadPoolKey) {
this.threadPoolKey = threadPoolKey;
return this;
}
/**
 * Optional
 *
 * @param commandPropertiesDefaults
 * {@link HystrixCommandProperties.Setter} with property overrides for this specific instance of {@link HystrixCommand}.
 * <p>
 * See the {@link HystrixPropertiesStrategy} JavaDocs for more information on properties and order of precedence.
 * @return Setter for fluent interface via method chaining
 */
public Setter andCommandPropertiesDefaults(HystrixCommandProperties.Setter commandPropertiesDefaults) {
this.commandPropertiesDefaults = commandPropertiesDefaults;
return this;
}
/**
 * Optional
 *
 * @param threadPoolPropertiesDefaults
 * {@link HystrixThreadPoolProperties.Setter} with property overrides for the {@link HystrixThreadPool} used by this specific instance of {@link HystrixCommand}.
 * <p>
 * See the {@link HystrixPropertiesStrategy} JavaDocs for more information on properties and order of precedence.
 * @return Setter for fluent interface via method chaining
 */
public Setter andThreadPoolPropertiesDefaults(HystrixThreadPoolProperties.Setter threadPoolPropertiesDefaults) {
this.threadPoolPropertiesDefaults = threadPoolPropertiesDefaults;
return this;
}
}
/**
 * Implement this method with code to be executed when {@link #execute()} or {@link #queue()} are invoked.
 *
 * @return R response type
 * @throws Exception
 * if command execution fails
 */
protected abstract R run() throws Exception;
/**
 * If {@link #execute()} or {@link #queue()} fails in any way then this method will be invoked to provide an opportunity to return a fallback response.
 * <p>
 * This should do work that does not require network transport to produce.
 * <p>
 * In other words, this should be a static or cached result that can immediately be returned upon failure.
 * <p>
 * If network traffic is wanted for fallback (such as going to MemCache) then the fallback implementation should invoke another {@link HystrixCommand} instance that protects against that network
 * access and possibly has another level of fallback that does not involve network access.
 * <p>
 * DEFAULT BEHAVIOR: It throws UnsupportedOperationException.
 *
 * @return R or throw UnsupportedOperationException if not implemented
 */
protected R getFallback() {
throw new UnsupportedOperationException("No fallback available.");
}
@Override
final protected Observable<R> getExecutionObservable() {
// Adapts the synchronous run() method to the Observable execution pipeline of AbstractCommand.
return Observable.create(new OnSubscribe<R>() {
@Override
public void call(Subscriber<? super R> s) {
try {
s.onNext(run());
s.onCompleted();
} catch (Throwable e) { // catch Throwable (not just Exception) so Errors are also routed to onError
s.onError(e);
}
}
});
}
@Override
final protected Observable<R> getFallbackObservable() {
// Adapts the synchronous getFallback() method to the Observable fallback pipeline of AbstractCommand.
return Observable.create(new OnSubscribe<R>() {
@Override
public void call(Subscriber<? super R> s) {
try {
s.onNext(getFallback());
s.onCompleted();
} catch (Throwable e) { // catch Throwable so the default UnsupportedOperationException (and Errors) reach onError
s.onError(e);
}
}
});
}
/**
 * Used for synchronous execution of command.
 *
 * @return R
 * Result of {@link #run()} execution or a fallback from {@link #getFallback()} if the command fails for any reason.
 * @throws HystrixRuntimeException
 * if a failure occurs and a fallback cannot be retrieved
 * @throws HystrixBadRequestException
 * if invalid arguments or state were used representing a user failure, not a system failure
 * @throws IllegalStateException
 * if invoked more than once
 */
public R execute() {
try {
// queue() starts execution; blocking on the Future here yields synchronous semantics
return queue().get();
} catch (Exception e) {
// unwrap ExecutionException etc. into the documented Hystrix exception types
throw decomposeException(e);
}
}
/**
 * Used for asynchronous execution of command.
 * <p>
 * This will queue up the command on the thread pool and return an {@link Future} to get the result once it completes.
 * <p>
 * NOTE: If configured to not run in a separate thread, this will have the same effect as {@link #execute()} and will block.
 * <p>
 * We don't throw an exception but just flip to synchronous execution so code doesn't need to change in order to switch a command from running on a separate thread to the calling thread.
 *
 * @return {@code Future<R>} Result of {@link #run()} execution or a fallback from {@link #getFallback()} if the command fails for any reason.
 * @throws HystrixRuntimeException
 * if a fallback does not exist
 * <p>
 * <ul>
 * <li>via {@code Future.get()} in {@link ExecutionException#getCause()} if a failure occurs</li>
 * <li>or immediately if the command can not be queued (such as short-circuited, thread-pool/semaphore rejected)</li>
 * </ul>
 * @throws HystrixBadRequestException
 * via {@code Future.get()} in {@link ExecutionException#getCause()} if invalid arguments or state were used representing a user failure, not a system failure
 * @throws IllegalStateException
 * if invoked more than once
 */
public Future<R> queue() {
/*
 * --- Schedulers.immediate()
 *
 * We use the 'immediate' schedule since Future.get() is blocking so we don't want to bother doing the callback to the Future on a separate thread
 * as we don't need to separate the Hystrix thread from user threads since they are already providing it via the Future.get() call.
 *
 * We pass 'false' to tell the Observable we will block on it so it doesn't schedule an async timeout.
 *
 * This optimizes for using the calling thread to do the timeout rather than scheduling another thread.
 *
 * In a tight-loop of executing commands this optimization saves a few microseconds per execution.
 * It also just makes no sense to use a separate thread to timeout the command when the calling thread
 * is going to sit waiting on it.
 */
final Observable<R> o = toObservable();
final Future<R> f = o.toBlocking().toFuture();
/* special handling of error states that throw immediately */
if (f.isDone()) {
try {
f.get();
return f;
} catch (Exception e) {
RuntimeException re = decomposeException(e);
if (re instanceof HystrixBadRequestException) {
// bad-request errors are delivered through Future.get(), not thrown from queue()
return f;
} else if (re instanceof HystrixRuntimeException) {
HystrixRuntimeException hre = (HystrixRuntimeException) re;
if (hre.getFailureType() == FailureType.COMMAND_EXCEPTION || hre.getFailureType() == FailureType.TIMEOUT) {
// we don't throw these types from queue() only from queue().get() as they are execution errors
return f;
} else {
// these are errors we throw from queue() as they as rejection type errors
throw hre;
}
} else {
throw re;
}
}
}
return f;
}
@Override
protected String getFallbackMethodName() {
// used by AbstractCommand for metrics/logging of the fallback method
return "getFallback";
}
}
|
(function () {
    // Inject the Android 2.3 JS-compat shim (bind() polyfill) into the page.
    var compatScript = document.createElement('script');
    compatScript.src = 'scripts/android2.3-jscompat.js';
    // Prefer <body> when it exists; fall back to <head> otherwise.
    (document.body || document.head).appendChild(compatScript);
}());
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.properties;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.camel.Endpoint;
import org.apache.camel.impl.UriEndpointComponent;
import org.apache.camel.util.FilePathResolver;
import org.apache.camel.util.LRUSoftCache;
import org.apache.camel.util.ObjectHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* The <a href="http://camel.apache.org/properties">Properties Component</a> allows you to use property placeholders when defining Endpoint URIs
*/
public class PropertiesComponent extends UriEndpointComponent {
/**
* The default prefix token.
*/
public static final String DEFAULT_PREFIX_TOKEN = "{{";
/**
* The default suffix token.
*/
public static final String DEFAULT_SUFFIX_TOKEN = "}}";
/**
* The default prefix token.
* @deprecated Use {@link #DEFAULT_PREFIX_TOKEN} instead.
*/
@Deprecated
public static final String PREFIX_TOKEN = DEFAULT_PREFIX_TOKEN;
/**
* The default suffix token.
* @deprecated Use {@link #DEFAULT_SUFFIX_TOKEN} instead.
*/
@Deprecated
public static final String SUFFIX_TOKEN = DEFAULT_SUFFIX_TOKEN;
/**
* Never check system properties.
*/
public static final int SYSTEM_PROPERTIES_MODE_NEVER = 0;
/**
* Check system properties if not resolvable in the specified properties.
*/
public static final int SYSTEM_PROPERTIES_MODE_FALLBACK = 1;
/**
* Check system properties first, before trying the specified properties.
* This allows system properties to override any other property source.
* <p/>
* This is the default.
*/
public static final int SYSTEM_PROPERTIES_MODE_OVERRIDE = 2;
/**
* Key for stores special override properties that containers such as OSGi can store
* in the OSGi service registry
*/
public static final String OVERRIDE_PROPERTIES = PropertiesComponent.class.getName() + ".OverrideProperties";
private static final Logger LOG = LoggerFactory.getLogger(PropertiesComponent.class);
// Cache of resolved Properties per unique location set; soft references bound memory use.
private final Map<CacheKey, Properties> cacheMap = new LRUSoftCache<CacheKey, Properties>(1000);
// Registered property functions, keyed by function name.
private final Map<String, PropertiesFunction> functions = new HashMap<String, PropertiesFunction>();
private PropertiesResolver propertiesResolver = new DefaultPropertiesResolver(this);
private PropertiesParser propertiesParser = new DefaultPropertiesParser(this);
// true when CamelContext created this component implicitly during startup
private boolean isDefaultCreated;
private String[] locations;
private boolean ignoreMissingLocation;
private String encoding;
// whether resolved location properties are cached (default on)
private boolean cache = true;
private String propertyPrefix;
private String propertyPrefixResolved;
private String propertySuffix;
private String propertySuffixResolved;
private boolean fallbackToUnaugmentedProperty = true;
private String prefixToken = DEFAULT_PREFIX_TOKEN;
private String suffixToken = DEFAULT_SUFFIX_TOKEN;
// properties applied before locations are resolved
private Properties initialProperties;
// properties that take precedence over everything else
private Properties overrideProperties;
private int systemPropertiesMode = SYSTEM_PROPERTIES_MODE_OVERRIDE;
/**
 * Creates the component with no locations configured and registers the
 * out-of-the-box property functions (env:, sys:, service:, service.host:, service.port:).
 */
public PropertiesComponent() {
super(PropertiesEndpoint.class);
// include out of the box functions
addFunction(new EnvPropertiesFunction());
addFunction(new SysPropertiesFunction());
addFunction(new ServicePropertiesFunction());
addFunction(new ServiceHostPropertiesFunction());
addFunction(new ServicePortPropertiesFunction());
}
/**
 * @param isDefaultCreated whether this instance was created implicitly by CamelContext during startup
 */
public PropertiesComponent(boolean isDefaultCreated) {
this();
this.isDefaultCreated = isDefaultCreated;
}
/**
 * @param location comma-separated list of property locations to load
 */
public PropertiesComponent(String location) {
this();
setLocation(location);
}
/**
 * @param locations property locations to load
 */
public PropertiesComponent(String... locations) {
this();
setLocations(locations);
}
/**
 * Creates a {@link PropertiesEndpoint} that delegates to the endpoint obtained by resolving
 * property placeholders in {@code remaining}.
 * <p/>
 * The endpoint URI may carry a {@code locations} parameter (comma-separated) that overrides the
 * component's default locations, and an {@code ignoreMissingLocation} parameter.
 *
 * @param uri the full endpoint URI as given by the user
 * @param remaining the URI part after the scheme, containing the placeholders to resolve
 * @param parameters endpoint parameters; consumed entries are removed
 * @return the resolved delegate endpoint wrapped in a {@link PropertiesEndpoint}
 * @throws Exception if the placeholders cannot be resolved
 */
@Override
protected Endpoint createEndpoint(String uri, String remaining, Map<String, Object> parameters) throws Exception {
    String[] paths = locations;
    // renamed from 'locations' to avoid shadowing the field of the same name
    String overrideLocations = getAndRemoveParameter(parameters, "locations", String.class);
    Boolean ignoreMissingLocationLoc = getAndRemoveParameter(parameters, "ignoreMissingLocation", Boolean.class);
    if (overrideLocations != null) {
        LOG.trace("Overriding default locations with location: {}", overrideLocations);
        paths = overrideLocations.split(",");
    }
    if (ignoreMissingLocationLoc != null) {
        ignoreMissingLocation = ignoreMissingLocationLoc;
    }
    String endpointUri = parseUri(remaining, paths);
    LOG.debug("Endpoint uri parsed as: {}", endpointUri);
    Endpoint delegate = getCamelContext().getEndpoint(endpointUri);
    PropertiesEndpoint answer = new PropertiesEndpoint(uri, delegate, this);
    setProperties(answer, parameters);
    return answer;
}
/**
 * Parses the given URI, replacing property placeholders using the component's configured locations.
 *
 * @param uri the URI containing placeholders
 * @return the URI with placeholders replaced
 * @throws Exception if a placeholder cannot be resolved
 */
public String parseUri(String uri) throws Exception {
return parseUri(uri, locations);
}
/**
 * Parses the given URI, replacing property placeholders with values resolved from the
 * given locations (plus any initial and override properties configured on the component).
 * <p/>
 * Precedence (lowest to highest): initial properties, location properties, override properties.
 *
 * @param uri the URI containing placeholders; prefix/suffix tokens are added if missing
 * @param paths the locations to resolve properties from; may be null
 * @return the URI with placeholders replaced
 * @throws Exception if a location or placeholder cannot be resolved
 */
public String parseUri(String uri, String... paths) throws Exception {
    Properties prop = new Properties();
    // seed with initial properties so location-based values can override them
    if (initialProperties != null) {
        prop.putAll(initialProperties);
    }
    if (paths != null) {
        // locations may contain JVM system properties or OS environment variables,
        // so resolve those first
        String[] locations = parseLocations(paths);
        // check the cache before asking the resolver to load from the locations
        CacheKey key = new CacheKey(locations);
        Properties locationsProp = cache ? cacheMap.get(key) : null;
        if (locationsProp == null) {
            locationsProp = propertiesResolver.resolveProperties(getCamelContext(), ignoreMissingLocation, locations);
            if (cache) {
                cacheMap.put(key, locationsProp);
            }
        }
        prop.putAll(locationsProp);
    }
    // override properties take precedence; merge into a copy so the originals are untouched
    if (overrideProperties != null) {
        Properties override = new Properties();
        override.putAll(prop);
        override.putAll(overrideProperties);
        prop = override;
    }
    // enclose the uri in tokens if missing; the former additional startsWith/endsWith
    // checks were redundant because contains() already implies them
    if (!uri.contains(prefixToken)) {
        uri = prefixToken + uri;
    }
    if (!uri.contains(suffixToken)) {
        uri = uri + suffixToken;
    }
    LOG.trace("Parsing uri {} with properties: {}", uri, prop);
    if (propertiesParser instanceof AugmentedPropertyNameAwarePropertiesParser) {
        return ((AugmentedPropertyNameAwarePropertiesParser) propertiesParser).parseUri(uri, prop, prefixToken, suffixToken,
                propertyPrefixResolved, propertySuffixResolved, fallbackToUnaugmentedProperty);
    } else {
        return propertiesParser.parseUri(uri, prop, prefixToken, suffixToken);
    }
}
/**
 * Is this component created as a default by {@link org.apache.camel.CamelContext} during starting up Camel.
 */
public boolean isDefaultCreated() {
return isDefaultCreated;
}
/**
 * Returns the configured locations, or null if none have been set.
 */
public String[] getLocations() {
return locations;
}
/**
 * A list of locations to load properties. You can use comma to separate multiple locations.
 * This option will override any default locations and only use the locations from this option.
 *
 * @param locations the locations to use; each entry is trimmed. The caller's array is not modified.
 */
public void setLocations(String[] locations) {
    // make sure to trim as people may use new lines when configuring using XML,
    // and do this in the setter as Spring/Blueprint resolves placeholders before Camel is started.
    // Trim into a copy so the caller's array is not mutated (the original trimmed in place).
    if (locations != null && locations.length > 0) {
        String[] trimmed = new String[locations.length];
        for (int i = 0; i < locations.length; i++) {
            trimmed[i] = locations[i].trim();
        }
        this.locations = trimmed;
    } else {
        this.locations = locations;
    }
}
/**
 * A list of locations to load properties. You can use comma to separate multiple locations.
 * This option will override any default locations and only use the locations from this option.
 *
 * @param location comma-separated list of locations; must not be null
 */
public void setLocation(String location) {
// NOTE(review): a null argument throws NullPointerException here — confirm callers never pass null
setLocations(location.split(","));
}
/**
 * Returns the encoding used when loading properties files, or null if the default is used.
 */
public String getEncoding() {
return encoding;
}
/**
 * Encoding to use when loading properties file from the file system or classpath.
 * <p/>
 * If no encoding has been set, then the properties files is loaded using ISO-8859-1 encoding (latin-1)
 * as documented by {@link java.util.Properties#load(java.io.InputStream)}
 */
public void setEncoding(String encoding) {
this.encoding = encoding;
}
/**
 * Returns the resolver used to load properties from the configured locations.
 */
public PropertiesResolver getPropertiesResolver() {
return propertiesResolver;
}
/**
 * To use a custom PropertiesResolver
 */
public void setPropertiesResolver(PropertiesResolver propertiesResolver) {
this.propertiesResolver = propertiesResolver;
}
/**
 * Returns the parser used to replace placeholders in URIs.
 */
public PropertiesParser getPropertiesParser() {
return propertiesParser;
}
/**
 * To use a custom PropertiesParser
 */
public void setPropertiesParser(PropertiesParser propertiesParser) {
this.propertiesParser = propertiesParser;
}
/**
 * Whether loaded properties are cached (see {@link #setCache(boolean)}).
 */
public boolean isCache() {
return cache;
}
/**
 * Whether or not to cache loaded properties. The default value is true.
 */
public void setCache(boolean cache) {
this.cache = cache;
}
/**
 * Returns the optional prefix prepended to property names, or null if not set.
 */
public String getPropertyPrefix() {
return propertyPrefix;
}
/**
 * Optional prefix prepended to property names before resolution.
 */
public void setPropertyPrefix(String propertyPrefix) {
this.propertyPrefix = propertyPrefix;
this.propertyPrefixResolved = propertyPrefix;
// the prefix itself may contain system/environment placeholders, so resolve them eagerly
if (ObjectHelper.isNotEmpty(this.propertyPrefix)) {
this.propertyPrefixResolved = FilePathResolver.resolvePath(this.propertyPrefix);
}
}
/**
 * Returns the optional suffix appended to property names, or null if not set.
 */
public String getPropertySuffix() {
return propertySuffix;
}
/**
 * Optional suffix appended to property names before resolution.
 */
public void setPropertySuffix(String propertySuffix) {
this.propertySuffix = propertySuffix;
this.propertySuffixResolved = propertySuffix;
// the suffix itself may contain system/environment placeholders, so resolve them eagerly
if (ObjectHelper.isNotEmpty(this.propertySuffix)) {
this.propertySuffixResolved = FilePathResolver.resolvePath(this.propertySuffix);
}
}
/**
 * Whether resolution falls back to the plain property name (see {@link #setFallbackToUnaugmentedProperty(boolean)}).
 */
public boolean isFallbackToUnaugmentedProperty() {
return fallbackToUnaugmentedProperty;
}
/**
 * If true, first attempt resolution of property name augmented with propertyPrefix and propertySuffix
 * before falling back the plain property name specified. If false, only the augmented property name is searched.
 */
public void setFallbackToUnaugmentedProperty(boolean fallbackToUnaugmentedProperty) {
this.fallbackToUnaugmentedProperty = fallbackToUnaugmentedProperty;
}
/**
 * Whether missing locations are silently ignored (see {@link #setIgnoreMissingLocation(boolean)}).
 */
public boolean isIgnoreMissingLocation() {
return ignoreMissingLocation;
}
/**
 * Whether to silently ignore if a location cannot be located, such as a properties file not found.
 */
public void setIgnoreMissingLocation(boolean ignoreMissingLocation) {
this.ignoreMissingLocation = ignoreMissingLocation;
}
/**
 * Returns the token that marks the start of a property placeholder.
 */
public String getPrefixToken() {
    return prefixToken;
}
/**
 * Sets the value of the prefix token used to identify properties to replace.
 * Passing {@code null} restores the default token ({@link #DEFAULT_PREFIX_TOKEN}).
 */
public void setPrefixToken(String prefixToken) {
    this.prefixToken = prefixToken != null ? prefixToken : DEFAULT_PREFIX_TOKEN;
}
/**
 * Returns the token that marks the end of a property placeholder.
 */
public String getSuffixToken() {
    return suffixToken;
}
/**
 * Sets the value of the suffix token used to identify properties to replace.
 * Passing {@code null} restores the default token ({@link #DEFAULT_SUFFIX_TOKEN}).
 */
public void setSuffixToken(String suffixToken) {
    this.suffixToken = suffixToken != null ? suffixToken : DEFAULT_SUFFIX_TOKEN;
}
/**
 * Gets the initial properties, used before any locations are resolved.
 */
public Properties getInitialProperties() {
    return initialProperties;
}

/**
 * Sets initial properties which will be used before any locations are resolved.
 *
 * @param initialProperties properties that are added first
 */
public void setInitialProperties(Properties initialProperties) {
    this.initialProperties = initialProperties;
}
/**
 * Gets the override properties, which take precedence over all other sources.
 */
public Properties getOverrideProperties() {
    return overrideProperties;
}

/**
 * Sets a special list of override properties that take precedence
 * and will be used first, if a property exists.
 *
 * @param overrideProperties properties that are used first
 */
public void setOverrideProperties(Properties overrideProperties) {
    this.overrideProperties = overrideProperties;
}
/**
 * Gets the functions registered in this properties component.
 */
public Map<String, PropertiesFunction> getFunctions() {
    return functions;
}

/**
 * Registers the {@link org.apache.camel.component.properties.PropertiesFunction} as a function to this component.
 * The function is keyed by its name; registering another function with the same name replaces the previous one.
 */
public void addFunction(PropertiesFunction function) {
    this.functions.put(function.getName(), function);
}

/**
 * Is there a {@link org.apache.camel.component.properties.PropertiesFunction} with the given name?
 */
public boolean hasFunction(String name) {
    return functions.containsKey(name);
}
/**
 * Gets the system property mode.
 */
public int getSystemPropertiesMode() {
    return systemPropertiesMode;
}

/**
 * Sets the system property mode. Must be one of the SYSTEM_PROPERTIES_MODE_* constants;
 * any other value is rejected when the component is started.
 *
 * @see #SYSTEM_PROPERTIES_MODE_NEVER
 * @see #SYSTEM_PROPERTIES_MODE_FALLBACK
 * @see #SYSTEM_PROPERTIES_MODE_OVERRIDE
 */
public void setSystemPropertiesMode(int systemPropertiesMode) {
    this.systemPropertiesMode = systemPropertiesMode;
}
@Override
protected void doStart() throws Exception {
    super.doStart();

    // fail fast on an invalid mode before the component is used for any resolution
    if (systemPropertiesMode != SYSTEM_PROPERTIES_MODE_NEVER
        && systemPropertiesMode != SYSTEM_PROPERTIES_MODE_FALLBACK
        && systemPropertiesMode != SYSTEM_PROPERTIES_MODE_OVERRIDE) {
        throw new IllegalArgumentException("Option systemPropertiesMode has invalid value: " + systemPropertiesMode);
    }

    // inject the component to the parser
    if (propertiesParser instanceof DefaultPropertiesParser) {
        ((DefaultPropertiesParser) propertiesParser).setPropertiesComponent(this);
    }
}
@Override
protected void doStop() throws Exception {
    // drop cached properties so a restart re-loads them from the locations afresh
    cacheMap.clear();
    super.doStop();
}
/**
 * Resolves placeholders in each configured location, keeping only locations that
 * resolve to a non-empty value. Unresolvable locations either abort with the
 * original exception or are skipped, depending on {@code ignoreMissingLocation}.
 */
private String[] parseLocations(String[] locations) {
    List<String> answer = new ArrayList<String>();

    for (String location : locations) {
        LOG.trace("Parsing location: {} ", location);
        try {
            String resolved = FilePathResolver.resolvePath(location);
            LOG.debug("Parsed location: {} ", resolved);
            if (ObjectHelper.isNotEmpty(resolved)) {
                answer.add(resolved);
            }
        } catch (IllegalArgumentException e) {
            if (ignoreMissingLocation) {
                LOG.debug("Ignored missing location: {}", location);
            } else {
                throw e;
            }
        }
    }

    // must return a not-null answer
    return answer.toArray(new String[answer.size()]);
}
/**
* Key used in the locations cache
*/
/**
 * Key used in the locations cache.
 * <p>
 * Equality and hash code are based solely on the (ordered) array of location strings.
 */
private static final class CacheKey implements Serializable {
    private static final long serialVersionUID = 1L;

    private final String[] locations;

    private CacheKey(String[] locations) {
        this.locations = locations;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        CacheKey that = (CacheKey) o;
        // element-wise comparison; the order of locations is significant
        return Arrays.equals(locations, that.locations);
    }

    @Override
    public int hashCode() {
        return locations != null ? Arrays.hashCode(locations) : 0;
    }

    @Override
    public String toString() {
        return "LocationKey[" + Arrays.asList(locations).toString() + "]";
    }
}
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.netty;
import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.component.mock.MockEndpoint;
import org.junit.jupiter.api.Test;
public class NettyInOutFromSedaTest extends BaseNettyTest {

    @Test
    public void testInOutIssue() throws Exception {
        // replies may arrive in any order because seda:start is consumed asynchronously
        MockEndpoint mock = getMockEndpoint("mock:result");
        mock.expectedBodiesReceivedInAnyOrder("Bye A", "Bye B", "Bye C");

        template.sendBody("seda:start", "A");
        template.sendBody("seda:start", "B");
        template.sendBody("seda:start", "C");

        assertMockEndpointsSatisfied();
    }

    @Override
    protected RouteBuilder createRouteBuilder() throws Exception {
        return new RouteBuilder() {
            @Override
            public void configure() throws Exception {
                // client route: consume from seda and do a request/reply (sync=true) over netty tcp
                from("seda:start")
                    .log("before ${body}")
                    .to("netty:tcp://localhost:{{port}}?textline=true&sync=true")
                    .log("after ${body}")
                    .to("mock:result");

                // server route: reply with "Bye " prepended to the incoming body
                from("netty:tcp://localhost:{{port}}?textline=true&sync=true")
                    .transform(body().prepend("Bye "));
            }
        };
    }
}
|
/**
* Copyright 2010-2015 Axel Fontaine
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Private API. No compatibility guarantees provided.
*/
package org.flywaydb.core.internal.dbsupport.vertica; |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for experimental iterator_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.data.python.ops import iterator_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator import model_fn
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import training_util
class CheckpointInputPipelineHookTest(test.TestCase):
  """Tests for CheckpointInputPipelineHook used with Estimator.train."""

  @staticmethod
  def _model_fn(features, labels, mode, config):
    """Minimal model_fn that records the global step and last feature seen."""
    del labels
    del mode
    del config
    global_step = training_util.get_or_create_global_step()
    update_global_step_op = global_step.assign_add(1)
    latest_feature = variables.Variable(
        0, name='latest_feature', dtype=dtypes.int64)
    store_latest_feature_op = latest_feature.assign(features)
    # Expose both variables via a named collection so _read_vars can fetch
    # them back out of a restored checkpoint graph.
    ops.add_to_collection('my_vars', global_step)
    ops.add_to_collection('my_vars', latest_feature)
    return model_fn.EstimatorSpec(
        mode='train',
        train_op=control_flow_ops.group(
            [update_global_step_op, store_latest_feature_op]),
        loss=constant_op.constant(2.0))

  def _read_vars(self, model_dir):
    """Returns (global_step, latest_feature)."""
    with ops.Graph().as_default() as g:
      # Rebuild the graph from the latest checkpoint's MetaGraph and restore
      # variable values, then read back the 'my_vars' collection.
      ckpt_path = saver_lib.latest_checkpoint(model_dir)
      meta_filename = ckpt_path + '.meta'
      saver_lib.import_meta_graph(meta_filename)
      saver = saver_lib.Saver()
      with self.test_session(graph=g) as sess:
        saver.restore(sess, ckpt_path)
        return sess.run(ops.get_collection('my_vars'))

  def _build_iterator_saver_hook(self, est):
    """Builds the hook under test for the given estimator."""
    return iterator_ops.CheckpointInputPipelineHook(est)

  def testReturnDatasetFromInputFn(self):
    # input_fn returns a Dataset directly; the hook should checkpoint the
    # iterator so the second train() resumes where the first stopped.
    def _input_fn():
      return dataset_ops.Dataset.range(10)

    est = estimator.Estimator(model_fn=self._model_fn)

    est.train(_input_fn, steps=2, hooks=[self._build_iterator_saver_hook(est)])
    self.assertSequenceEqual(self._read_vars(est.model_dir), (2, 1))
    est.train(_input_fn, steps=2, hooks=[self._build_iterator_saver_hook(est)])
    # Features continued at 2, 3 (not restarted at 0), proving restoration.
    self.assertSequenceEqual(self._read_vars(est.model_dir), (4, 3))

  def testBuildIteratorInInputFn(self):
    # input_fn builds the iterator itself and returns get_next(); the hook
    # must still find and checkpoint the iterator state.
    def _input_fn():
      ds = dataset_ops.Dataset.range(10)
      iterator = ds.make_one_shot_iterator()
      return iterator.get_next()

    est = estimator.Estimator(model_fn=self._model_fn)

    est.train(_input_fn, steps=2, hooks=[self._build_iterator_saver_hook(est)])
    self.assertSequenceEqual(self._read_vars(est.model_dir), (2, 1))
    est.train(_input_fn, steps=2, hooks=[self._build_iterator_saver_hook(est)])
    self.assertSequenceEqual(self._read_vars(est.model_dir), (4, 3))

  def testDoNotRestore(self):
    def _input_fn():
      return dataset_ops.Dataset.range(10)

    est = estimator.Estimator(model_fn=self._model_fn)

    est.train(_input_fn, steps=2, hooks=[self._build_iterator_saver_hook(est)])
    self.assertSequenceEqual(self._read_vars(est.model_dir), (2, 1))
    est.train(_input_fn, steps=2, hooks=[self._build_iterator_saver_hook(est)])
    self.assertSequenceEqual(self._read_vars(est.model_dir), (4, 3))
    # Hook not provided, input pipeline was not restored.
    est.train(_input_fn, steps=2)
    self.assertSequenceEqual(self._read_vars(est.model_dir), (6, 1))

  def testRaiseErrorIfNoIterator(self):
    # No iterator anywhere in the input pipeline: the hook cannot checkpoint
    # anything and is expected to raise.
    def _input_fn():
      return constant_op.constant(1, dtype=dtypes.int64)

    est = estimator.Estimator(model_fn=self._model_fn)

    with self.assertRaises(ValueError):
      est.train(
          _input_fn, steps=2, hooks=[self._build_iterator_saver_hook(est)])
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
|
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// Simple hash functions used for internal data structures
#ifndef TENSORFLOW_LIB_HASH_HASH_H_
#define TENSORFLOW_LIB_HASH_HASH_H_
#include <stddef.h>
#include <stdint.h>
#include <string>
#include "tensorflow/core/platform/types.h"
namespace tensorflow {

// Seeded 32-bit and 64-bit hash functions over a raw byte buffer.
// NOTE(review): these values may be persisted/relied upon elsewhere, so the
// constants below must never change.
extern uint32 Hash32(const char* data, size_t n, uint32 seed);
extern uint64 Hash64(const char* data, size_t n, uint64 seed);

// Convenience overload using a fixed default seed.
inline uint64 Hash64(const char* data, size_t n) {
  return Hash64(data, n, 0xDECAFCAFFE);
}

// Hashes the full contents of a string with the default seed.
inline uint64 Hash64(const string& str) {
  return Hash64(str.data(), str.size());
}

// Mixes two 64-bit hashes into one. Not commutative: order of a and b matters.
// The additive constant appears to be a golden-ratio-style mixing constant
// (cf. boost::hash_combine) -- do not "fix" it.
inline uint64 Hash64Combine(uint64 a, uint64 b) {
  return a ^ (b + 0x9e3779b97f4a7800ULL + (a << 10) + (a >> 4));
}

}  // namespace tensorflow
#endif // TENSORFLOW_LIB_HASH_HASH_H_
|
/*
* Copyright 2007 ZXing authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System;
namespace com.google.zxing
{
/// <summary> The general exception class throw when something goes wrong during decoding of a barcode.
/// This includes, but is not limited to, failing checksums / error correction algorithms, being
/// unable to locate finder timing patterns, and so on.
///
/// </summary>
/// <author> Sean Owen
/// </author>
/// <author>www.Redivivus.in (suraj.supekar@redivivus.in) - Ported from ZXING Java Source
/// </author>
[Serializable]
public sealed class ReaderException:System.Exception
{
    // The single shared instance; see the remarks on Instance for why.
    private static readonly ReaderException instance = new ReaderException();

    private ReaderException()
    {
        // Intentionally empty: the exception carries no message and no state,
        // keeping each throw as cheap as possible.
    }

    /// <summary>
    /// Returns the shared, preallocated exception instance.
    /// Decoders throw this single instance rather than allocating a new
    /// exception for every failed decode attempt (up to ~400 per scanned
    /// image), avoiding allocation and stack-trace overhead on hot paths.
    /// In the future the decoders should return error codes for routine
    /// events instead, and this class can be deleted.
    /// </summary>
    public static ReaderException Instance
    {
        get
        {
            return instance;
        }
    }

    // NOTE(review): mirrors Java's Throwable.fillInStackTrace to suppress
    // stack-trace generation. This is not an override in .NET; because the
    // singleton pattern is used, that does not matter in practice.
    public System.Exception fillInStackTrace()
    {
        return null;
    }
}
} |
# Homebrew formula for git-crypt: transparent encryption/decryption of files
# in a git repository.
class GitCrypt < Formula
  desc "Enable transparent encryption/decryption of files in a git repo"
  homepage "https://www.agwa.name/projects/git-crypt/"
  url "https://www.agwa.name/projects/git-crypt/downloads/git-crypt-0.5.0.tar.gz"
  sha256 "0a8f92c0a0a125bf768d0c054d947ca4e4b8d6556454b0e7e87fb907ee17cf06"

  # Pre-built binary bottles, one checksum per macOS release.
  bottle do
    cellar :any
    sha256 "8d7f6640e34881ae40a2e949b7755eb9faa711399c37b86892df359b1a368bb2" => :sierra
    sha256 "ed93687aa2996d6171f9090062f24453028c7d9d97e9842a0fee7aee57648979" => :el_capitan
    sha256 "ce33f2d01af41259b6ea9be1e849000bdd08413b1f109268ea65709644d455eb" => :yosemite
    sha256 "2cedd573983fe7ec7387e76f9ffd0ba351e71e19e3382f7365209d1aad0f7e3f" => :mavericks
    sha256 "1bba33a973b90d39140a64193bcdab63b34c3b4f379850ee41ee155325173f4f" => :mountain_lion
  end

  # git-crypt links against OpenSSL for its cryptographic primitives.
  depends_on "openssl"

  def install
    system "make"
    bin.install "git-crypt"
  end

  test do
    # Smoke test: key generation should exit successfully.
    system "#{bin}/git-crypt", "keygen", "keyfile"
  end
end
|
// Copyright 2013 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package io.flutter.embedding.engine.mutatorsstack;
import android.graphics.Matrix;
import android.graphics.Path;
import android.graphics.Rect;
import android.graphics.RectF;
import androidx.annotation.Keep;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import java.util.ArrayList;
import java.util.List;
/**
 * The mutator stack containing a list of mutators
 *
 * <p>The mutators can be applied to a {@link io.flutter.plugin.platform.PlatformView} to perform a
 * series of mutations. See {@link FlutterMutatorsStack.FlutterMutator} for information on Mutators.
 */
@Keep
public class FlutterMutatorsStack {
  /**
   * The type of a Mutator See {@link FlutterMutatorsStack.FlutterMutator} for information on
   * Mutators.
   */
  public enum FlutterMutatorType {
    CLIP_RECT,
    CLIP_RRECT,
    CLIP_PATH,
    TRANSFORM,
    OPACITY
  }

  /**
   * A class represents a mutator
   *
   * <p>A mutator contains information of a single mutation operation that can be applied to a
   * {@link io.flutter.plugin.platform.PlatformView}. See {@link
   * FlutterMutatorsStack.FlutterMutator} for information on Mutators.
   */
  public class FlutterMutator {
    // Exactly one of the payload fields below is set, selected by |type|.
    @Nullable private Matrix matrix;
    @Nullable private Rect rect;
    @Nullable private Path path;
    @Nullable private float[] radiis;

    private FlutterMutatorType type;

    /**
     * Initialize a clip rect mutator.
     *
     * @param rect the rect to be clipped.
     */
    public FlutterMutator(Rect rect) {
      this.type = FlutterMutatorType.CLIP_RECT;
      this.rect = rect;
    }

    /**
     * Initialize a clip rrect mutator.
     *
     * @param rect the rect of the rrect
     * @param radiis the radiis of the rrect. Array of 8 values, 4 pairs of [X,Y]. This value cannot
     *     be null.
     */
    public FlutterMutator(Rect rect, float[] radiis) {
      this.type = FlutterMutatorType.CLIP_RRECT;
      this.rect = rect;
      this.radiis = radiis;
    }

    /**
     * Initialize a clip path mutator.
     *
     * @param path the path to be clipped.
     */
    public FlutterMutator(Path path) {
      this.type = FlutterMutatorType.CLIP_PATH;
      this.path = path;
    }

    /**
     * Initialize a transform mutator.
     *
     * @param matrix the transform matrix to apply.
     */
    public FlutterMutator(Matrix matrix) {
      this.type = FlutterMutatorType.TRANSFORM;
      this.matrix = matrix;
    }

    /**
     * Get the mutator type.
     *
     * @return The type of the mutator.
     */
    public FlutterMutatorType getType() {
      return type;
    }

    /**
     * Get the rect of the mutator if the {@link #getType()} returns FlutterMutatorType.CLIP_RECT.
     *
     * @return the clipping rect if the type is FlutterMutatorType.CLIP_RECT; otherwise null.
     */
    public Rect getRect() {
      return rect;
    }

    /**
     * Get the radii of the mutator if the {@link #getType()} returns FlutterMutatorType.CLIP_RRECT.
     *
     * <p>Added for consistency with {@link #getRect()}, {@link #getPath()} and {@link
     * #getMatrix()}: the radiis payload previously had no accessor.
     *
     * @return the rrect corner radii (8 values, 4 [X,Y] pairs) if the type is
     *     FlutterMutatorType.CLIP_RRECT; otherwise null.
     */
    public float[] getRadiis() {
      return radiis;
    }

    /**
     * Get the path of the mutator if the {@link #getType()} returns FlutterMutatorType.CLIP_PATH.
     *
     * @return the clipping path if the type is FlutterMutatorType.CLIP_PATH; otherwise null.
     */
    public Path getPath() {
      return path;
    }

    /**
     * Get the matrix of the mutator if the {@link #getType()} returns FlutterMutatorType.TRANSFORM.
     *
     * @return the matrix if the type is FlutterMutatorType.TRANSFORM; otherwise null.
     */
    public Matrix getMatrix() {
      return matrix;
    }
  }

  // All three fields are assigned exactly once in the constructor; final for clarity.
  private final @NonNull List<FlutterMutator> mutators;

  private final List<Path> finalClippingPaths;
  private final Matrix finalMatrix;

  /** Initialize the mutator stack. */
  public FlutterMutatorsStack() {
    this.mutators = new ArrayList<FlutterMutator>();
    finalMatrix = new Matrix();
    finalClippingPaths = new ArrayList<Path>();
  }

  /**
   * Push a transform {@link FlutterMutatorsStack.FlutterMutator} to the stack.
   *
   * @param values the transform matrix to be pushed to the stack. The array matches how a {@link
   *     android.graphics.Matrix} is constructed.
   */
  public void pushTransform(float[] values) {
    Matrix matrix = new Matrix();
    matrix.setValues(values);
    FlutterMutator mutator = new FlutterMutator(matrix);
    mutators.add(mutator);
    // Accumulate the overall transform as transforms are pushed.
    finalMatrix.preConcat(mutator.getMatrix());
  }

  /** Push a clipRect {@link FlutterMutatorsStack.FlutterMutator} to the stack. */
  public void pushClipRect(int left, int top, int right, int bottom) {
    Rect rect = new Rect(left, top, right, bottom);
    FlutterMutator mutator = new FlutterMutator(rect);
    mutators.add(mutator);
    // Convert the rect to a Path and transform it by the matrix accumulated so
    // far, so the stored clip is already in final coordinates.
    Path path = new Path();
    path.addRect(new RectF(rect), Path.Direction.CCW);
    path.transform(finalMatrix);
    finalClippingPaths.add(path);
  }

  /**
   * Push a clipRRect {@link FlutterMutatorsStack.FlutterMutator} to the stack.
   *
   * @param left left offset of the rrect.
   * @param top top offset of the rrect.
   * @param right right position of the rrect.
   * @param bottom bottom position of the rrect.
   * @param radiis the radiis of the rrect. It must be size of 8, including an x and y for each
   *     corner.
   */
  public void pushClipRRect(int left, int top, int right, int bottom, float[] radiis) {
    Rect rect = new Rect(left, top, right, bottom);
    FlutterMutator mutator = new FlutterMutator(rect, radiis);
    mutators.add(mutator);
    Path path = new Path();
    path.addRoundRect(new RectF(rect), radiis, Path.Direction.CCW);
    path.transform(finalMatrix);
    finalClippingPaths.add(path);
  }

  /**
   * Get a list of all the raw mutators. The 0 index of the returned list is the top of the stack.
   */
  public List<FlutterMutator> getMutators() {
    return mutators;
  }

  /**
   * Get a list of all the clipping operations. All the clipping operations -- whether it is clip
   * rect, clip rrect, or clip path -- are converted into Paths. The paths are also transformed with
   * the matrix that up to their stack positions. For example: If the stack looks like (from top to
   * bottom): TransA -> ClipA -> TransB -> ClipB, the final paths will look like
   * [TransA*ClipA, TransA*TransB*ClipB].
   *
   * <p>Clipping this list to the parent canvas of a view results the final clipping path.
   */
  public List<Path> getFinalClippingPaths() {
    return finalClippingPaths;
  }

  /**
   * Returns the final matrix. Apply this matrix to the canvas of a view results the final
   * transformation of the view.
   */
  public Matrix getFinalMatrix() {
    return finalMatrix;
  }
}
|
{% extends "base.html" %}
{% block columnwrap %}
<div id="content-main">
{% block content %}
{% endblock %}
</div>
<!-- END #content-main -->
<div id="content-related" class="sidebar">
{% block content-related %}
{% endblock %}
</div>
<!-- END #content-related -->
{% endblock %}
|
/*
* Copyright (C) 2015 Google Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef WebRTCKeyParams_h
#define WebRTCKeyParams_h
#include "WebCommon.h"
namespace blink {

// Corresponds to rtc::KeyType in WebRTC.
enum WebRTCKeyType { WebRTCKeyTypeRSA, WebRTCKeyTypeECDSA, WebRTCKeyTypeNull };

// Corresponds to rtc::RSAParams in WebRTC.
struct WebRTCRSAParams {
    unsigned modLength;  // RSA modulus length (presumably in bits -- matches rtc::RSAParams).
    unsigned pubExp;     // Public exponent.
};

// Corresponds to rtc::ECCurve in WebRTC.
enum WebRTCECCurve { WebRTCECCurveNistP256 };

// Corresponds to rtc::KeyParams in WebRTC.
// Tagged union: m_keyType selects which member of m_params is valid, and the
// typed accessors assert the tag before reading. Construct via the static
// factories; the default-constructed value has the Null type.
class WebRTCKeyParams {
public:
    // Creates RSA key parameters with the given modulus length and public exponent.
    static WebRTCKeyParams createRSA(unsigned modLength, unsigned pubExp)
    {
        WebRTCKeyParams keyParams(WebRTCKeyTypeRSA);
        keyParams.m_params.rsa.modLength = modLength;
        keyParams.m_params.rsa.pubExp = pubExp;
        return keyParams;
    }
    // Creates ECDSA key parameters for the given curve.
    static WebRTCKeyParams createECDSA(WebRTCECCurve curve)
    {
        WebRTCKeyParams keyParams(WebRTCKeyTypeECDSA);
        keyParams.m_params.ecCurve = curve;
        return keyParams;
    }

    WebRTCKeyParams() : WebRTCKeyParams(WebRTCKeyTypeNull) {}

    WebRTCKeyType keyType() const { return m_keyType; }
    // Valid only when keyType() == WebRTCKeyTypeRSA (asserted).
    WebRTCRSAParams rsaParams() const
    {
        BLINK_ASSERT(m_keyType == WebRTCKeyTypeRSA);
        return m_params.rsa;
    }
    // Valid only when keyType() == WebRTCKeyTypeECDSA (asserted).
    WebRTCECCurve ecCurve() const
    {
        BLINK_ASSERT(m_keyType == WebRTCKeyTypeECDSA);
        return m_params.ecCurve;
    }

private:
    WebRTCKeyParams(WebRTCKeyType keyType) : m_keyType(keyType) {}

    WebRTCKeyType m_keyType;
    union {
        WebRTCRSAParams rsa;
        WebRTCECCurve ecCurve;
    } m_params;
};

} // namespace blink
#endif // WebRTCKeyParams_h
|
/*===================================================================
The Medical Imaging Interaction Toolkit (MITK)
Copyright (c) German Cancer Research Center,
Division of Medical and Biological Informatics.
All rights reserved.
This software is distributed WITHOUT ANY WARRANTY; without
even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE.
See LICENSE.txt or http://www.mitk.org for details.
===================================================================*/
#include "mitkNavigationDataPassThroughFilter.h"
// The pass-through filter performs no processing of its own, so construction
// and destruction need no work beyond what the base class does.
mitk::NavigationDataPassThroughFilter::NavigationDataPassThroughFilter()
{
}

mitk::NavigationDataPassThroughFilter::~NavigationDataPassThroughFilter()
{
}
// Copies every input navigation data unmodified to the output with the same
// index, preserving the data-valid flag.
void mitk::NavigationDataPassThroughFilter::GenerateData()
{
  // get each input and transfer the data
  DataObjectPointerArray inputs = this->GetInputs(); //get all inputs
  for ( unsigned int index = 0; index < inputs.size(); ++index )
  {
    // get the needed variables (input and output)
    const mitk::NavigationData* nd = this->GetInput(index);
    mitk::NavigationData* output = this->GetOutput(index);

    if ( ! nd || ! output )
    {
      // log first, then throw, so the error is recorded even if the exception
      // is swallowed by a caller
      MITK_ERROR("NavigationDataToNavigationDataFilter")("NavigationDataPassThroughFilter")
        << "Input and output must not be null.";
      mitkThrow() << "Input and output must not be null.";
    }

    output->Graft(nd); // copy all information from input to output
    // Graft alone does not carry over validity, so set it explicitly.
    output->SetDataValid(nd->IsDataValid());
  }
}
|
<div ng-controller="MailListCtrl">
  <!-- header -->
  <div class="wrapper bg-light lter b-b">
    <div class="btn-group pull-right">
      <button type="button" class="btn btn-sm btn-bg btn-default"><i class="fa fa-chevron-left"></i></button>
      <button type="button" class="btn btn-sm btn-bg btn-default"><i class="fa fa-chevron-right"></i></button>
    </div>
    <div class="btn-toolbar">
      <div class="btn-group dropdown">
        <button class="btn btn-default btn-sm btn-bg dropdown-toggle" data-toggle="dropdown">
          <span class="dropdown-label">Filter</span>
          <span class="caret"></span>
        </button>
        <ul class="dropdown-menu text-left text-sm">
          <li><a ui-sref="app.mail.list({fold:'unread'})">Unread</a></li>
          <li><a ui-sref="app.mail.list({fold:'starred'})">Starred</a></li>
        </ul>
      </div>
      <div class="btn-group">
        <button class="btn btn-sm btn-bg btn-default" data-toggle="tooltip" data-placement="bottom" data-title="Refresh" data-original-title="" title=""><i class="fa fa-refresh"></i></button>
      </div>
    </div>
  </div>
  <!-- / header -->
  <!-- list -->
  <ul class="list-group list-group-lg no-radius m-b-none m-t-n-xxs">
    <li ng-repeat="mail in mails | filter:fold" ng-class="labelClass(mail.label)" class="list-group-item clearfix b-l-3x">
      <a ui-sref="app.page.profile" class="avatar thumb pull-left m-r">
        <img ng-src="{{mail.avatar}}">
      </a>
      <div class="pull-right text-sm text-muted">
        <span class="hidden-xs">{{ mail.date | fromNow }}</span>
        <!-- ng-show takes an Angular expression, not an interpolated string:
             "{{mail.attach}}" evaluated to the string "true"/"false", which is
             always truthy, so the icon was never hidden. -->
        <i class="fa fa-paperclip ng-hide m-l-sm" ng-show="mail.attach"></i>
      </div>
      <div class="clear">
        <div><a ui-sref="app.mail.detail({mailId:mail.id})" class="text-md">{{mail.subject}}</a><span class="label bg-light m-l-sm">{{mail.label}}</span></div>
        <div class="text-ellipsis m-t-xs">{{mail.content | limitTo:100}}</div>
      </div>
    </li>
  </ul>
  <!-- / list -->
</div>
/*
* Copyright 2013 The Polymer Authors. All rights reserved.
* Use of this source code is goverened by a BSD-style
* license that can be found in the LICENSE file.
*/
suite('ChildNodeInterface', function() {

  // Builds a div with light-DOM children a..e and a shadow root containing
  // f..k around a <content> insertion point, returning every node by name.
  function getTree() {
    var tree = {};
    var div = tree.div = document.createElement('div');
    div.innerHTML = 'a<b></b>c<d></d>e';
    var a = tree.a = div.firstChild;
    var b = tree.b = a.nextSibling;
    var c = tree.c = b.nextSibling;
    var d = tree.d = c.nextSibling;
    var e = tree.e = d.nextSibling;

    var sr = tree.sr = div.createShadowRoot();
    sr.innerHTML = 'f<g></g>h<content></content>i<j></j>k';
    var f = tree.f = sr.firstChild;
    var g = tree.g = f.nextSibling;
    var h = tree.h = g.nextSibling;
    var content = tree.content = h.nextSibling;
    var i = tree.i = content.nextSibling;
    var j = tree.j = i.nextSibling;
    var k = tree.k = j.nextSibling;

    div.offsetHeight; // trigger rendering

    return tree;
  }

  test('nextElementSibling', function() {
    var tree = getTree();
    // Light DOM: element siblings skip the text nodes a/c/e.
    assert.equal(tree.b.nextElementSibling, tree.d);
    assert.equal(tree.d.nextElementSibling, null);
    // Shadow DOM: <content> counts as an element sibling.
    assert.equal(tree.g.nextElementSibling, tree.content);
    assert.equal(tree.content.nextElementSibling, tree.j);
    assert.equal(tree.j.nextElementSibling, null);
  });

  test('previousElementSibling', function() {
    var tree = getTree();
    assert.equal(tree.b.previousElementSibling, null);
    assert.equal(tree.d.previousElementSibling, tree.b);
    assert.equal(tree.g.previousElementSibling, null);
    assert.equal(tree.content.previousElementSibling, tree.g);
    assert.equal(tree.j.previousElementSibling, tree.content);
  });

  test('remove', function() {
    var div = document.createElement('div');
    div.innerHTML = '<a></a>';
    var a = div.firstChild;
    // remove() detaches the node from its parent.
    a.remove();
    assert.equal(div.firstChild, null);
    assert.equal(a.parentNode, null);

    // no op.
    div.remove();
  });

});
|
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows
package ipv4_test
// protocolNotSupported reports whether err indicates the protocol is not
// supported. On the platforms covered by this build tag (everything outside
// the listed OSes) no such mapping is defined, so it always reports false.
func protocolNotSupported(err error) bool {
	return false
}
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import code
import cpp_util
from model import Platforms
from schema_util import CapitalizeFirstLetter
from schema_util import JsFunctionNameToClassName
import json
import os
import re
def _RemoveDescriptions(node):
"""Returns a copy of |schema| with "description" fields removed.
"""
if isinstance(node, dict):
result = {}
for key, value in node.items():
# Some schemas actually have properties called "description", so only
# remove descriptions that have string values.
if key == 'description' and isinstance(value, basestring):
continue
result[key] = _RemoveDescriptions(value)
return result
if isinstance(node, list):
return [_RemoveDescriptions(v) for v in node]
return node
class CppBundleGenerator(object):
  """This class contains methods to generate code based on multiple schemas.

  It owns the four file generators for the bundled output: the API
  registration .cc/.h pair and the schema-constants .cc/.h pair, each
  exposing Generate(namespace) -> code.Code.
  """
  def __init__(self,
               root,
               model,
               api_defs,
               cpp_type_generator,
               cpp_namespace,
               source_file_dir,
               impl_dir):
    # |root|: tree root against which generated include paths are checked
    #   on disk (see _APICCGenerator.Generate).
    # |model|: parsed model covering every bundled namespace.
    # |api_defs|: raw schema definitions (JSON-like dicts) backing |model|.
    self._root = root
    self._model = model
    self._api_defs = api_defs
    self._cpp_type_generator = cpp_type_generator
    self._cpp_namespace = cpp_namespace
    self._source_file_dir = source_file_dir
    self._impl_dir = impl_dir
    # Public sub-generators, one per output file.
    self.api_cc_generator = _APICCGenerator(self)
    self.api_h_generator = _APIHGenerator(self)
    self.schemas_cc_generator = _SchemasCCGenerator(self)
    self.schemas_h_generator = _SchemasHGenerator(self)
  def _GenerateHeader(self, file_base, body_code):
    """Generates a code.Code object for a header file

    Parameters:
    - |file_base| - the base of the filename, e.g. 'foo' (for 'foo.h')
    - |body_code| - the code to put in between the multiple inclusion guards"""
    c = code.Code()
    c.Append(cpp_util.CHROMIUM_LICENSE)
    c.Append()
    c.Append(cpp_util.GENERATED_BUNDLE_FILE_MESSAGE % self._source_file_dir)
    ifndef_name = cpp_util.GenerateIfndefName(self._source_file_dir, file_base)
    c.Append()
    c.Append('#ifndef %s' % ifndef_name)
    c.Append('#define %s' % ifndef_name)
    c.Append()
    c.Concat(body_code)
    c.Append()
    c.Append('#endif  // %s' % ifndef_name)
    c.Append()
    return c
  def _GetPlatformIfdefs(self, model_object):
    """Generates the "defined" conditional for an #if check if |model_object|
    has platform restrictions. Returns None if there are no restrictions.

    The result is a ' || '-joined string of defined(OS_*) terms, suitable
    for direct use after '#if '.
    """
    if model_object.platforms is None:
      return None
    ifdefs = []
    for platform in model_object.platforms:
      if platform == Platforms.CHROMEOS:
        ifdefs.append('defined(OS_CHROMEOS)')
      elif platform == Platforms.LINUX:
        ifdefs.append('defined(OS_LINUX)')
      elif platform == Platforms.MAC:
        ifdefs.append('defined(OS_MACOSX)')
      elif platform == Platforms.WIN:
        ifdefs.append('defined(OS_WIN)')
      else:
        raise ValueError("Unsupported platform ifdef: %s" % platform.name)
    return ' || '.join(ifdefs)
  def _GenerateRegisterFunctions(self, namespace_name, function):
    """Emits one RegisterFunction<...Function>() line for |function|,
    wrapped in the function's platform #if/#endif guards when it has any."""
    c = code.Code()
    function_ifdefs = self._GetPlatformIfdefs(function)
    if function_ifdefs is not None:
      c.Append("#if %s" % function_ifdefs, indent_level=0)
    function_name = JsFunctionNameToClassName(namespace_name, function.name)
    c.Append("registry->RegisterFunction<%sFunction>();" % (
        function_name))
    if function_ifdefs is not None:
      c.Append("#endif // %s" % function_ifdefs, indent_level=0)
    return c
  def _GenerateFunctionRegistryRegisterAll(self):
    """Builds GeneratedFunctionRegistry::RegisterAll(), registering every
    compilable function of every namespace (top-level ones and those
    declared on types), each namespace wrapped in its platform #ifdefs."""
    c = code.Code()
    c.Append('// static')
    c.Sblock('void GeneratedFunctionRegistry::RegisterAll('
             'ExtensionFunctionRegistry* registry) {')
    for namespace in self._model.namespaces.values():
      namespace_ifdefs = self._GetPlatformIfdefs(namespace)
      if namespace_ifdefs is not None:
        c.Append("#if %s" % namespace_ifdefs, indent_level=0)
      # NOTE(review): |namespace_name| is computed but the calls below pass
      # namespace.name (or a type-derived name) instead -- confirm whether
      # this local is intentionally unused.
      namespace_name = CapitalizeFirstLetter(namespace.name.replace(
          "experimental.", ""))
      for function in namespace.functions.values():
        if function.nocompile:
          continue
        c.Concat(self._GenerateRegisterFunctions(namespace.name, function))
      for type_ in namespace.types.values():
        for function in type_.functions.values():
          if function.nocompile:
            continue
          namespace_types_name = JsFunctionNameToClassName(
              namespace.name, type_.name)
          c.Concat(self._GenerateRegisterFunctions(namespace_types_name,
                                                   function))
      if namespace_ifdefs is not None:
        c.Append("#endif // %s" % namespace_ifdefs, indent_level=0)
    c.Eblock("}")
    return c
class _APIHGenerator(object):
  """Generates the header for API registration / declaration"""
  def __init__(self, cpp_bundle):
    self._bundle = cpp_bundle
  def Generate(self, namespace):
    """Emits generated_api.h: the GeneratedFunctionRegistry declaration
    inside the bundle's namespace, wrapped in include guards by
    _GenerateHeader. |namespace| is unused; the header covers all APIs."""
    c = code.Code()
    c.Append('#include <string>')
    c.Append()
    c.Append('#include "base/basictypes.h"')
    c.Append()
    c.Append("class ExtensionFunctionRegistry;")
    c.Append()
    c.Concat(cpp_util.OpenNamespace(self._bundle._cpp_namespace))
    c.Append()
    c.Append('class GeneratedFunctionRegistry {')
    c.Sblock(' public:')
    c.Append('static void RegisterAll('
             'ExtensionFunctionRegistry* registry);')
    c.Eblock('};')
    c.Append()
    c.Concat(cpp_util.CloseNamespace(self._bundle._cpp_namespace))
    return self._bundle._GenerateHeader('generated_api', c)
class _APICCGenerator(object):
  """Generates a code.Code object for the generated API .cc file"""
  def __init__(self, cpp_bundle):
    self._bundle = cpp_bundle
  def Generate(self, namespace):
    """Emits generated_api.cc: one #include per namespace whose *_api.h
    exists on disk (guarded by platform #ifdefs), followed by the
    RegisterAll() implementation.

    NOTE(review): the loop below reuses the name |namespace|, shadowing the
    (otherwise unused) method parameter.
    """
    c = code.Code()
    c.Append(cpp_util.CHROMIUM_LICENSE)
    c.Append()
    c.Append('#include "%s"' % (os.path.join(self._bundle._source_file_dir,
                                             'generated_api.h')))
    c.Append()
    for namespace in self._bundle._model.namespaces.values():
      namespace_name = namespace.unix_name.replace("experimental_", "")
      # Default header location unless compiler_options overrides it.
      implementation_header = namespace.compiler_options.get(
          "implemented_in",
          "%s/%s/%s_api.h" % (self._bundle._impl_dir,
                              namespace_name,
                              namespace_name))
      if not os.path.exists(
          os.path.join(self._bundle._root,
                       os.path.normpath(implementation_header))):
        # A missing header is fatal only when explicitly requested via
        # compiler_options; otherwise the namespace is silently skipped.
        if "implemented_in" in namespace.compiler_options:
          raise ValueError('Header file for namespace "%s" specified in '
                           'compiler_options not found: %s' %
                           (namespace.unix_name, implementation_header))
        continue
      ifdefs = self._bundle._GetPlatformIfdefs(namespace)
      if ifdefs is not None:
        c.Append("#if %s" % ifdefs, indent_level=0)
      c.Append('#include "%s"' % implementation_header)
      if ifdefs is not None:
        c.Append("#endif // %s" % ifdefs, indent_level=0)
    c.Append()
    c.Append('#include '
             '"extensions/browser/extension_function_registry.h"')
    c.Append()
    c.Concat(cpp_util.OpenNamespace(self._bundle._cpp_namespace))
    c.Append()
    c.Concat(self._bundle._GenerateFunctionRegistryRegisterAll())
    c.Append()
    c.Concat(cpp_util.CloseNamespace(self._bundle._cpp_namespace))
    c.Append()
    return c
class _SchemasHGenerator(object):
  """Generates a code.Code object for the generated schemas .h file"""
  def __init__(self, cpp_bundle):
    self._bundle = cpp_bundle
  def Generate(self, namespace):
    """Emits generated_schemas.h: the GeneratedSchemas declaration with its
    IsGenerated/Get static methods, wrapped in include guards by
    _GenerateHeader. |namespace| is unused; the header covers all schemas."""
    c = code.Code()
    c.Append('#include <map>')
    c.Append('#include <string>')
    c.Append()
    c.Append('#include "base/strings/string_piece.h"')
    c.Append()
    c.Concat(cpp_util.OpenNamespace(self._bundle._cpp_namespace))
    c.Append()
    c.Append('class GeneratedSchemas {')
    c.Sblock(' public:')
    c.Append('// Determines if schema named |name| is generated.')
    c.Append('static bool IsGenerated(std::string name);')
    c.Append()
    c.Append('// Gets the API schema named |name|.')
    c.Append('static base::StringPiece Get(const std::string& name);')
    c.Eblock('};')
    c.Append()
    c.Concat(cpp_util.CloseNamespace(self._bundle._cpp_namespace))
    return self._bundle._GenerateHeader('generated_schemas', c)
def _FormatNameAsConstant(name):
"""Formats a name to be a C++ constant of the form kConstantName"""
name = '%s%s' % (name[0].upper(), name[1:])
return 'k%s' % re.sub('_[a-z]',
lambda m: m.group(0)[1].upper(),
name.replace('.', '_'))
class _SchemasCCGenerator(object):
  """Generates a code.Code object for the generated schemas .cc file"""
  def __init__(self, cpp_bundle):
    self._bundle = cpp_bundle
  def Generate(self, namespace):
    """Emits generated_schemas.cc: one kConstant per API holding its
    description-stripped JSON, a lazily-built name->JSON map, and the
    GeneratedSchemas::Get/IsGenerated implementations. |namespace| is
    unused; the loops below shadow it with each bundled namespace."""
    c = code.Code()
    c.Append(cpp_util.CHROMIUM_LICENSE)
    c.Append()
    c.Append('#include "%s"' % (os.path.join(self._bundle._source_file_dir,
                                             'generated_schemas.h')))
    c.Append()
    c.Append('#include "base/lazy_instance.h"')
    c.Append()
    c.Append('namespace {')
    for api in self._bundle._api_defs:
      namespace = self._bundle._model.namespaces[api.get('namespace')]
      # JSON parsing code expects lists of schemas, so dump a singleton list.
      json_content = json.dumps([_RemoveDescriptions(api)],
                                separators=(',', ':'))
      # Escape all double-quotes and backslashes. For this to output a valid
      # JSON C string, we need to escape \ and ". Note that some schemas are
      # too large to compile on windows. Split the JSON up into several
      # strings, since apparently that helps.
      # (Escaping after slicing is safe: each raw character maps to an
      # independent escape sequence, and adjacent segments are re-joined by
      # C string-literal concatenation below.
      # NOTE(review): xrange is Python 2-only.)
      max_length = 8192
      segments = [json_content[i:i + max_length].replace('\\', '\\\\')
                      .replace('"', '\\"')
                  for i in xrange(0, len(json_content), max_length)]
      c.Append('const char %s[] = "%s";' %
               (_FormatNameAsConstant(namespace.name), '" "'.join(segments)))
    c.Append('}')
    c.Concat(cpp_util.OpenNamespace(self._bundle._cpp_namespace))
    c.Append()
    # A LazyInstance-held map from API name to its JSON constant.
    c.Sblock('struct Static {')
    c.Sblock('Static() {')
    for api in self._bundle._api_defs:
      namespace = self._bundle._model.namespaces[api.get('namespace')]
      c.Append('schemas["%s"] = %s;' % (namespace.name,
                                        _FormatNameAsConstant(namespace.name)))
    c.Eblock('}')
    c.Append()
    c.Append('std::map<std::string, const char*> schemas;')
    c.Eblock('};')
    c.Append()
    c.Append('base::LazyInstance<Static> g_lazy_instance;')
    c.Append()
    c.Append('// static')
    c.Sblock('base::StringPiece GeneratedSchemas::Get('
             'const std::string& name) {')
    c.Append('return IsGenerated(name) ? '
             'g_lazy_instance.Get().schemas[name] : "";')
    c.Eblock('}')
    c.Append()
    c.Append('// static')
    c.Sblock('bool GeneratedSchemas::IsGenerated(std::string name) {')
    c.Append('return g_lazy_instance.Get().schemas.count(name) > 0;')
    c.Eblock('}')
    c.Append()
    c.Concat(cpp_util.CloseNamespace(self._bundle._cpp_namespace))
    c.Append()
    return c
|
/*
* drivers/mtd/nand/diskonchip.c
*
* (C) 2003 Red Hat, Inc.
* (C) 2004 Dan Brown <dan_brown@ieee.org>
* (C) 2004 Kalev Lember <kalev@smartlink.ee>
*
* Author: David Woodhouse <dwmw2@infradead.org>
* Additional Diskonchip 2000 and Millennium support by Dan Brown <dan_brown@ieee.org>
* Diskonchip Millennium Plus support by Kalev Lember <kalev@smartlink.ee>
*
* Error correction code lifted from the old docecc code
* Author: Fabrice Bellard (fabrice.bellard@netgem.com)
* Copyright (C) 2000 Netgem S.A.
* converted to the generic Reed-Solomon library by Thomas Gleixner <tglx@linutronix.de>
*
* Interface to generic NAND code for M-Systems DiskOnChip devices
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rslib.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/doc2000.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/inftl.h>
/* Where to look for the devices? */
#ifndef CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS
#define CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS 0
#endif
/*
 * Physical addresses probed for a DiskOnChip, terminated by 0xffffffff.
 * The table is x86/Alpha specific; other architectures get only the
 * terminator (plus a build-time warning).
 */
static unsigned long __initdata doc_locations[] = {
#if defined (__alpha__) || defined(__i386__) || defined(__x86_64__)
#ifdef CONFIG_MTD_NAND_DISKONCHIP_PROBE_HIGH
	0xfffc8000, 0xfffca000, 0xfffcc000, 0xfffce000,
	0xfffd0000, 0xfffd2000, 0xfffd4000, 0xfffd6000,
	0xfffd8000, 0xfffda000, 0xfffdc000, 0xfffde000,
	0xfffe0000, 0xfffe2000, 0xfffe4000, 0xfffe6000,
	0xfffe8000, 0xfffea000, 0xfffec000, 0xfffee000,
#else /* CONFIG_MTD_DOCPROBE_HIGH */
	0xc8000, 0xca000, 0xcc000, 0xce000,
	0xd0000, 0xd2000, 0xd4000, 0xd6000,
	0xd8000, 0xda000, 0xdc000, 0xde000,
	0xe0000, 0xe2000, 0xe4000, 0xe6000,
	0xe8000, 0xea000, 0xec000, 0xee000,
#endif /* CONFIG_MTD_DOCPROBE_HIGH */
#else
#warning Unknown architecture for DiskOnChip. No default probe locations defined
#endif
	0xffffffff };
/* Head of the linked list of probed DiskOnChip MTDs (chained via
   doc_priv.nextdoc). */
static struct mtd_info *doclist = NULL;
/* Per-device driver state, hung off the nand_chip's priv pointer. */
struct doc_priv {
	void __iomem *virtadr;	/* mapped register/IO window */
	unsigned long physadr;	/* physical address of that window */
	u_char ChipID;		/* DOC_ChipID_* value used by the DoC_is_* tests */
	u_char CDSNControl;	/* software shadow of the CDSN control register */
	int chips_per_floor;	/* The number of chips detected on each floor */
	int curfloor;		/* currently selected floor */
	int curchip;		/* currently selected chip within that floor */
	int mh0_page;
	int mh1_page;
	struct mtd_info *nextdoc;	/* next device on the doclist chain */
};
/* This is the syndrome computed by the HW ecc generator upon reading an empty
   page, one with all 0xff for data and stored ecc code. */
static u_char empty_read_syndrome[6] = { 0x26, 0xff, 0x6d, 0x47, 0x73, 0x7a };
/* This is the ecc value computed by the HW ecc generator upon writing an empty
   page, one with all 0xff for data. */
static u_char empty_write_ecc[6] = { 0x4b, 0x00, 0xe2, 0x0e, 0x93, 0xf7 };
#define INFTL_BBT_RESERVED_BLOCKS 4
/* Chip-family predicates based on the ChipID probed at init time. */
#define DoC_is_MillenniumPlus(doc) ((doc)->ChipID == DOC_ChipID_DocMilPlus16 || (doc)->ChipID == DOC_ChipID_DocMilPlus32)
#define DoC_is_Millennium(doc) ((doc)->ChipID == DOC_ChipID_DocMil)
#define DoC_is_2000(doc) ((doc)->ChipID == DOC_ChipID_Doc2k)
/* Forward declarations for helpers used before their definitions. */
static void doc200x_hwcontrol(struct mtd_info *mtd, int cmd,
			      unsigned int bitmask);
static void doc200x_select_chip(struct mtd_info *mtd, int chip);
/* Module parameters (all settable at load time). */
static int debug = 0;		/* verbose printk tracing of register access */
module_param(debug, int, 0);
static int try_dword = 1;	/* probe for 32-bit access on Doc2000 */
module_param(try_dword, int, 0);
static int no_ecc_failures = 0;	/* report uncorrectable reads as corrected */
module_param(no_ecc_failures, int, 0);
static int no_autopart = 0;	/* skip automatic partition setup */
module_param(no_autopart, int, 0);
static int show_firmware_partition = 0;	/* expose the firmware area as a partition */
module_param(show_firmware_partition, int, 0);
/* Whether writing the INFTL bad-block table is allowed; defaults on only
   when the corresponding Kconfig option is set. */
#ifdef CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE
static int inftl_bbt_write = 1;
#else
static int inftl_bbt_write = 0;
#endif
module_param(inftl_bbt_write, int, 0);
static unsigned long doc_config_location = CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS;
module_param(doc_config_location, ulong, 0);
MODULE_PARM_DESC(doc_config_location, "Physical memory address at which to probe for DiskOnChip");
/* Sector size for HW ECC */
#define SECTOR_SIZE 512
/* The sector bytes are packed into NB_DATA 10 bit words */
#define NB_DATA (((SECTOR_SIZE + 1) * 8 + 6) / 10)
/* Number of roots */
#define NROOTS 4
/* First consecutive root */
#define FCR 510
/* Number of symbols */
#define NN 1023
/* the Reed Solomon control structure */
static struct rs_control *rs_decoder;
/*
 * The HW decoder in the DoC ASICs provides us an error syndrome,
 * which we must convert to a standard syndrome usable by the generic
 * Reed-Solomon library code.
 *
 * Fabrice Bellard figured this out in the old docecc code. I added
 * some comments, improved a minor bit and converted it to make use
 * of the generic Reed-Solomon library. tglx
 */
/*
 * doc_ecc_decode - correct up to NROOTS symbol errors in one 512-byte sector
 * @rs:   generic Reed-Solomon control structure (10-bit symbols, NN = 1023)
 * @data: sector data, corrected in place
 * @ecc:  the six syndrome/ECC bytes read back from the controller
 *
 * Converts the hardware-provided syndrome into the representation expected
 * by the generic RS library, runs decode_rs16() and applies the resulting
 * corrections to @data and the parity byte.
 *
 * Returns the number of corrected errors, a negative value from the
 * decoder if the sector is uncorrectable, or -EBADMSG if the parity byte
 * remains wrong after correction.
 */
static int doc_ecc_decode(struct rs_control *rs, uint8_t *data, uint8_t *ecc)
{
	int i, j, nerr, errpos[8];
	uint8_t parity;
	uint16_t ds[4], s[5], tmp, errval[8], syn[4];
	/* Convert the ecc bytes into four 10-bit words */
	ds[0] = ((ecc[4] & 0xff) >> 0) | ((ecc[5] & 0x03) << 8);
	ds[1] = ((ecc[5] & 0xfc) >> 2) | ((ecc[2] & 0x0f) << 6);
	ds[2] = ((ecc[2] & 0xf0) >> 4) | ((ecc[3] & 0x3f) << 4);
	ds[3] = ((ecc[3] & 0xc0) >> 6) | ((ecc[0] & 0xff) << 2);
	parity = ecc[1];
	/* Initialize the syndrome buffer */
	for (i = 0; i < NROOTS; i++)
		s[i] = ds[0];
	/*
	 * Evaluate
	 * s[i] = ds[3]x^3 + ds[2]x^2 + ds[1]x^1 + ds[0]
	 * where x = alpha^(FCR + i)
	 */
	for (j = 1; j < NROOTS; j++) {
		if (ds[j] == 0)
			continue;
		tmp = rs->index_of[ds[j]];
		for (i = 0; i < NROOTS; i++)
			s[i] ^= rs->alpha_to[rs_modnn(rs, tmp + (FCR + i) * j)];
	}
	/*
	 * Calc syn[i] = s[i] / alpha^(v + i).
	 *
	 * BUGFIX: the previous code tested syn[i] before syn[] had ever been
	 * written, i.e. it branched on uninitialized stack data.  The guard
	 * belongs on s[i] (index_of[0] is a sentinel that must not enter
	 * rs_modnn()), and zero syndromes must yield syn[i] == 0 so that
	 * decode_rs16() sees a fully defined syndrome vector.
	 */
	for (i = 0; i < NROOTS; i++) {
		if (s[i])
			syn[i] = rs_modnn(rs, rs->index_of[s[i]] + (NN - FCR - i));
		else
			syn[i] = 0;
	}
	/* Call the decoder library */
	nerr = decode_rs16(rs, NULL, NULL, 1019, syn, 0, errpos, 0, errval);
	/* Uncorrectable errors ? */
	if (nerr < 0)
		return nerr;
	/*
	 * Correct the errors. The bitpositions are a bit of magic,
	 * but they are given by the design of the de/encoder circuit
	 * in the DoC ASIC's.
	 */
	for (i = 0; i < nerr; i++) {
		int index, bitpos, pos = 1015 - errpos[i];
		uint8_t val;
		if (pos >= NB_DATA && pos < 1019)
			continue;
		if (pos < NB_DATA) {
			/* extract bit position (MSB first) */
			pos = 10 * (NB_DATA - 1 - pos) - 6;
			/* now correct the following 10 bits. At most two bytes
			   can be modified since pos is even */
			index = (pos >> 3) ^ 1;
			bitpos = pos & 7;
			if ((index >= 0 && index < SECTOR_SIZE) || index == (SECTOR_SIZE + 1)) {
				val = (uint8_t) (errval[i] >> (2 + bitpos));
				parity ^= val;
				if (index < SECTOR_SIZE)
					data[index] ^= val;
			}
			index = ((pos >> 3) + 1) ^ 1;
			bitpos = (bitpos + 10) & 7;
			if (bitpos == 0)
				bitpos = 8;
			if ((index >= 0 && index < SECTOR_SIZE) || index == (SECTOR_SIZE + 1)) {
				val = (uint8_t) (errval[i] << (8 - bitpos));
				parity ^= val;
				if (index < SECTOR_SIZE)
					data[index] ^= val;
			}
		}
	}
	/* If the parity is wrong, no rescue possible */
	return parity ? -EBADMSG : nerr;
}
/*
 * DoC_Delay - burn |cycles| bus cycles by issuing harmless register reads.
 * The register read depends on the chip family; only the access itself
 * matters, so the value is sunk into a volatile dummy.
 */
static void DoC_Delay(struct doc_priv *doc, unsigned short cycles)
{
	volatile char dummy;
	int i;
	for (i = 0; i < cycles; i++) {
		if (DoC_is_Millennium(doc))
			dummy = ReadDOC(doc->virtadr, NOP);
		else if (DoC_is_MillenniumPlus(doc))
			dummy = ReadDOC(doc->virtadr, Mplus_NOP);
		else
			dummy = ReadDOC(doc->virtadr, DOCStatus);
	}
}
/* Millennium Plus exposes two flash-ready bits; both must be set. */
#define CDSN_CTRL_FR_B_MASK (CDSN_CTRL_FR_B0 | CDSN_CTRL_FR_B1)
/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */
/*
 * Out-of-line slow path: poll the family-specific ready flag(s) for up to
 * ten seconds, yielding between samples.  Returns 0 once ready, -EIO on
 * timeout.
 */
static int _DoC_WaitReady(struct doc_priv *doc)
{
	void __iomem *docptr = doc->virtadr;
	unsigned long timeo = jiffies + (HZ * 10);
	if (debug)
		printk("_DoC_WaitReady...\n");
	/* Out-of-line routine to wait for chip response */
	if (DoC_is_MillenniumPlus(doc)) {
		while ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) {
			if (time_after(jiffies, timeo)) {
				printk("_DoC_WaitReady timed out.\n");
				return -EIO;
			}
			udelay(1);
			cond_resched();
		}
	} else {
		while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
			if (time_after(jiffies, timeo)) {
				printk("_DoC_WaitReady timed out.\n");
				return -EIO;
			}
			udelay(1);
			cond_resched();
		}
	}
	return 0;
}
/*
 * Fast-path ready check: apply the mandatory 4-cycle settle delay, and only
 * drop into the polling slow path if the chip is not already ready.
 * Returns 0 on success or -EIO propagated from _DoC_WaitReady().
 */
static inline int DoC_WaitReady(struct doc_priv *doc)
{
	void __iomem *docptr = doc->virtadr;
	int ret = 0;
	if (DoC_is_MillenniumPlus(doc)) {
		DoC_Delay(doc, 4);
		if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK)
			/* Call the out-of-line routine to wait */
			ret = _DoC_WaitReady(doc);
	} else {
		DoC_Delay(doc, 4);
		if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B))
			/* Call the out-of-line routine to wait */
			ret = _DoC_WaitReady(doc);
		/* Extra settle delay after a ready indication on pre-Plus parts. */
		DoC_Delay(doc, 2);
	}
	if (debug)
		printk("DoC_WaitReady OK\n");
	return ret;
}
/*
 * Write one command/address/data byte to a DiskOnChip 2000.  The byte goes
 * to the slow-IO register first and then to the CDSN IO window -- the
 * double write is the access sequence this driver uses throughout for the
 * 2000 family.
 */
static void doc2000_write_byte(struct mtd_info *mtd, u_char datum)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;
	if (debug)
		printk("write_byte %02x\n", datum);
	WriteDOC(datum, docptr, CDSNSlowIO);
	WriteDOC(datum, docptr, 2k_CDSN_IO);
}
/*
 * Read one byte from a DiskOnChip 2000: a dummy slow-IO read plus a short
 * delay precede the real read from the CDSN IO window.
 */
static u_char doc2000_read_byte(struct mtd_info *mtd)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;
	u_char ret;
	ReadDOC(docptr, CDSNSlowIO);
	DoC_Delay(doc, 2);
	ret = ReadDOC(docptr, 2k_CDSN_IO);
	if (debug)
		printk("read_byte returns %02x\n", ret);
	return ret;
}
/*
 * nand_chip write_buf for the 2000 family: write |len| bytes to successive
 * offsets in the CDSN IO window.  With debug enabled the first 16 bytes
 * are traced.
 */
static void doc2000_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;
	int i;
	if (debug)
		printk("writebuf of %d bytes: ", len);
	for (i = 0; i < len; i++) {
		WriteDOC_(buf[i], docptr, DoC_2k_CDSN_IO + i);
		if (debug && i < 16)
			printk("%02x ", buf[i]);
	}
	if (debug)
		printk("\n");
}
/*
 * nand_chip read_buf for the 2000 family: byte-wise reads from successive
 * offsets in the CDSN IO window.
 */
static void doc2000_readbuf(struct mtd_info *mtd, u_char *buf, int len)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;
	int i;
	if (debug)
		printk("readbuf of %d bytes: ", len);
	for (i = 0; i < len; i++) {
		buf[i] = ReadDOC(docptr, 2k_CDSN_IO + i);
	}
}
/*
 * Faster read_buf used when the probe confirmed the chip answers 32-bit
 * accesses: read the CDSN window a dword at a time, falling back to byte
 * reads when |buf| or |len| is not 4-byte aligned.
 */
static void doc2000_readbuf_dword(struct mtd_info *mtd, u_char *buf, int len)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;
	int i;
	if (debug)
		printk("readbuf_dword of %d bytes: ", len);
	if (unlikely((((unsigned long)buf) | len) & 3)) {
		for (i = 0; i < len; i++) {
			*(uint8_t *) (&buf[i]) = ReadDOC(docptr, 2k_CDSN_IO + i);
		}
	} else {
		for (i = 0; i < len; i += 4) {
			*(uint32_t *) (&buf[i]) = readl(docptr + DoC_2k_CDSN_IO + i);
		}
	}
}
/*
 * nand_chip verify_buf for the 2000 family: compare |len| bytes from the
 * chip against |buf|.  Returns 0 on match, -EFAULT on the first mismatch.
 * NOTE(review): unlike doc2000_readbuf() this reads the fixed 2k_CDSN_IO
 * offset (no "+ i") -- presumably any address in the window yields the
 * next streamed byte; confirm against the datasheet.
 */
static int doc2000_verifybuf(struct mtd_info *mtd, const u_char *buf, int len)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;
	int i;
	for (i = 0; i < len; i++)
		if (buf[i] != ReadDOC(docptr, 2k_CDSN_IO))
			return -EFAULT;
	return 0;
}
/*
 * Issue READID to chip |nr| and return (manufacturer << 8) | device id.
 * On the very first Doc2000 probe (nr == 0, try_dword set) the ID is also
 * re-read through a single 32-bit access; if both agree, read_buf is
 * switched to the faster dword routine.
 */
static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	uint16_t ret;
	doc200x_select_chip(mtd, nr);
	doc200x_hwcontrol(mtd, NAND_CMD_READID,
			  NAND_CTRL_CLE | NAND_CTRL_CHANGE);
	doc200x_hwcontrol(mtd, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE);
	doc200x_hwcontrol(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
	/* We cant' use dev_ready here, but at least we wait for the
	 * command to complete
	 */
	udelay(50);
	ret = this->read_byte(mtd) << 8;
	ret |= this->read_byte(mtd);
	if (doc->ChipID == DOC_ChipID_Doc2k && try_dword && !nr) {
		/* First chip probe. See if we get same results by 32-bit access */
		union {
			uint32_t dword;
			uint8_t byte[4];
		} ident;
		void __iomem *docptr = doc->virtadr;
		doc200x_hwcontrol(mtd, NAND_CMD_READID,
				  NAND_CTRL_CLE | NAND_CTRL_CHANGE);
		doc200x_hwcontrol(mtd, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE);
		doc200x_hwcontrol(mtd, NAND_CMD_NONE,
				  NAND_NCE | NAND_CTRL_CHANGE);
		udelay(50);
		ident.dword = readl(docptr + DoC_2k_CDSN_IO);
		if (((ident.byte[0] << 8) | ident.byte[1]) == ret) {
			printk(KERN_INFO "DiskOnChip 2000 responds to DWORD access\n");
			this->read_buf = &doc2000_readbuf_dword;
		}
	}
	return ret;
}
/*
 * Determine how many chips share a floor on a DiskOnChip 2000 by probing
 * chip IDs until one differs from chip 0's; the count is stored in
 * doc->chips_per_floor (initialized to the hardware maximum of 4).
 */
static void __init doc2000_count_chips(struct mtd_info *mtd)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	uint16_t mfrid;
	int i;
	/* Max 4 chips per floor on DiskOnChip 2000 */
	doc->chips_per_floor = 4;
	/* Find out what the first chip is */
	mfrid = doc200x_ident_chip(mtd, 0);
	/* Find how many chips in each floor. */
	for (i = 1; i < 4; i++) {
		if (doc200x_ident_chip(mtd, i) != mfrid)
			break;
	}
	doc->chips_per_floor = i;
	printk(KERN_DEBUG "Detected %d chips per floor.\n", i);
}
/*
 * nand_chip waitfunc: wait for ready, issue a STATUS command, wait again,
 * and return the status byte read back from the chip.
 */
static int doc200x_wait(struct mtd_info *mtd, struct nand_chip *this)
{
	struct doc_priv *doc = this->priv;
	int status;
	DoC_WaitReady(doc);
	this->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
	DoC_WaitReady(doc);
	status = (int)this->read_byte(mtd);
	return status;
}
/*
 * Write one byte to a Millennium: slow-IO register, then the Millennium
 * CDSN IO window, then a write-pipeline terminate.
 */
static void doc2001_write_byte(struct mtd_info *mtd, u_char datum)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;
	WriteDOC(datum, docptr, CDSNSlowIO);
	WriteDOC(datum, docptr, Mil_CDSN_IO);
	WriteDOC(datum, docptr, WritePipeTerm);
}
/*
 * Read one byte from a Millennium.  The read pipeline is primed with
 * ReadPipeInit and drained via LastDataRead, with the datasheet-mandated
 * extra delay (11.4.5) up front.
 */
static u_char doc2001_read_byte(struct mtd_info *mtd)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;
	//ReadDOC(docptr, CDSNSlowIO);
	/* 11.4.5 -- delay twice to allow extended length cycle */
	DoC_Delay(doc, 2);
	ReadDOC(docptr, ReadPipeInit);
	//return ReadDOC(docptr, Mil_CDSN_IO);
	return ReadDOC(docptr, LastDataRead);
}
/*
 * nand_chip write_buf for the Millennium: stream |len| bytes into the
 * CDSN IO window, then terminate the write pipeline.
 */
static void doc2001_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;
	int i;
	for (i = 0; i < len; i++)
		WriteDOC_(buf[i], docptr, DoC_Mil_CDSN_IO + i);
	/* Terminate write pipeline */
	WriteDOC(0x00, docptr, WritePipeTerm);
}
/*
 * nand_chip read_buf for the Millennium: prime the read pipeline, stream
 * the first len-1 bytes from the CDSN window, and fetch the final byte via
 * LastDataRead, which also terminates the pipeline.
 * NOTE(review): assumes len >= 1; len == 0 would write buf[0] anyway.
 */
static void doc2001_readbuf(struct mtd_info *mtd, u_char *buf, int len)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;
	int i;
	/* Start read pipeline */
	ReadDOC(docptr, ReadPipeInit);
	for (i = 0; i < len - 1; i++)
		buf[i] = ReadDOC(docptr, Mil_CDSN_IO + (i & 0xff));
	/* Terminate read pipeline */
	buf[i] = ReadDOC(docptr, LastDataRead);
}
/*
 * nand_chip verify_buf for the Millennium.  Returns 0 on a full match.
 * NOTE(review): on mismatch this returns the mismatching index i (which is
 * 0 for the first byte), unlike doc2000_verifybuf()'s -EFAULT -- confirm
 * the caller only tests for non-zero.
 */
static int doc2001_verifybuf(struct mtd_info *mtd, const u_char *buf, int len)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;
	int i;
	/* Start read pipeline */
	ReadDOC(docptr, ReadPipeInit);
	for (i = 0; i < len - 1; i++)
		if (buf[i] != ReadDOC(docptr, Mil_CDSN_IO)) {
			/* Drain the pipeline before bailing out. */
			ReadDOC(docptr, LastDataRead);
			return i;
		}
	if (buf[i] != ReadDOC(docptr, LastDataRead))
		return i;
	return 0;
}
/*
 * Read one byte from a Millennium Plus: two pipeline-init reads followed
 * by the terminating LastDataRead that yields the data.
 */
static u_char doc2001plus_read_byte(struct mtd_info *mtd)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;
	u_char ret;
	ReadDOC(docptr, Mplus_ReadPipeInit);
	ReadDOC(docptr, Mplus_ReadPipeInit);
	ret = ReadDOC(docptr, Mplus_LastDataRead);
	if (debug)
		printk("read_byte returns %02x\n", ret);
	return ret;
}
/*
 * nand_chip write_buf for the Millennium Plus: stream |len| bytes into the
 * CDSN IO window, tracing the first 16 when debug is on.
 */
static void doc2001plus_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;
	int i;
	if (debug)
		printk("writebuf of %d bytes: ", len);
	for (i = 0; i < len; i++) {
		WriteDOC_(buf[i], docptr, DoC_Mil_CDSN_IO + i);
		if (debug && i < 16)
			printk("%02x ", buf[i]);
	}
	if (debug)
		printk("\n");
}
/*
 * nand_chip read_buf for the Millennium Plus: prime the two-stage read
 * pipeline, stream the first len-2 bytes from the CDSN window, then drain
 * the final two bytes via Mplus_LastDataRead.
 * NOTE(review): assumes len >= 2; smaller lengths would index buf[len - 2]
 * out of range.
 */
static void doc2001plus_readbuf(struct mtd_info *mtd, u_char *buf, int len)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;
	int i;
	if (debug)
		printk("readbuf of %d bytes: ", len);
	/* Start read pipeline */
	ReadDOC(docptr, Mplus_ReadPipeInit);
	ReadDOC(docptr, Mplus_ReadPipeInit);
	for (i = 0; i < len - 2; i++) {
		buf[i] = ReadDOC(docptr, Mil_CDSN_IO);
		if (debug && i < 16)
			printk("%02x ", buf[i]);
	}
	/* Terminate read pipeline */
	buf[len - 2] = ReadDOC(docptr, Mplus_LastDataRead);
	if (debug && i < 16)
		printk("%02x ", buf[len - 2]);
	buf[len - 1] = ReadDOC(docptr, Mplus_LastDataRead);
	if (debug && i < 16)
		printk("%02x ", buf[len - 1]);
	if (debug)
		printk("\n");
}
/*
 * nand_chip verify_buf for the Millennium Plus.  Returns 0 on a full
 * match, otherwise the index of the first mismatching byte (note: 0 is
 * ambiguous with success for a first-byte mismatch, as in
 * doc2001_verifybuf()).  On early mismatch the pipeline is drained with
 * two LastDataRead accesses before returning.
 */
static int doc2001plus_verifybuf(struct mtd_info *mtd, const u_char *buf, int len)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;
	int i;
	if (debug)
		printk("verifybuf of %d bytes: ", len);
	/* Start read pipeline */
	ReadDOC(docptr, Mplus_ReadPipeInit);
	ReadDOC(docptr, Mplus_ReadPipeInit);
	for (i = 0; i < len - 2; i++)
		if (buf[i] != ReadDOC(docptr, Mil_CDSN_IO)) {
			ReadDOC(docptr, Mplus_LastDataRead);
			ReadDOC(docptr, Mplus_LastDataRead);
			return i;
		}
	if (buf[len - 2] != ReadDOC(docptr, Mplus_LastDataRead))
		return len - 2;
	if (buf[len - 1] != ReadDOC(docptr, Mplus_LastDataRead))
		return len - 1;
	return 0;
}
/*
 * nand_chip select_chip for the Millennium Plus.  chip == -1 deselects by
 * disabling the flash; otherwise the linear chip number is split into
 * floor/chip, the device is enabled (CE asserted, WP deasserted) and
 * reset, and the selection cached in doc->curchip/curfloor.
 */
static void doc2001plus_select_chip(struct mtd_info *mtd, int chip)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;
	int floor = 0;
	if (debug)
		printk("select chip (%d)\n", chip);
	if (chip == -1) {
		/* Disable flash internally */
		WriteDOC(0, docptr, Mplus_FlashSelect);
		return;
	}
	floor = chip / doc->chips_per_floor;
	chip -= (floor * doc->chips_per_floor);
	/* Assert ChipEnable and deassert WriteProtect */
	WriteDOC((DOC_FLASH_CE), docptr, Mplus_FlashSelect);
	this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
	doc->curchip = chip;
	doc->curfloor = floor;
}
/*
 * nand_chip select_chip for the 2000/Millennium families: map the linear
 * chip number onto floor/chip registers, deasserting CE around the switch
 * per datasheet 11.4.4.  chip == -1 (deselect) is a no-op here.
 */
static void doc200x_select_chip(struct mtd_info *mtd, int chip)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;
	int floor = 0;
	if (debug)
		printk("select chip (%d)\n", chip);
	if (chip == -1)
		return;
	floor = chip / doc->chips_per_floor;
	chip -= (floor * doc->chips_per_floor);
	/* 11.4.4 -- deassert CE before changing chip */
	doc200x_hwcontrol(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
	WriteDOC(floor, docptr, FloorSelect);
	WriteDOC(chip, docptr, CDSNDeviceSelect);
	doc200x_hwcontrol(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
	doc->curchip = chip;
	doc->curfloor = floor;
}
/* Control bits this driver manages in the CDSN control register. */
#define CDSN_CTRL_MSK (CDSN_CTRL_CE | CDSN_CTRL_CLE | CDSN_CTRL_ALE)
/*
 * nand_chip cmd_ctrl: mirror the requested CE/CLE/ALE state into the
 * shadowed CDSNControl register (followed by the mandatory 4 NOPs,
 * datasheet 11.4.3), then write |cmd| through the family-specific
 * write_byte helper when it is a real command byte.
 */
static void doc200x_hwcontrol(struct mtd_info *mtd, int cmd,
			      unsigned int ctrl)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;
	if (ctrl & NAND_CTRL_CHANGE) {
		doc->CDSNControl &= ~CDSN_CTRL_MSK;
		doc->CDSNControl |= ctrl & CDSN_CTRL_MSK;
		if (debug)
			printk("hwcontrol(%d): %02x\n", cmd, doc->CDSNControl);
		WriteDOC(doc->CDSNControl, docptr, CDSNControl);
		/* 11.4.3 -- 4 NOPs after CSDNControl write */
		DoC_Delay(doc, 4);
	}
	if (cmd != NAND_CMD_NONE) {
		if (DoC_is_2000(doc))
			doc2000_write_byte(mtd, cmd);
		else
			doc2001_write_byte(mtd, cmd);
	}
}
/*
 * nand_chip cmdfunc for the Millennium Plus: send |command| (translating
 * SEQIN column positions into the matching read-pointer command first),
 * clock out the column/page address cycles, and then apply the
 * command-specific wait policy.
 */
static void doc2001plus_command(struct mtd_info *mtd, unsigned command, int column, int page_addr)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;
	/*
	 * Must terminate write pipeline before sending any commands
	 * to the device.
	 */
	if (command == NAND_CMD_PAGEPROG) {
		WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
		WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
	}
	/*
	 * Write out the command to the device.
	 */
	if (command == NAND_CMD_SEQIN) {
		int readcmd;
		if (column >= mtd->writesize) {
			/* OOB area */
			column -= mtd->writesize;
			readcmd = NAND_CMD_READOOB;
		} else if (column < 256) {
			/* First 256 bytes --> READ0 */
			readcmd = NAND_CMD_READ0;
		} else {
			column -= 256;
			readcmd = NAND_CMD_READ1;
		}
		WriteDOC(readcmd, docptr, Mplus_FlashCmd);
	}
	WriteDOC(command, docptr, Mplus_FlashCmd);
	WriteDOC(0, docptr, Mplus_WritePipeTerm);
	WriteDOC(0, docptr, Mplus_WritePipeTerm);
	if (column != -1 || page_addr != -1) {
		/* Serially input address */
		if (column != -1) {
			/* Adjust columns for 16 bit buswidth */
			if (this->options & NAND_BUSWIDTH_16)
				column >>= 1;
			WriteDOC(column, docptr, Mplus_FlashAddress);
		}
		if (page_addr != -1) {
			WriteDOC((unsigned char)(page_addr & 0xff), docptr, Mplus_FlashAddress);
			WriteDOC((unsigned char)((page_addr >> 8) & 0xff), docptr, Mplus_FlashAddress);
			/* One more address cycle for higher density devices */
			if (this->chipsize & 0x0c000000) {
				WriteDOC((unsigned char)((page_addr >> 16) & 0x0f), docptr, Mplus_FlashAddress);
				printk("high density\n");
			}
		}
		WriteDOC(0, docptr, Mplus_WritePipeTerm);
		WriteDOC(0, docptr, Mplus_WritePipeTerm);
		/* deassert ALE */
		if (command == NAND_CMD_READ0 || command == NAND_CMD_READ1 ||
		    command == NAND_CMD_READOOB || command == NAND_CMD_READID)
			WriteDOC(0, docptr, Mplus_FlashControl);
	}
	/*
	 * program and erase have their own busy handlers
	 * status and sequential in needs no delay
	 */
	switch (command) {
	case NAND_CMD_PAGEPROG:
	case NAND_CMD_ERASE1:
	case NAND_CMD_ERASE2:
	case NAND_CMD_SEQIN:
	case NAND_CMD_STATUS:
		return;
	case NAND_CMD_RESET:
		if (this->dev_ready)
			break;
		udelay(this->chip_delay);
		/* No dev_ready: poll the status byte's ready bit (0x40). */
		WriteDOC(NAND_CMD_STATUS, docptr, Mplus_FlashCmd);
		WriteDOC(0, docptr, Mplus_WritePipeTerm);
		WriteDOC(0, docptr, Mplus_WritePipeTerm);
		while (!(this->read_byte(mtd) & 0x40)) ;
		return;
		/* This applies to read commands */
	default:
		/*
		 * If we don't have access to the busy pin, we apply the given
		 * command delay
		 */
		if (!this->dev_ready) {
			udelay(this->chip_delay);
			return;
		}
	}
	/* Apply this short delay always to ensure that we do wait tWB in
	 * any case on any machine. */
	ndelay(100);
	/* wait until command is processed */
	while (!this->dev_ready(mtd)) ;
}
/*
 * nand_chip dev_ready: sample the family-specific flash-ready flag(s),
 * honouring the datasheet's 4-NOP settle (11.4.2) before the read and the
 * extra 2-NOP delay after a ready indication on pre-Plus parts.  Returns
 * 1 when ready, 0 otherwise.
 */
static int doc200x_dev_ready(struct mtd_info *mtd)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;
	if (DoC_is_MillenniumPlus(doc)) {
		/* 11.4.2 -- must NOP four times before checking FR/B# */
		DoC_Delay(doc, 4);
		if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) {
			if (debug)
				printk("not ready\n");
			return 0;
		}
		if (debug)
			printk("was ready\n");
		return 1;
	} else {
		/* 11.4.2 -- must NOP four times before checking FR/B# */
		DoC_Delay(doc, 4);
		if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
			if (debug)
				printk("not ready\n");
			return 0;
		}
		/* 11.4.2 -- Must NOP twice if it's ready */
		DoC_Delay(doc, 2);
		if (debug)
			printk("was ready\n");
		return 1;
	}
}
/*
 * doc200x_block_bad - bad-block check used when no BBT could be found or
 * created.  As a last resort every block is reported good, since there is
 * no other way to probe for bad blocks here.
 */
static int doc200x_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
{
	return 0;
}
/*
 * Prime the HW ECC engine on 2000/Millennium parts for a read or write
 * pass: reset it, then enable (with DOC_ECC_RW selecting write direction).
 */
static void doc200x_enable_hwecc(struct mtd_info *mtd, int mode)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;
	/* Prime the ECC engine */
	switch (mode) {
	case NAND_ECC_READ:
		WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
		WriteDOC(DOC_ECC_EN, docptr, ECCConf);
		break;
	case NAND_ECC_WRITE:
		WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
		WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf);
		break;
	}
}
/*
 * Millennium Plus variant of doc200x_enable_hwecc(): identical sequence
 * via the Mplus_ECCConf register.
 */
static void doc2001plus_enable_hwecc(struct mtd_info *mtd, int mode)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;
	/* Prime the ECC engine */
	switch (mode) {
	case NAND_ECC_READ:
		WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
		WriteDOC(DOC_ECC_EN, docptr, Mplus_ECCConf);
		break;
	case NAND_ECC_WRITE:
		WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
		WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, Mplus_ECCConf);
		break;
	}
}
/* This code is only called on write */
/*
 * doc200x_calculate_ecc - read back the six hardware-generated ECC bytes
 * @mtd: MTD device
 * @dat: data the ECC was generated over (only used by the disabled check)
 * @ecc_code: out: six syndrome bytes from the ECC engine
 *
 * Flushes the ECC pipeline (the flush mechanism differs per chip family),
 * copies out the syndrome bytes and disables the engine.  Always returns 0.
 */
static int doc200x_calculate_ecc(struct mtd_info *mtd, const u_char *dat, unsigned char *ecc_code)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;
	int i;
	int emptymatch = 1;	/* NOTE(review): only consumed by the #if 0 block below */

	/* flush the pipeline */
	if (DoC_is_2000(doc)) {
		WriteDOC(doc->CDSNControl & ~CDSN_CTRL_FLASH_IO, docptr, CDSNControl);
		WriteDOC(0, docptr, 2k_CDSN_IO);
		WriteDOC(0, docptr, 2k_CDSN_IO);
		WriteDOC(0, docptr, 2k_CDSN_IO);
		WriteDOC(doc->CDSNControl, docptr, CDSNControl);
	} else if (DoC_is_MillenniumPlus(doc)) {
		WriteDOC(0, docptr, Mplus_NOP);
		WriteDOC(0, docptr, Mplus_NOP);
		WriteDOC(0, docptr, Mplus_NOP);
	} else {
		WriteDOC(0, docptr, NOP);
		WriteDOC(0, docptr, NOP);
		WriteDOC(0, docptr, NOP);
	}

	/* Read out the syndrome, remembering whether it matches the ECC of
	   an all-0xff (erased-looking) page. */
	for (i = 0; i < 6; i++) {
		if (DoC_is_MillenniumPlus(doc))
			ecc_code[i] = ReadDOC_(docptr, DoC_Mplus_ECCSyndrome0 + i);
		else
			ecc_code[i] = ReadDOC_(docptr, DoC_ECCSyndrome0 + i);
		if (ecc_code[i] != empty_write_ecc[i])
			emptymatch = 0;
	}

	/* Done generating; switch the ECC engine back off. */
	if (DoC_is_MillenniumPlus(doc))
		WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
	else
		WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
#if 0
	/* If emptymatch=1, we might have an all-0xff data buffer.  Check. */
	if (emptymatch) {
		/* Note: this somewhat expensive test should not be triggered
		   often.  It could be optimized away by examining the data in
		   the writebuf routine, and remembering the result. */
		for (i = 0; i < 512; i++) {
			if (dat[i] == 0xff)
				continue;
			emptymatch = 0;
			break;
		}
	}
	/* If emptymatch still =1, we do have an all-0xff data buffer.
	   Return all-0xff ecc value instead of the computed one, so
	   it'll look just like a freshly-erased page. */
	if (emptymatch)
		memset(ecc_code, 0xff, 6);
#endif
	return 0;
}
/*
 * doc200x_correct_data - check/correct a 512-byte block via the HW syndrome
 * @mtd: MTD device
 * @dat: the 512 data bytes just read
 * @read_ecc: the ECC bytes read from the OOB area
 * @isnull: unused; kept to satisfy the nand_chip ->correct interface
 *
 * Returns the number of corrected symbol errors, 0 if none, or a negative
 * value for an uncorrectable block -- unless the freshly-erased-block
 * heuristic decides the block is blank, or no_ecc_failures suppresses
 * the failure.
 */
static int doc200x_correct_data(struct mtd_info *mtd, u_char *dat,
				u_char *read_ecc, u_char *isnull)
{
	int i, ret = 0;
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	void __iomem *docptr = doc->virtadr;
	uint8_t calc_ecc[6];
	volatile u_char dummy;	/* volatile: the repeated reads must not be elided */
	int emptymatch = 1;

	/* flush the pipeline */
	if (DoC_is_2000(doc)) {
		dummy = ReadDOC(docptr, 2k_ECCStatus);
		dummy = ReadDOC(docptr, 2k_ECCStatus);
		dummy = ReadDOC(docptr, 2k_ECCStatus);
	} else if (DoC_is_MillenniumPlus(doc)) {
		dummy = ReadDOC(docptr, Mplus_ECCConf);
		dummy = ReadDOC(docptr, Mplus_ECCConf);
		dummy = ReadDOC(docptr, Mplus_ECCConf);
	} else {
		dummy = ReadDOC(docptr, ECCConf);
		dummy = ReadDOC(docptr, ECCConf);
		dummy = ReadDOC(docptr, ECCConf);
	}

	/* Error occurred?  (error flag is bit 7 of the last status read) */
	if (dummy & 0x80) {
		for (i = 0; i < 6; i++) {
			if (DoC_is_MillenniumPlus(doc))
				calc_ecc[i] = ReadDOC_(docptr, DoC_Mplus_ECCSyndrome0 + i);
			else
				calc_ecc[i] = ReadDOC_(docptr, DoC_ECCSyndrome0 + i);
			if (calc_ecc[i] != empty_read_syndrome[i])
				emptymatch = 0;
		}
		/* If emptymatch=1, the read syndrome is consistent with an
		   all-0xff data and stored ecc block.  Check the stored ecc. */
		if (emptymatch) {
			for (i = 0; i < 6; i++) {
				if (read_ecc[i] == 0xff)
					continue;
				emptymatch = 0;
				break;
			}
		}
		/* If emptymatch still =1, check the data block. */
		if (emptymatch) {
			/* Note: this somewhat expensive test should not be triggered
			   often.  It could be optimized away by examining the data in
			   the readbuf routine, and remembering the result. */
			for (i = 0; i < 512; i++) {
				if (dat[i] == 0xff)
					continue;
				emptymatch = 0;
				break;
			}
		}
		/* If emptymatch still =1, this is almost certainly a freshly-
		   erased block, in which case the ECC will not come out right.
		   We'll suppress the error and tell the caller everything's
		   OK.  Because it is. */
		if (!emptymatch)
			ret = doc_ecc_decode(rs_decoder, dat, calc_ecc);
		if (ret > 0)
			printk(KERN_ERR "doc200x_correct_data corrected %d errors\n", ret);
	}

	/* Whatever happened, the ECC engine gets switched off again. */
	if (DoC_is_MillenniumPlus(doc))
		WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
	else
		WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
	if (no_ecc_failures && (ret == -EBADMSG)) {
		printk(KERN_ERR "suppressing ECC failure\n");
		ret = 0;
	}
	return ret;
}
//u_char mydatabuf[528];
/* The strange out-of-order .oobfree list below is a (possibly unneeded)
 * attempt to retain compatibility.  It used to read:
 *	.oobfree = { {8, 8} }
 * Since that leaves two bytes unusable, it was changed.  But the following
 * scheme might affect existing jffs2 installs by moving the cleanmarker:
 *	.oobfree = { {6, 10} }
 * jffs2 seems to handle the above gracefully, but the current scheme seems
 * safer.  The only problem with it is that any code that parses oobfree must
 * be able to handle out-of-order segments.
 */
/* OOB layout: ECC in bytes 0-5, free space at 8-15 and 6-7 (see above). */
static struct nand_ecclayout doc200x_oobinfo = {
	.eccbytes = 6,
	.eccpos = {0, 1, 2, 3, 4, 5},
	.oobfree = {{8, 8}, {6, 2}}
};
/* Find the (I)NFTL Media Header, and optionally also the mirror media header.
   On successful return, buf will contain a copy of the media header for
   further processing.  id is the string to scan for, and will presumably be
   either "ANAND" or "BNAND".  If findmirror=1, also look for the mirror media
   header.  The page #s of the found media headers are placed in mh0_page and
   mh1_page in the DOC private structure.
   Returns the number of headers found (0, 1 or 2). */
static int __init find_media_headers(struct mtd_info *mtd, u_char *buf, const char *id, int findmirror)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	unsigned offs;
	int ret;
	size_t retlen;

	/* Scan one page per erase block; the header sits at a block start. */
	for (offs = 0; offs < mtd->size; offs += mtd->erasesize) {
		ret = mtd->read(mtd, offs, mtd->writesize, &retlen, buf);
		if (retlen != mtd->writesize)
			continue;
		if (ret) {
			/* ECC errors are reported but don't stop the scan. */
			printk(KERN_WARNING "ECC error scanning DOC at 0x%x\n", offs);
		}
		if (memcmp(buf, id, 6))
			continue;
		printk(KERN_INFO "Found DiskOnChip %s Media Header at 0x%x\n", id, offs);
		if (doc->mh0_page == -1) {
			doc->mh0_page = offs >> this->page_shift;
			if (!findmirror)
				return 1;
			continue;
		}
		doc->mh1_page = offs >> this->page_shift;
		return 2;
	}
	if (doc->mh0_page == -1) {
		printk(KERN_WARNING "DiskOnChip %s Media Header not found.\n", id);
		return 0;
	}
	/* Only one mediaheader was found.  We want buf to contain a
	   mediaheader on return, so we'll have to re-read the one we found. */
	offs = doc->mh0_page << this->page_shift;
	ret = mtd->read(mtd, offs, mtd->writesize, &retlen, buf);
	/* NOTE(review): only retlen is checked here; an ECC error (ret != 0)
	   on the re-read goes unreported -- verify that is intentional. */
	if (retlen != mtd->writesize) {
		/* Insanity.  Give up. */
		printk(KERN_ERR "Read DiskOnChip Media Header once, but can't reread it???\n");
		return 0;
	}
	return 1;
}
/*
 * nftl_partscan - locate the NFTL media header and derive the partition map
 * @mtd: MTD device
 * @parts: out: partition table (up to 3 entries filled)
 *
 * Finds the "ANAND" media header(s), auto-corrects UnitSizeFactor if the
 * header left it as 0x00, rescales the virtual erase size accordingly, and
 * fills @parts.  Returns the number of partitions, or 0 on failure.
 */
static inline int __init nftl_partscan(struct mtd_info *mtd, struct mtd_partition *parts)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	int ret = 0;
	u_char *buf;
	struct NFTLMediaHeader *mh;
	const unsigned psize = 1 << this->page_shift;
	int numparts = 0;
	unsigned blocks, maxblocks;
	int offs, numheaders;

	buf = kmalloc(mtd->writesize, GFP_KERNEL);
	if (!buf) {
		printk(KERN_ERR "DiskOnChip mediaheader kmalloc failed!\n");
		return 0;
	}
	if (!(numheaders = find_media_headers(mtd, buf, "ANAND", 1)))
		goto out;
	mh = (struct NFTLMediaHeader *)buf;

	/* Header fields are stored little-endian on the medium. */
	le16_to_cpus(&mh->NumEraseUnits);
	le16_to_cpus(&mh->FirstPhysicalEUN);
	le32_to_cpus(&mh->FormattedSize);

	printk(KERN_INFO "	DataOrgID        = %s\n"
			 "	NumEraseUnits    = %d\n"
			 "	FirstPhysicalEUN = %d\n"
			 "	FormattedSize    = %d\n"
			 "	UnitSizeFactor   = %d\n",
	       mh->DataOrgID, mh->NumEraseUnits,
	       mh->FirstPhysicalEUN, mh->FormattedSize,
	       mh->UnitSizeFactor);

	blocks = mtd->size >> this->phys_erase_shift;
	maxblocks = min(32768U, mtd->erasesize - psize);

	if (mh->UnitSizeFactor == 0x00) {
		/* Auto-determine UnitSizeFactor.  The constraints are:
		   - There can be at most 32768 virtual blocks.
		   - There can be at most (virtual block size - page size)
		   virtual blocks (because MediaHeader+BBT must fit in 1).
		 */
		mh->UnitSizeFactor = 0xff;
		while (blocks > maxblocks) {
			blocks >>= 1;
			maxblocks = min(32768U, (maxblocks << 1) + psize);
			mh->UnitSizeFactor--;
		}
		printk(KERN_WARNING "UnitSizeFactor=0x00 detected.  Correct value is assumed to be 0x%02x.\n", mh->UnitSizeFactor);
	}

	/* NOTE: The lines below modify internal variables of the NAND and MTD
	   layers; variables which have already been configured by nand_scan.
	   Unfortunately, we didn't know before this point what these values
	   should be.  Thus, this code is somewhat dependent on the exact
	   implementation of the NAND layer.  */
	if (mh->UnitSizeFactor != 0xff) {
		this->bbt_erase_shift += (0xff - mh->UnitSizeFactor);
		mtd->erasesize <<= (0xff - mh->UnitSizeFactor);
		printk(KERN_INFO "Setting virtual erase size to %d\n", mtd->erasesize);
		blocks = mtd->size >> this->bbt_erase_shift;
		maxblocks = min(32768U, mtd->erasesize - psize);
	}
	if (blocks > maxblocks) {
		printk(KERN_ERR "UnitSizeFactor of 0x%02x is inconsistent with device size.  Aborting.\n", mh->UnitSizeFactor);
		goto out;
	}

	/* Skip past the media headers. */
	offs = max(doc->mh0_page, doc->mh1_page);
	offs <<= this->page_shift;
	offs += mtd->erasesize;

	if (show_firmware_partition == 1) {
		parts[0].name = " DiskOnChip Firmware / Media Header partition";
		parts[0].offset = 0;
		parts[0].size = offs;
		numparts = 1;
	}
	parts[numparts].name = " DiskOnChip BDTL partition";
	parts[numparts].offset = offs;
	parts[numparts].size = (mh->NumEraseUnits - numheaders) << this->bbt_erase_shift;
	offs += parts[numparts].size;
	numparts++;
	if (offs < mtd->size) {
		parts[numparts].name = " DiskOnChip Remainder partition";
		parts[numparts].offset = offs;
		parts[numparts].size = mtd->size - offs;
		numparts++;
	}
	ret = numparts;
 out:
	kfree(buf);
	return ret;
}
/* This is a stripped-down copy of the code in inftlmount.c */
/*
 * inftl_partscan - locate the INFTL media header and derive the partitions
 * @mtd: MTD device
 * @parts: out: partition table (up to 5 entries filled)
 *
 * Finds the "BNAND" header, byte-swaps its fields, sanity-checks the block
 * multiplier and BBT constraints, then walks the embedded partition table.
 * Returns the number of partitions, or 0 on failure.
 */
static inline int __init inftl_partscan(struct mtd_info *mtd, struct mtd_partition *parts)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	int ret = 0;
	u_char *buf;
	struct INFTLMediaHeader *mh;
	struct INFTLPartition *ip;
	int numparts = 0;
	int blocks;
	int vshift, lastvunit = 0;
	int i;
	int end = mtd->size;

	/* When BBTs are writeable, reserve the tail blocks for them. */
	if (inftl_bbt_write)
		end -= (INFTL_BBT_RESERVED_BLOCKS << this->phys_erase_shift);

	buf = kmalloc(mtd->writesize, GFP_KERNEL);
	if (!buf) {
		printk(KERN_ERR "DiskOnChip mediaheader kmalloc failed!\n");
		return 0;
	}
	if (!find_media_headers(mtd, buf, "BNAND", 0))
		goto out;

	/* INFTL keeps the mirror header 4KiB after the primary one. */
	doc->mh1_page = doc->mh0_page + (4096 >> this->page_shift);
	mh = (struct INFTLMediaHeader *)buf;

	/* Header fields are stored little-endian on the medium. */
	le32_to_cpus(&mh->NoOfBootImageBlocks);
	le32_to_cpus(&mh->NoOfBinaryPartitions);
	le32_to_cpus(&mh->NoOfBDTLPartitions);
	le32_to_cpus(&mh->BlockMultiplierBits);
	le32_to_cpus(&mh->FormatFlags);
	le32_to_cpus(&mh->PercentUsed);

	printk(KERN_INFO "	bootRecordID          = %s\n"
			 "	NoOfBootImageBlocks   = %d\n"
			 "	NoOfBinaryPartitions  = %d\n"
			 "	NoOfBDTLPartitions    = %d\n"
			 "	BlockMultiplerBits    = %d\n"
			 "	FormatFlgs            = %d\n"
			 "	OsakVersion           = %d.%d.%d.%d\n"
			 "	PercentUsed           = %d\n",
	       mh->bootRecordID, mh->NoOfBootImageBlocks,
	       mh->NoOfBinaryPartitions,
	       mh->NoOfBDTLPartitions,
	       mh->BlockMultiplierBits, mh->FormatFlags,
	       ((unsigned char *) &mh->OsakVersion)[0] & 0xf,
	       ((unsigned char *) &mh->OsakVersion)[1] & 0xf,
	       ((unsigned char *) &mh->OsakVersion)[2] & 0xf,
	       ((unsigned char *) &mh->OsakVersion)[3] & 0xf,
	       mh->PercentUsed);

	vshift = this->phys_erase_shift + mh->BlockMultiplierBits;
	blocks = mtd->size >> vshift;
	if (blocks > 32768) {
		printk(KERN_ERR "BlockMultiplierBits=%d is inconsistent with device size.  Aborting.\n", mh->BlockMultiplierBits);
		goto out;
	}
	blocks = doc->chips_per_floor << (this->chip_shift - this->phys_erase_shift);
	if (inftl_bbt_write && (blocks > mtd->erasesize)) {
		printk(KERN_ERR "Writeable BBTs spanning more than one erase block are not yet supported.  FIX ME!\n");
		goto out;
	}

	/* Scan the partitions */
	for (i = 0; (i < 4); i++) {
		ip = &(mh->Partitions[i]);
		le32_to_cpus(&ip->virtualUnits);
		le32_to_cpus(&ip->firstUnit);
		le32_to_cpus(&ip->lastUnit);
		le32_to_cpus(&ip->flags);
		le32_to_cpus(&ip->spareUnits);
		le32_to_cpus(&ip->Reserved0);

		printk(KERN_INFO "	PARTITION[%d] ->\n"
			 "		virtualUnits    = %d\n"
			 "		firstUnit       = %d\n"
			 "		lastUnit        = %d\n"
			 "		flags           = 0x%x\n"
			 "		spareUnits      = %d\n",
		       i, ip->virtualUnits, ip->firstUnit,
		       ip->lastUnit, ip->flags,
		       ip->spareUnits);

		/* Expose the IPL/header region as its own partition if asked. */
		if ((show_firmware_partition == 1) &&
		    (i == 0) && (ip->firstUnit > 0)) {
			parts[0].name = " DiskOnChip IPL / Media Header partition";
			parts[0].offset = 0;
			parts[0].size = mtd->erasesize * ip->firstUnit;
			numparts = 1;
		}

		if (ip->flags & INFTL_BINARY)
			parts[numparts].name = " DiskOnChip BDK partition";
		else
			parts[numparts].name = " DiskOnChip BDTL partition";
		parts[numparts].offset = ip->firstUnit << vshift;
		parts[numparts].size = (1 + ip->lastUnit - ip->firstUnit) << vshift;
		numparts++;
		if (ip->lastUnit > lastvunit)
			lastvunit = ip->lastUnit;
		if (ip->flags & INFTL_LAST)
			break;
	}
	lastvunit++;
	if ((lastvunit << vshift) < end) {
		parts[numparts].name = " DiskOnChip Remainder partition";
		parts[numparts].offset = lastvunit << vshift;
		parts[numparts].size = end - parts[numparts].offset;
		numparts++;
	}
	ret = numparts;
 out:
	kfree(buf);
	return ret;
}
/*
 * nftl_scan_bbt - scan for the NFTL bad-block table and register the device
 * @mtd: MTD device
 *
 * On NFTL the partition map must be read first, because the BBTs live in
 * absolute pages right after the media headers.  Registers the raw device
 * and (unless no_autopart) its partitions.  Returns 0 or a -errno.
 */
static int __init nftl_scan_bbt(struct mtd_info *mtd)
{
	int ret, numparts;
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	struct mtd_partition parts[2];

	memset((char *)parts, 0, sizeof(parts));
	/* On NFTL, we have to find the media headers before we can read the
	   BBTs, since they're stored in the media header eraseblocks. */
	numparts = nftl_partscan(mtd, parts);
	if (!numparts)
		return -EIO;

	/* Main BBT descriptor: the page right after media header 0. */
	this->bbt_td->options = NAND_BBT_ABSPAGE | NAND_BBT_8BIT |
				NAND_BBT_SAVECONTENT | NAND_BBT_WRITE |
				NAND_BBT_VERSION;
	this->bbt_td->veroffs = 7;
	this->bbt_td->pages[0] = doc->mh0_page + 1;
	if (doc->mh1_page != -1) {
		/* Mirror BBT descriptor, only when a mirror header exists. */
		this->bbt_md->options = NAND_BBT_ABSPAGE | NAND_BBT_8BIT |
					NAND_BBT_SAVECONTENT | NAND_BBT_WRITE |
					NAND_BBT_VERSION;
		this->bbt_md->veroffs = 7;
		this->bbt_md->pages[0] = doc->mh1_page + 1;
	} else {
		this->bbt_md = NULL;
	}

	/* It's safe to set bd=NULL below because NAND_BBT_CREATE is not set.
	   At least as nand_bbt.c is currently written. */
	if ((ret = nand_scan_bbt(mtd, NULL)))
		return ret;
	add_mtd_device(mtd);
#ifdef CONFIG_MTD_PARTITIONS
	if (!no_autopart)
		add_mtd_partitions(mtd, parts, numparts);
#endif
	return 0;
}
/*
 * inftl_scan_bbt - scan for the INFTL bad-block table and register the device
 * @mtd: MTD device
 *
 * Unlike NFTL, the INFTL BBT location is fixed (absolute page 2 on the
 * Millennium Plus, last-block pattern search otherwise), so the BBT is read
 * before the partition scan.  Returns 0 or a -errno.
 */
static int __init inftl_scan_bbt(struct mtd_info *mtd)
{
	int ret, numparts;
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;
	struct mtd_partition parts[5];

	if (this->numchips > doc->chips_per_floor) {
		printk(KERN_ERR "Multi-floor INFTL devices not yet supported.\n");
		return -EIO;
	}

	if (DoC_is_MillenniumPlus(doc)) {
		this->bbt_td->options = NAND_BBT_2BIT | NAND_BBT_ABSPAGE;
		if (inftl_bbt_write)
			this->bbt_td->options |= NAND_BBT_WRITE;
		this->bbt_td->pages[0] = 2;
		this->bbt_md = NULL;
	} else {
		/* Search for "MSYS_BBT"/"TBB_SYSM" patterns near the end of
		   the device. */
		this->bbt_td->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT | NAND_BBT_VERSION;
		if (inftl_bbt_write)
			this->bbt_td->options |= NAND_BBT_WRITE;
		this->bbt_td->offs = 8;
		this->bbt_td->len = 8;
		this->bbt_td->veroffs = 7;
		this->bbt_td->maxblocks = INFTL_BBT_RESERVED_BLOCKS;
		this->bbt_td->reserved_block_code = 0x01;
		this->bbt_td->pattern = "MSYS_BBT";

		this->bbt_md->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT | NAND_BBT_VERSION;
		if (inftl_bbt_write)
			this->bbt_md->options |= NAND_BBT_WRITE;
		this->bbt_md->offs = 8;
		this->bbt_md->len = 8;
		this->bbt_md->veroffs = 7;
		this->bbt_md->maxblocks = INFTL_BBT_RESERVED_BLOCKS;
		this->bbt_md->reserved_block_code = 0x01;
		this->bbt_md->pattern = "TBB_SYSM";
	}

	/* It's safe to set bd=NULL below because NAND_BBT_CREATE is not set.
	   At least as nand_bbt.c is currently written. */
	if ((ret = nand_scan_bbt(mtd, NULL)))
		return ret;

	memset((char *)parts, 0, sizeof(parts));
	numparts = inftl_partscan(mtd, parts);
	/* At least for now, require the INFTL Media Header.  We could probably
	   do without it for non-INFTL use, since all it gives us is
	   autopartitioning, but I want to give it more thought. */
	if (!numparts)
		return -EIO;
	add_mtd_device(mtd);
#ifdef CONFIG_MTD_PARTITIONS
	if (!no_autopart)
		add_mtd_partitions(mtd, parts, numparts);
#endif
	return 0;
}
/*
 * doc2000_init - hook up the DiskOnChip 2000 access methods
 * @mtd: MTD device
 *
 * Installs the DoC 2000 I/O callbacks and the NFTL BBT scanner, then counts
 * the flash chips.  Returns the maximum number of chips nand_scan() should
 * probe (4 floors times chips per floor).
 */
static inline int __init doc2000_init(struct mtd_info *mtd)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;

	this->read_byte = doc2000_read_byte;
	this->write_buf = doc2000_writebuf;
	this->read_buf = doc2000_readbuf;
	this->verify_buf = doc2000_verifybuf;
	this->scan_bbt = nftl_scan_bbt;

	doc->CDSNControl = CDSN_CTRL_FLASH_IO | CDSN_CTRL_ECC_IO;
	doc2000_count_chips(mtd);
	mtd->name = "DiskOnChip 2000 (NFTL Model)";
	return (4 * doc->chips_per_floor);
}
/*
 * doc2001_init - hook up the DiskOnChip Millennium access methods
 * @mtd: MTD device
 *
 * Installs the Millennium I/O callbacks, then re-reads the ChipID to tell a
 * true Millennium apart from the newer DiskOnChip 2000 units that use a
 * similar ASIC.  Returns the maximum number of chips nand_scan() should
 * probe.
 */
static inline int __init doc2001_init(struct mtd_info *mtd)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;

	this->read_byte = doc2001_read_byte;
	this->write_buf = doc2001_writebuf;
	this->read_buf = doc2001_readbuf;
	this->verify_buf = doc2001_verifybuf;

	/* The ChipID register needs several reads to settle before the value
	   can be trusted. */
	ReadDOC(doc->virtadr, ChipID);
	ReadDOC(doc->virtadr, ChipID);
	ReadDOC(doc->virtadr, ChipID);
	if (ReadDOC(doc->virtadr, ChipID) != DOC_ChipID_DocMil) {
		/* It's not a Millennium; it's one of the newer
		   DiskOnChip 2000 units with a similar ASIC.
		   Treat it like a Millennium, except that it
		   can have multiple chips. */
		doc2000_count_chips(mtd);
		mtd->name = "DiskOnChip 2000 (INFTL Model)";
		this->scan_bbt = inftl_scan_bbt;
		return (4 * doc->chips_per_floor);
	} else {
		/* Bog-standard Millennium */
		doc->chips_per_floor = 1;
		mtd->name = "DiskOnChip Millennium";
		this->scan_bbt = nftl_scan_bbt;
		return 1;
	}
}
/*
 * doc2001plus_init - hook up the DiskOnChip Millennium Plus access methods
 * @mtd: MTD device
 *
 * The Millennium Plus has its own command/select/ECC paths, so the generic
 * doc200x callbacks installed by doc_probe() are overridden here (cmd_ctrl
 * is cleared because cmdfunc is replaced wholesale).  Always one chip.
 */
static inline int __init doc2001plus_init(struct mtd_info *mtd)
{
	struct nand_chip *this = mtd->priv;
	struct doc_priv *doc = this->priv;

	this->read_byte = doc2001plus_read_byte;
	this->write_buf = doc2001plus_writebuf;
	this->read_buf = doc2001plus_readbuf;
	this->verify_buf = doc2001plus_verifybuf;
	this->scan_bbt = inftl_scan_bbt;
	this->cmd_ctrl = NULL;
	this->select_chip = doc2001plus_select_chip;
	this->cmdfunc = doc2001plus_command;
	this->ecc.hwctl = doc2001plus_enable_hwecc;

	doc->chips_per_floor = 1;
	mtd->name = "DiskOnChip Millennium Plus";
	return 1;
}
/*
 * doc_probe - probe one physical address for a DiskOnChip device
 * @physadr: physical base address to probe
 *
 * Maps the window, identifies the ASIC (2000, Millennium or Millennium
 * Plus), rejects aliases of already-registered devices, allocates the
 * mtd/nand/doc structures in one chunk and runs nand_scan().  On success the
 * device is prepended to the module-global doclist.  Returns 0 or a -errno.
 */
static int __init doc_probe(unsigned long physadr)
{
	unsigned char ChipID;
	struct mtd_info *mtd;
	struct nand_chip *nand;
	struct doc_priv *doc;
	void __iomem *virtadr;
	unsigned char save_control;
	unsigned char tmp, tmpb, tmpc;
	int reg, len, numchips;
	int ret = 0;

	virtadr = ioremap(physadr, DOC_IOREMAP_LEN);
	if (!virtadr) {
		printk(KERN_ERR "Diskonchip ioremap failed: 0x%x bytes at 0x%lx\n", DOC_IOREMAP_LEN, physadr);
		return -EIO;
	}

	/* It's not possible to cleanly detect the DiskOnChip - the
	 * bootup procedure will put the device into reset mode, and
	 * it's not possible to talk to it without actually writing
	 * to the DOCControl register. So we store the current contents
	 * of the DOCControl register's location, in case we later decide
	 * that it's not a DiskOnChip, and want to put it back how we
	 * found it.
	 */
	save_control = ReadDOC(virtadr, DOCControl);

	/* Reset the DiskOnChip ASIC */
	WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET, virtadr, DOCControl);
	WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET, virtadr, DOCControl);

	/* Enable the DiskOnChip ASIC */
	WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL, virtadr, DOCControl);
	WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL, virtadr, DOCControl);

	ChipID = ReadDOC(virtadr, ChipID);

	switch (ChipID) {
	case DOC_ChipID_Doc2k:
		reg = DoC_2k_ECCStatus;
		break;
	case DOC_ChipID_DocMil:
		reg = DoC_ECCConf;
		break;
	case DOC_ChipID_DocMilPlus16:
	case DOC_ChipID_DocMilPlus32:
	case 0:
		/* Possible Millennium Plus, need to do more checks */
		/* Possibly release from power down mode */
		for (tmp = 0; (tmp < 4); tmp++)
			ReadDOC(virtadr, Mplus_Power);

		/* Reset the Millennium Plus ASIC */
		tmp = DOC_MODE_RESET | DOC_MODE_MDWREN | DOC_MODE_RST_LAT | DOC_MODE_BDECT;
		WriteDOC(tmp, virtadr, Mplus_DOCControl);
		WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);
		mdelay(1);

		/* Enable the Millennium Plus ASIC */
		tmp = DOC_MODE_NORMAL | DOC_MODE_MDWREN | DOC_MODE_RST_LAT | DOC_MODE_BDECT;
		WriteDOC(tmp, virtadr, Mplus_DOCControl);
		WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);
		mdelay(1);

		ChipID = ReadDOC(virtadr, ChipID);

		switch (ChipID) {
		case DOC_ChipID_DocMilPlus16:
			reg = DoC_Mplus_Toggle;
			break;
		case DOC_ChipID_DocMilPlus32:
			printk(KERN_ERR "DiskOnChip Millennium Plus 32MB is not supported, ignoring.\n");
			/* fall through -- unsupported chip is treated as not found */
		default:
			ret = -ENODEV;
			goto notfound;
		}
		break;
	default:
		ret = -ENODEV;
		goto notfound;
	}

	/* Check the TOGGLE bit in the ECC register: it must alternate on
	   successive reads for a real DiskOnChip. */
	tmp = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
	tmpb = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
	tmpc = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
	if ((tmp == tmpb) || (tmp != tmpc)) {
		printk(KERN_WARNING "Possible DiskOnChip at 0x%lx failed TOGGLE test, dropping.\n", physadr);
		ret = -ENODEV;
		goto notfound;
	}

	/* doc is assigned inside the loop before the increment expression
	   runs, so "mtd = doc->nextdoc" is safe. */
	for (mtd = doclist; mtd; mtd = doc->nextdoc) {
		unsigned char oldval;
		unsigned char newval;
		nand = mtd->priv;
		doc = nand->priv;
		/* Use the alias resolution register to determine if this is
		   in fact the same DOC aliased to a new address. If writes
		   to one chip's alias resolution register change the value on
		   the other chip, they're the same chip. */
		if (ChipID == DOC_ChipID_DocMilPlus16) {
			oldval = ReadDOC(doc->virtadr, Mplus_AliasResolution);
			newval = ReadDOC(virtadr, Mplus_AliasResolution);
		} else {
			oldval = ReadDOC(doc->virtadr, AliasResolution);
			newval = ReadDOC(virtadr, AliasResolution);
		}
		if (oldval != newval)
			continue;
		if (ChipID == DOC_ChipID_DocMilPlus16) {
			WriteDOC(~newval, virtadr, Mplus_AliasResolution);
			oldval = ReadDOC(doc->virtadr, Mplus_AliasResolution);
			WriteDOC(newval, virtadr, Mplus_AliasResolution);	// restore it
		} else {
			WriteDOC(~newval, virtadr, AliasResolution);
			oldval = ReadDOC(doc->virtadr, AliasResolution);
			WriteDOC(newval, virtadr, AliasResolution);	// restore it
		}
		newval = ~newval;
		if (oldval == newval) {
			printk(KERN_DEBUG "Found alias of DOC at 0x%lx to 0x%lx\n", doc->physadr, physadr);
			goto notfound;
		}
	}

	printk(KERN_NOTICE "DiskOnChip found at 0x%lx\n", physadr);

	/* All four structures live in one allocation, laid out back to back. */
	len = sizeof(struct mtd_info) +
	    sizeof(struct nand_chip) + sizeof(struct doc_priv) + (2 * sizeof(struct nand_bbt_descr));
	mtd = kzalloc(len, GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "DiskOnChip kmalloc (%d bytes) failed!\n", len);
		ret = -ENOMEM;
		goto fail;
	}

	nand			= (struct nand_chip *) (mtd + 1);
	doc			= (struct doc_priv *) (nand + 1);
	nand->bbt_td		= (struct nand_bbt_descr *) (doc + 1);
	nand->bbt_md		= nand->bbt_td + 1;

	mtd->priv		= nand;
	mtd->owner		= THIS_MODULE;

	nand->priv		= doc;
	nand->select_chip	= doc200x_select_chip;
	nand->cmd_ctrl		= doc200x_hwcontrol;
	nand->dev_ready		= doc200x_dev_ready;
	nand->waitfunc		= doc200x_wait;
	nand->block_bad		= doc200x_block_bad;
	nand->ecc.hwctl		= doc200x_enable_hwecc;
	nand->ecc.calculate	= doc200x_calculate_ecc;
	nand->ecc.correct	= doc200x_correct_data;

	nand->ecc.layout	= &doc200x_oobinfo;
	nand->ecc.mode		= NAND_ECC_HW_SYNDROME;
	nand->ecc.size		= 512;
	nand->ecc.bytes		= 6;
	nand->options		= NAND_USE_FLASH_BBT;

	doc->physadr		= physadr;
	doc->virtadr		= virtadr;
	doc->ChipID		= ChipID;
	doc->curfloor		= -1;
	doc->curchip		= -1;
	doc->mh0_page		= -1;
	doc->mh1_page		= -1;
	doc->nextdoc		= doclist;

	if (ChipID == DOC_ChipID_Doc2k)
		numchips = doc2000_init(mtd);
	else if (ChipID == DOC_ChipID_DocMilPlus16)
		numchips = doc2001plus_init(mtd);
	else
		numchips = doc2001_init(mtd);

	if ((ret = nand_scan(mtd, numchips))) {
		/* DBB note: i believe nand_release is necessary here, as
		   buffers may have been allocated in nand_base.  Check with
		   Thomas. FIX ME! */
		/* nand_release will call del_mtd_device, but we haven't yet
		   added it.  This is handled without incident by
		   del_mtd_device, as far as I can tell. */
		nand_release(mtd);
		kfree(mtd);
		goto fail;
	}

	/* Success! */
	doclist = mtd;
	return 0;

 notfound:
	/* Put back the contents of the DOCControl register, in case it's not
	   actually a DiskOnChip.  */
	WriteDOC(save_control, virtadr, DOCControl);
 fail:
	iounmap(virtadr);
	return ret;
}
/*
 * release_nanddoc - tear down every DiskOnChip registered at probe time
 *
 * Walks the doclist chain, for each device unregistering it from the NAND
 * layer, unmapping its I/O window and freeing its combined allocation.
 * The next pointer is captured before the structure is freed.
 */
static void release_nanddoc(void)
{
	struct mtd_info *mtd = doclist;

	while (mtd) {
		struct nand_chip *nand = mtd->priv;
		struct doc_priv *doc = nand->priv;
		struct mtd_info *next = doc->nextdoc;

		/* Unregister before the mapping disappears. */
		nand_release(mtd);
		iounmap(doc->virtadr);
		kfree(mtd);
		mtd = next;
	}
}
/*
 * init_nanddoc - module entry point
 *
 * Creates the shared Reed-Solomon decoder, then probes either the single
 * configured address (failure is fatal there) or the standard address list
 * (individual failures are ignored).  Returns 0 if at least one device was
 * found, otherwise a -errno with the decoder freed again.
 */
static int __init init_nanddoc(void)
{
	int i, ret = 0;

	/* We could create the decoder on demand, if memory is a concern.
	 * This way we have it handy, if an error happens
	 *
	 * Symbolsize is 10 (bits)
	 * Primitive polynomial is x^10+x^3+1
	 * first consecutive root is 510
	 * primitive element to generate roots = 1
	 * generator polynomial degree = 4
	 */
	rs_decoder = init_rs(10, 0x409, FCR, 1, NROOTS);
	if (!rs_decoder) {
		printk(KERN_ERR "DiskOnChip: Could not create a RS decoder\n");
		return -ENOMEM;
	}

	if (doc_config_location) {
		printk(KERN_INFO "Using configured DiskOnChip probe address 0x%lx\n", doc_config_location);
		ret = doc_probe(doc_config_location);
		if (ret < 0)
			goto outerr;
	} else {
		for (i = 0; (doc_locations[i] != 0xffffffff); i++) {
			doc_probe(doc_locations[i]);
		}
	}
	/* No banner message any more. Print a message if no DiskOnChip
	   found, so the user knows we at least tried. */
	if (!doclist) {
		printk(KERN_INFO "No valid DiskOnChip devices found\n");
		ret = -ENODEV;
		goto outerr;
	}
	return 0;
 outerr:
	free_rs(rs_decoder);
	return ret;
}
/*
 * cleanup_nanddoc - module exit point
 *
 * Releases every probed device and then the shared Reed-Solomon decoder
 * (which may be NULL if init never completed).
 */
static void __exit cleanup_nanddoc(void)
{
	/* Tear down the nand/DoC resources first. */
	release_nanddoc();

	/* Then drop the Reed-Solomon decoder, if one was created. */
	if (rs_decoder)
		free_rs(rs_decoder);
}
/* Module entry/exit points and metadata. */
module_init(init_nanddoc);
module_exit(cleanup_nanddoc);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("M-Systems DiskOnChip 2000, Millennium and Millennium Plus device driver");
|
<?php
/*
* This file is part of the Symfony package.
*
* (c) Fabien Potencier <fabien@symfony.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Symfony\Component\HttpFoundation;
/**
 * HTTP header utility functions.
 *
 * @author Christian Schmidt <github@chsc.dk>
 */
class HeaderUtils
{
    public const DISPOSITION_ATTACHMENT = 'attachment';
    public const DISPOSITION_INLINE = 'inline';

    /**
     * This class should not be instantiated.
     */
    private function __construct()
    {
    }

    /**
     * Splits an HTTP header by one or more separators.
     *
     * Example:
     *
     *     HeaderUtils::split("da, en-gb;q=0.8", ",;")
     *     // => [['da'], ['en-gb', 'q=0.8']]
     *
     * @param string $header     HTTP header value
     * @param string $separators List of characters to split on, ordered by
     *                           precedence, e.g. ",", ";=", or ",;="
     *
     * @return array Nested array with as many levels as there are characters in
     *               $separators
     */
    public static function split(string $header, string $separators): array
    {
        $quotedSeparators = preg_quote($separators, '/');

        // Tokenize into quoted-strings, tokens and named separator matches;
        // grouping into the nested structure is done by groupParts().
        preg_match_all('
            /
                (?!\s)
                    (?:
                        # quoted-string
                        "(?:[^"\\\\]|\\\\.)*(?:"|\\\\|$)
                    |
                        # token
                        [^"'.$quotedSeparators.']+
                    )+
                (?<!\s)
            |
                # separator
                \s*
                (?<separator>['.$quotedSeparators.'])
                \s*
            /x', trim($header), $matches, PREG_SET_ORDER);

        return self::groupParts($matches, $separators);
    }

    /**
     * Combines an array of arrays into one associative array.
     *
     * Each of the nested arrays should have one or two elements. The first
     * value will be used as the keys in the associative array, and the second
     * will be used as the values, or true if the nested array only contains one
     * element. Array keys are lowercased.
     *
     * Example:
     *
     *     HeaderUtils::combine([["foo", "abc"], ["bar"]])
     *     // => ["foo" => "abc", "bar" => true]
     */
    public static function combine(array $parts): array
    {
        $assoc = [];
        foreach ($parts as $part) {
            $name = strtolower($part[0]);
            $value = $part[1] ?? true;
            $assoc[$name] = $value;
        }

        return $assoc;
    }

    /**
     * Joins an associative array into a string for use in an HTTP header.
     *
     * The key and value of each entry are joined with "=", and all entries
     * are joined with the specified separator and an additional space (for
     * readability). Values are quoted if necessary.
     *
     * Example:
     *
     *     HeaderUtils::toString(["foo" => "abc", "bar" => true, "baz" => "a b c"], ",")
     *     // => 'foo=abc, bar, baz="a b c"'
     */
    public static function toString(array $assoc, string $separator): string
    {
        $parts = [];
        foreach ($assoc as $name => $value) {
            if (true === $value) {
                // Boolean true renders as a bare name with no "=value".
                $parts[] = $name;
            } else {
                $parts[] = $name.'='.self::quote($value);
            }
        }

        return implode($separator.' ', $parts);
    }

    /**
     * Encodes a string as a quoted string, if necessary.
     *
     * If a string contains characters not allowed by the "token" construct in
     * the HTTP specification, it is backslash-escaped and enclosed in quotes
     * to match the "quoted-string" construct.
     */
    public static function quote(string $s): string
    {
        // Pure "token" strings need no quoting.
        if (preg_match('/^[a-z0-9!#$%&\'*.^_`|~-]+$/i', $s)) {
            return $s;
        }

        // Escape backslashes and double quotes, then wrap in quotes.
        // (The original charlist '"\\"' listed the double quote twice;
        // the character set is identical.)
        return '"'.addcslashes($s, '"\\').'"';
    }

    /**
     * Decodes a quoted string.
     *
     * If passed an unquoted string that matches the "token" construct (as
     * defined in the HTTP specification), it is passed through verbatimly.
     */
    public static function unquote(string $s): string
    {
        // Unescape backslash-escaped characters and strip bare quotes.
        return preg_replace('/\\\\(.)|"/', '$1', $s);
    }

    /**
     * Generates a HTTP Content-Disposition field-value.
     *
     * @param string $disposition      One of "inline" or "attachment"
     * @param string $filename         A unicode string
     * @param string $filenameFallback A string containing only ASCII characters that
     *                                 is semantically equivalent to $filename. If the filename is already ASCII,
     *                                 it can be omitted, or just copied from $filename
     *
     * @return string A string suitable for use as a Content-Disposition field-value
     *
     * @throws \InvalidArgumentException
     *
     * @see RFC 6266
     */
    public static function makeDisposition(string $disposition, string $filename, string $filenameFallback = ''): string
    {
        if (!\in_array($disposition, [self::DISPOSITION_ATTACHMENT, self::DISPOSITION_INLINE])) {
            throw new \InvalidArgumentException(sprintf('The disposition must be either "%s" or "%s".', self::DISPOSITION_ATTACHMENT, self::DISPOSITION_INLINE));
        }

        if ('' === $filenameFallback) {
            $filenameFallback = $filename;
        }

        // filenameFallback is not ASCII.
        if (!preg_match('/^[\x20-\x7e]*$/', $filenameFallback)) {
            throw new \InvalidArgumentException('The filename fallback must only contain ASCII characters.');
        }

        // percent characters aren't safe in fallback.
        if (false !== strpos($filenameFallback, '%')) {
            throw new \InvalidArgumentException('The filename fallback cannot contain the "%" character.');
        }

        // path separators aren't allowed in either.
        if (false !== strpos($filename, '/') || false !== strpos($filename, '\\') || false !== strpos($filenameFallback, '/') || false !== strpos($filenameFallback, '\\')) {
            throw new \InvalidArgumentException('The filename and the fallback cannot contain the "/" and "\\" characters.');
        }

        $params = ['filename' => $filenameFallback];
        if ($filename !== $filenameFallback) {
            // RFC 8187 extended parameter carries the non-ASCII name.
            $params['filename*'] = "utf-8''".rawurlencode($filename);
        }

        return $disposition.'; '.self::toString($params, ';');
    }

    /**
     * Recursively groups the tokenizer matches from split() into nested
     * arrays, one nesting level per separator character.
     */
    private static function groupParts(array $matches, string $separators): array
    {
        $separator = $separators[0];
        $partSeparators = substr($separators, 1);

        $i = 0;
        $partMatches = [];
        foreach ($matches as $match) {
            if (isset($match['separator']) && $match['separator'] === $separator) {
                ++$i;
            } else {
                $partMatches[$i][] = $match;
            }
        }

        $parts = [];
        if ($partSeparators) {
            foreach ($partMatches as $matches) {
                $parts[] = self::groupParts($matches, $partSeparators);
            }
        } else {
            foreach ($partMatches as $matches) {
                $parts[] = self::unquote($matches[0][0]);
            }
        }

        return $parts;
    }
}
|
<!-- Social share widget (Jekyll/Liquid). Expects `page.title`, `page.url`
     and optional `page.comments` in context. Query parameters are passed
     through `url_encode` so titles containing spaces or '&' cannot break
     the generated share URLs. -->
<div class="share sticky-top sticky-top-offset">
  <p>
    Share
  </p>
  <ul>
    <li class="ml-1 mr-1">
      <a target="_blank" href="https://twitter.com/intent/tweet?text={{ page.title | url_encode }}&url={{ page.url | absolute_url | url_encode }}" onclick="window.open(this.href, 'twitter-share', 'width=550,height=235');return false;">
        <i class="fab fa-twitter"></i>
      </a>
    </li>
    <li class="ml-1 mr-1">
      <a target="_blank" href="https://facebook.com/sharer.php?u={{ page.url | absolute_url | url_encode }}" onclick="window.open(this.href, 'facebook-share', 'width=550,height=435');return false;">
        <i class="fab fa-facebook-f"></i>
      </a>
    </li>
    <li class="ml-1 mr-1">
      <!-- Fix: window.open() previously received the feature string as its
           second argument (the window *name*), so the popup features were
           ignored; an explicit name restores the intended popup size. -->
      <a target="_blank" href="https://www.linkedin.com/shareArticle?mini=true&url={{ page.url | absolute_url | url_encode }}" onclick="window.open(this.href, 'linkedin-share', 'width=550,height=435');return false;">
        <i class="fab fa-linkedin-in"></i>
      </a>
    </li>
  </ul>
  {% if page.comments != false %}
  <div class="sep">
  </div>
  <ul>
    <li>
      <!-- Link text is injected at runtime by the Disqus comment-count
           script via the #disqus_thread anchor. -->
      <a class="small smoothscroll" href="#disqus_thread"></a>
    </li>
  </ul>
  {% endif %}
</div>
|
// CodeContracts
//
// Copyright (c) Microsoft Corporation
//
// All rights reserved.
//
// MIT License
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
// File System.Windows.Media.Effects.BevelBitmapEffect.cs
// Automatically generated contract file.
using System.Collections.Generic;
using System.IO;
using System.Text;
using System.Diagnostics.Contracts;
using System;
// Disable the "this variable is not used" warning as every field would imply it.
#pragma warning disable 0414
// Disable the "this variable is never assigned to".
#pragma warning disable 0067
// Disable the "this event is never assigned to".
#pragma warning disable 0649
// Disable the "this variable is never used".
#pragma warning disable 0169
// Disable the "new keyword not required" warning.
#pragma warning disable 0109
// Disable the "extern without DllImport" warning.
#pragma warning disable 0626
// Disable the "could hide other member" warning, can happen on certain properties.
#pragma warning disable 0108
namespace System.Windows.Media.Effects
{
// Contract reference stub for System.Windows.Media.Effects.BevelBitmapEffect
// (CodeContracts). Every method returns its type's default value and every
// property setter is a no-op: only the signatures matter for contract checking.
sealed public partial class BevelBitmapEffect : BitmapEffect
{
  #region Methods and constructors
  public BevelBitmapEffect()
  {
  }

  // Declared inside the System.Windows.Media.Effects namespace, so the
  // short type name resolves to the same type as the fully-qualified one.
  public BevelBitmapEffect Clone()
  {
    return default(BevelBitmapEffect);
  }

  public BevelBitmapEffect CloneCurrentValue()
  {
    return default(BevelBitmapEffect);
  }

  protected override System.Windows.Freezable CreateInstanceCore()
  {
    return default(System.Windows.Freezable);
  }

  protected override System.Runtime.InteropServices.SafeHandle CreateUnmanagedEffect()
  {
    return default(System.Runtime.InteropServices.SafeHandle);
  }

  protected override void UpdateUnmanagedPropertyState(System.Runtime.InteropServices.SafeHandle unmanagedEffect)
  {
    // Intentionally empty: contract stub.
  }
  #endregion

  #region Properties and indexers
  public double BevelWidth
  {
    get { return default(double); }
    set { }
  }

  public EdgeProfile EdgeProfile
  {
    get { return default(EdgeProfile); }
    set { }
  }

  public double LightAngle
  {
    get { return default(double); }
    set { }
  }

  public double Relief
  {
    get { return default(double); }
    set { }
  }

  public double Smoothness
  {
    get { return default(double); }
    set { }
  }
  #endregion

  #region Fields
  public readonly static System.Windows.DependencyProperty BevelWidthProperty;
  public readonly static System.Windows.DependencyProperty EdgeProfileProperty;
  public readonly static System.Windows.DependencyProperty LightAngleProperty;
  public readonly static System.Windows.DependencyProperty ReliefProperty;
  public readonly static System.Windows.DependencyProperty SmoothnessProperty;
  #endregion
}
}
|
#ifndef WIN32IOP_H
#define WIN32IOP_H
/* Give the exported win32io symbols C linkage when this header is
 * included from a C++ translation unit; no-ops under plain C. */
#ifndef START_EXTERN_C
#ifdef __cplusplus
# define START_EXTERN_C extern "C" {
# define END_EXTERN_C }
# define EXTERN_C extern "C"
#else
# define START_EXTERN_C
# define END_EXTERN_C
# define EXTERN_C
#endif
#endif
/* MSVC and MinGW ship utime.h under sys/; other toolchains put it at
 * the top level. */
#if defined(_MSC_VER) || defined(__MINGW32__)
# include <sys/utime.h>
#else
# include <utime.h>
#endif
/*
 * defines for flock emulation
 */
#define LOCK_SH 1
#define LOCK_EX 2
#define LOCK_NB 4
#define LOCK_UN 8
/*
 * Make this as close to original stdio as possible.
 */
/*
 * function prototypes for our own win32io layer
 */
START_EXTERN_C
/* errno / environ / the standard streams are re-exported as function
 * calls so the DLL and the host executable agree on a single copy. */
DllExport int * win32_errno(void);
DllExport char *** win32_environ(void);
DllExport FILE* win32_stdin(void);
DllExport FILE* win32_stdout(void);
DllExport FILE* win32_stderr(void);
DllExport int win32_ferror(FILE *fp);
DllExport int win32_feof(FILE *fp);
DllExport char* win32_strerror(int e);
/* stdio replacements */
DllExport int win32_fprintf(FILE *pf, const char *format, ...);
DllExport int win32_printf(const char *format, ...);
DllExport int win32_vfprintf(FILE *pf, const char *format, va_list arg);
DllExport int win32_vprintf(const char *format, va_list arg);
DllExport size_t win32_fread(void *buf, size_t size, size_t count, FILE *pf);
DllExport size_t win32_fwrite(const void *buf, size_t size, size_t count, FILE *pf);
DllExport FILE* win32_fopen(const char *path, const char *mode);
DllExport FILE* win32_fdopen(int fh, const char *mode);
DllExport FILE* win32_freopen(const char *path, const char *mode, FILE *pf);
DllExport int win32_fclose(FILE *pf);
DllExport int win32_fputs(const char *s,FILE *pf);
DllExport int win32_fputc(int c,FILE *pf);
DllExport int win32_ungetc(int c,FILE *pf);
DllExport int win32_getc(FILE *pf);
DllExport int win32_fileno(FILE *pf);
DllExport void win32_clearerr(FILE *pf);
DllExport int win32_fflush(FILE *pf);
DllExport Off_t win32_ftell(FILE *pf);
DllExport int win32_fseek(FILE *pf,Off_t offset,int origin);
DllExport int win32_fgetpos(FILE *pf,fpos_t *p);
DllExport int win32_fsetpos(FILE *pf,const fpos_t *p);
DllExport void win32_rewind(FILE *pf);
DllExport int win32_tmpfd(void);
DllExport FILE* win32_tmpfile(void);
DllExport void win32_abort(void);
DllExport int win32_fstat(int fd,Stat_t *sbufptr);
DllExport int win32_stat(const char *name,Stat_t *sbufptr);
DllExport int win32_pipe( int *phandles, unsigned int psize, int textmode );
DllExport PerlIO* win32_popen( const char *command, const char *mode );
DllExport PerlIO* win32_popenlist(const char *mode, IV narg, SV **args);
DllExport int win32_pclose( PerlIO *pf);
DllExport int win32_rename( const char *oname, const char *newname);
DllExport int win32_setmode( int fd, int mode);
DllExport int win32_chsize(int fd, Off_t size);
DllExport Off_t win32_lseek( int fd, Off_t offset, int origin);
DllExport Off_t win32_tell( int fd);
DllExport int win32_dup( int fd);
DllExport int win32_dup2(int h1, int h2);
DllExport int win32_open(const char *path, int oflag,...);
DllExport int win32_close(int fd);
DllExport int win32_eof(int fd);
DllExport int win32_isatty(int fd);
DllExport int win32_read(int fd, void *buf, unsigned int cnt);
DllExport int win32_write(int fd, const void *buf, unsigned int cnt);
DllExport int win32_spawnvp(int mode, const char *cmdname,
			    const char *const *argv);
DllExport int win32_mkdir(const char *dir, int mode);
DllExport int win32_rmdir(const char *dir);
DllExport int win32_chdir(const char *dir);
DllExport int win32_flock(int fd, int oper);
DllExport int win32_execv(const char *cmdname, const char *const *argv);
DllExport int win32_execvp(const char *cmdname, const char *const *argv);
DllExport void win32_perror(const char *str);
DllExport void win32_setbuf(FILE *pf, char *buf);
DllExport int win32_setvbuf(FILE *pf, char *buf, int type, size_t size);
DllExport int win32_flushall(void);
DllExport int win32_fcloseall(void);
DllExport char* win32_fgets(char *s, int n, FILE *pf);
DllExport char* win32_gets(char *s);
DllExport int win32_fgetc(FILE *pf);
DllExport int win32_putc(int c, FILE *pf);
DllExport int win32_puts(const char *s);
DllExport int win32_getchar(void);
DllExport int win32_putchar(int c);
/* heap routines forwarded so allocations cross the DLL boundary safely */
DllExport void* win32_malloc(size_t size);
DllExport void* win32_calloc(size_t numitems, size_t size);
DllExport void* win32_realloc(void *block, size_t size);
DllExport void win32_free(void *block);
DllExport int win32_open_osfhandle(intptr_t handle, int flags);
DllExport intptr_t win32_get_osfhandle(int fd);
DllExport FILE* win32_fdupopen(FILE *pf);
/* directory traversal */
DllExport DIR* win32_opendir(const char *filename);
DllExport struct direct* win32_readdir(DIR *dirp);
DllExport long win32_telldir(DIR *dirp);
DllExport void win32_seekdir(DIR *dirp, long loc);
DllExport void win32_rewinddir(DIR *dirp);
DllExport int win32_closedir(DIR *dirp);
DllExport DIR* win32_dirp_dup(DIR *const dirp, CLONE_PARAMS *const param);
/* environment, process and misc POSIX-ish services */
DllExport char* win32_getenv(const char *name);
DllExport int win32_putenv(const char *name);
DllExport unsigned win32_sleep(unsigned int);
DllExport int win32_times(struct tms *timebuf);
DllExport unsigned win32_alarm(unsigned int sec);
DllExport char* win32_longpath(char *path);
DllExport char* win32_ansipath(const WCHAR *path);
DllExport int win32_ioctl(int i, unsigned int u, char *data);
DllExport int win32_link(const char *oldname, const char *newname);
DllExport int win32_unlink(const char *f);
DllExport int win32_utime(const char *f, struct utimbuf *t);
DllExport int win32_gettimeofday(struct timeval *tp, void *not_used);
DllExport int win32_uname(struct utsname *n);
DllExport int win32_wait(int *status);
DllExport int win32_waitpid(int pid, int *status, int flags);
DllExport int win32_kill(int pid, int sig);
DllExport unsigned long win32_os_id(void);
DllExport void* win32_dynaload(const char*filename);
DllExport int win32_access(const char *path, int mode);
DllExport int win32_chmod(const char *path, int mode);
DllExport int win32_getpid(void);
DllExport char * win32_crypt(const char *txt, const char *salt);
DllExport void * win32_get_childenv(void);
DllExport void win32_free_childenv(void* d);
DllExport void win32_clearenv(void);
DllExport char * win32_get_childdir(void);
DllExport void win32_free_childdir(char* d);
DllExport Sighandler_t win32_signal(int sig, Sighandler_t subcode);
END_EXTERN_C
/* Redirect the C runtime's names to the win32io replacements declared
 * above. Each name that the CRT headers may have already #defined is
 * #undef'd first. */
#undef alarm
#define alarm win32_alarm
#undef strerror
#define strerror win32_strerror
/*
 * The following are #defined (or declared) by stdio.h and friends, so
 * they must be removed before being redirected below.
 */
#ifndef WIN32IO_IS_STDIO
#undef errno
#undef environ
#undef stderr
#undef stdin
#undef stdout
#undef ferror
#undef feof
#undef fclose
#undef pipe
#undef pause
#undef sleep
#undef times
#undef ioctl
#undef unlink
#undef utime
#undef gettimeofday
#undef uname
#undef wait
#if defined(__MINGW32__)
#undef fileno
#endif
#define stderr				win32_stderr()
#define stdout				win32_stdout()
#define stdin				win32_stdin()
#define feof(f)				win32_feof(f)
#define ferror(f)			win32_ferror(f)
#define errno				(*win32_errno())
#define environ				(*win32_environ())
/*
 * redirect to our own version
 */
#undef fprintf
#define fprintf			win32_fprintf
#define vfprintf		win32_vfprintf
#define printf			win32_printf
#define vprintf			win32_vprintf
#define fread(buf,size,count,f)	win32_fread(buf,size,count,f)
#define fwrite(buf,size,count,f)	win32_fwrite(buf,size,count,f)
#define fopen			win32_fopen
#undef fdopen
#define fdopen			win32_fdopen
#define freopen			win32_freopen
#define fclose(f)		win32_fclose(f)
#define fputs(s,f)		win32_fputs(s,f)
#define fputc(c,f)		win32_fputc(c,f)
#define ungetc(c,f)		win32_ungetc(c,f)
#undef getc
#define getc(f)			win32_getc(f)
#define fileno(f)		win32_fileno(f)
#define clearerr(f)		win32_clearerr(f)
#define fflush(f)		win32_fflush(f)
#define ftell(f)		win32_ftell(f)
#define fseek(f,o,w)		win32_fseek(f,o,w)
#define fgetpos(f,p)		win32_fgetpos(f,p)
#define fsetpos(f,p)		win32_fsetpos(f,p)
#define rewind(f)		win32_rewind(f)
#define tmpfile()		win32_tmpfile()
#define abort()			win32_abort()
#define fstat(fd,bufptr)	win32_fstat(fd,bufptr)
#define stat(pth,bufptr)	win32_stat(pth,bufptr)
#define longpath(pth)		win32_longpath(pth)
#define ansipath(pth)		win32_ansipath(pth)
#define rename(old,new)		win32_rename(old,new)
#define setmode(fd,mode)	win32_setmode(fd,mode)
#define chsize(fd,sz)		win32_chsize(fd,sz)
#define lseek(fd,offset,orig)	win32_lseek(fd,offset,orig)
#define tell(fd)		win32_tell(fd)
#define dup(fd)			win32_dup(fd)
#define dup2(fd1,fd2)		win32_dup2(fd1,fd2)
#define open			win32_open
#define close(fd)		win32_close(fd)
#define eof(fd)			win32_eof(fd)
#define isatty(fd)		win32_isatty(fd)
#define read(fd,b,s)		win32_read(fd,b,s)
#define write(fd,b,s)		win32_write(fd,b,s)
#define _open_osfhandle		win32_open_osfhandle
#define _get_osfhandle		win32_get_osfhandle
#define spawnvp			win32_spawnvp
#define mkdir			win32_mkdir
#define rmdir			win32_rmdir
#define chdir			win32_chdir
#define flock(fd,o)		win32_flock(fd,o)
#define execv			win32_execv
#define execvp			win32_execvp
#define perror			win32_perror
#define setbuf			win32_setbuf
#define setvbuf			win32_setvbuf
#undef flushall
#define flushall		win32_flushall
#undef fcloseall
#define fcloseall		win32_fcloseall
#define fgets			win32_fgets
#define gets			win32_gets
#define fgetc			win32_fgetc
#undef putc
#define putc			win32_putc
#define puts			win32_puts
#undef getchar
#define getchar			win32_getchar
#undef putchar
#define putchar			win32_putchar
#define access(p,m)		win32_access(p,m)
#define chmod(p,m)		win32_chmod(p,m)
/* When Perl's own allocator is in use inside the core, leave malloc and
 * friends alone; otherwise route them through the DLL. */
#if !defined(MYMALLOC) || !defined(PERL_CORE)
#undef malloc
#undef calloc
#undef realloc
#undef free
#define malloc			win32_malloc
#define calloc			win32_calloc
#define realloc			win32_realloc
#define free			win32_free
#endif
/* XXX Why are APIs like sleep(), times() etc. inside a block
 * XXX guarded by "#ifndef WIN32IO_IS_STDIO"?
 */
#define pipe(fd)		win32_pipe((fd), 512, O_BINARY)
#define pause()			win32_sleep((32767L << 16) + 32767)
#define sleep			win32_sleep
#define times			win32_times
#define ioctl			win32_ioctl
#define link			win32_link
#define unlink			win32_unlink
#define utime			win32_utime
#define gettimeofday		win32_gettimeofday
#define uname			win32_uname
#define wait			win32_wait
#define waitpid			win32_waitpid
#define kill			win32_kill
#define opendir			win32_opendir
#define readdir			win32_readdir
#define telldir			win32_telldir
#define seekdir			win32_seekdir
#define rewinddir		win32_rewinddir
#define closedir		win32_closedir
#define os_id			win32_os_id
#define getpid			win32_getpid
#undef crypt
#define crypt(t,s)		win32_crypt(t,s)
/* child-process environment/directory bookkeeping */
#undef get_childenv
#undef free_childenv
#undef clearenv
#undef get_childdir
#undef free_childdir
#define get_childenv()		win32_get_childenv()
#define free_childenv(d)	win32_free_childenv(d)
#define clearenv()		win32_clearenv()
#define get_childdir()		win32_get_childdir()
#define free_childdir(d)	win32_free_childdir(d)
#undef getenv
#define getenv			win32_getenv
#undef putenv
#define putenv			win32_putenv
#endif /* WIN32IO_IS_STDIO */
#endif /* WIN32IOP_H */
|
<?php
/*
* This file is part of the symfony package.
* (c) 2004-2006 Fabien Potencier <fabien.potencier@symfony-project.com>
* (c) 2004-2006 Sean Kerr <sean@code-box.org>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
/**
* sfPDODatabase provides connectivity for the PDO database abstraction layer.
*
* @package symfony
* @subpackage database
* @author Daniel Swarbrick (daniel@pressure.net.nz)
* @author Fabien Potencier <fabien.potencier@symfony-project.com>
* @author Sean Kerr <sean@code-box.org>
* @author Dustin Whittle <dustin.whittle@symfony-project.com>
* @version SVN: $Id: sfPDODatabase.class.php 17858 2009-05-01 21:22:50Z FabianLange $
*/
class sfPDODatabase extends sfDatabase
{
  /**
   * Connects to the database.
   *
   * Reads the connection settings from the database parameters
   * ('method', 'dsn', 'class', 'username', 'password', 'persistent',
   * 'compat', 'nulls', 'autocommit') and creates a PDO instance.
   *
   * @throws <b>sfDatabaseException</b> If a connection could not be created
   */
  public function connect()
  {
    // determine how to get our parameters
    $method = $this->getParameter('method', 'dsn');

    // get parameters
    switch ($method)
    {
      case 'dsn':
        $dsn = $this->getParameter('dsn');
        if ($dsn == null)
        {
          // missing required dsn parameter
          throw new sfDatabaseException('Database configuration specifies method "dsn", but is missing dsn parameter.');
        }
        break;
    }

    try
    {
      // the "class" parameter allows a PDO subclass to be used
      $pdo_class  = $this->getParameter('class', 'PDO');
      $username   = $this->getParameter('username');
      $password   = $this->getParameter('password');
      $persistent = $this->getParameter('persistent');
      $options    = $persistent ? array(PDO::ATTR_PERSISTENT => true) : array();

      $this->connection = new $pdo_class($dsn, $username, $password, $options);
    }
    catch (PDOException $e)
    {
      // re-throw as a framework exception, preserving the driver message
      throw new sfDatabaseException($e->getMessage());
    }

    // let's generate exceptions instead of silent failures
    if (sfConfig::get('sf_debug'))
    {
      $this->connection->setAttribute(PDO::ATTR_ERRMODE, PDO::ERRMODE_EXCEPTION);
    }
    else
    {
      $this->connection->setAttribute(PDO::ATTR_ERRMODE, PDO::ERRMODE_SILENT);
    }

    // compatibility
    $compatibility = $this->getParameter('compat');
    if ($compatibility)
    {
      $this->connection->setAttribute(PDO::ATTR_CASE, PDO::CASE_NATURAL);
    }

    // nulls
    $nulls = $this->getParameter('nulls');
    if ($nulls)
    {
      $this->connection->setAttribute(PDO::ATTR_ORACLE_NULLS, PDO::NULL_EMPTY_STRING);
    }

    // auto commit
    $autocommit = $this->getParameter('autocommit');
    if ($autocommit)
    {
      $this->connection->setAttribute(PDO::ATTR_AUTOCOMMIT, true);
    }

    $this->resource = $this->connection;
  }

  /**
   * Execute the shutdown procedure.
   *
   * Dropping the last reference to the PDO object closes the connection.
   *
   * @return void
   */
  public function shutdown ()
  {
    if ($this->connection !== null)
    {
      // the "@" error-suppression operator previously used here was a
      // no-op on an assignment and has been removed
      $this->connection = null;
    }
  }

  /**
   * Magic method for calling PDO directly via sfPDODatabase.
   *
   * The arguments must be spread onto the delegated call: passing the
   * $arguments array as a single positional argument (as the previous
   * implementation did) breaks every PDO method that takes one or more
   * real arguments, e.g. prepare() or quote().
   *
   * @param string $method
   * @param array  $arguments
   *
   * @return mixed
   */
  public function __call($method, $arguments)
  {
    return call_user_func_array(array($this->getConnection(), $method), $arguments);
  }
}
|
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
const Command = require('../ember-cli/lib/models/command');
const test_1 = require("../tasks/test");
const config_1 = require("../models/config");
const common_tags_1 = require("common-tags");
// Transpiled definition of the `ng test` command (ember-cli Command model).
// Prefer the project-local CLI config; fall back to the global one.
const config = config_1.CliConfig.fromProject() || config_1.CliConfig.fromGlobal();
// Defaults for --progress and --poll come from the "defaults.build"
// section of .angular-cli.json.
const testConfigDefaults = config.getPaths('defaults.build', [
'progress', 'poll'
]);
const TestCommand = Command.extend({
name: 'test',
aliases: ['t'],
description: 'Run unit tests in existing project.',
// Only valid when invoked inside an Angular CLI project directory.
works: 'insideProject',
availableOptions: [
{
name: 'watch',
type: Boolean,
aliases: ['w'],
description: 'Run build when files change.'
},
{
name: 'code-coverage',
type: Boolean,
default: false,
aliases: ['cc'],
description: 'Coverage report will be in the coverage/ directory.'
},
{
name: 'config',
type: String,
aliases: ['c'],
description: common_tags_1.oneLine `Use a specific config file.
Defaults to the karma config file in .angular-cli.json.`
},
{
name: 'single-run',
type: Boolean,
aliases: ['sr'],
description: 'Run tests a single time.'
},
{
name: 'progress',
type: Boolean,
default: testConfigDefaults['progress'],
description: 'Log progress to the console while in progress.'
},
{
name: 'browsers',
type: String,
description: 'Override which browsers tests are run against.'
},
{
name: 'colors',
type: Boolean,
description: 'Enable or disable colors in the output (reporters and logs).'
},
{
name: 'log-level',
type: String,
description: 'Level of logging.'
},
{
name: 'port',
type: Number,
description: 'Port where the web server will be listening.'
},
{
name: 'reporters',
type: String,
description: 'List of reporters to use.'
},
{
name: 'sourcemaps',
type: Boolean,
default: true,
aliases: ['sm', 'sourcemap'],
description: 'Output sourcemaps.'
},
{
name: 'poll',
type: Number,
default: testConfigDefaults['poll'],
description: 'Enable and define the file watching poll time period (milliseconds).'
},
{
name: 'environment',
type: String,
aliases: ['e'],
description: 'Defines the build environment.'
},
{
name: 'app',
type: String,
aliases: ['a'],
description: 'Specifies app name to use.'
}
],
run: function (commandOptions) {
// Delegate the actual work to the Test task.
const testTask = new test_1.default({
ui: this.ui,
project: this.project
});
if (commandOptions.watch !== undefined && !commandOptions.watch) {
// if not watching ensure karma is doing a single run
commandOptions.singleRun = true;
}
return testTask.run(commandOptions);
}
});
// Replace the built-in ember-cli `test` command with this one.
TestCommand.overrideCore = true;
exports.default = TestCommand;
//# sourceMappingURL=/users/hansl/sources/angular-cli/commands/test.js.map |
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<meta name="generator" content="Doxygen 1.8.10"/>
<title>0.9.9 API documentation: Vector types with precision qualifiers</title>
<link href="tabs.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="jquery.js"></script>
<script type="text/javascript" src="dynsections.js"></script>
<link href="search/search.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="search/searchdata.js"></script>
<script type="text/javascript" src="search/search.js"></script>
<script type="text/javascript">
$(document).ready(function() { init_search(); });
</script>
<link href="doxygen.css" rel="stylesheet" type="text/css" />
</head>
<body>
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<div id="titlearea">
<table cellspacing="0" cellpadding="0">
<tbody>
<tr style="height: 56px;">
<td id="projectlogo"><img alt="Logo" src="logo-mini.png"/></td>
<td id="projectalign" style="padding-left: 0.5em;">
<div id="projectname">0.9.9 API documentation
</div>
</td>
</tr>
</tbody>
</table>
</div>
<!-- end header part -->
<!-- Generated by Doxygen 1.8.10 -->
<script type="text/javascript">
var searchBox = new SearchBox("searchBox", "search",false,'Search');
</script>
<div id="navrow1" class="tabs">
<ul class="tablist">
<li><a href="index.html"><span>Main Page</span></a></li>
<li><a href="modules.html"><span>Modules</span></a></li>
<li><a href="files.html"><span>Files</span></a></li>
<li>
<div id="MSearchBox" class="MSearchBoxInactive">
<span class="left">
<img id="MSearchSelect" src="search/mag_sel.png"
onmouseover="return searchBox.OnSearchSelectShow()"
onmouseout="return searchBox.OnSearchSelectHide()"
alt=""/>
<input type="text" id="MSearchField" value="Search" accesskey="S"
onfocus="searchBox.OnSearchFieldFocus(true)"
onblur="searchBox.OnSearchFieldFocus(false)"
onkeyup="searchBox.OnSearchFieldChange(event)"/>
</span><span class="right">
<a id="MSearchClose" href="javascript:searchBox.CloseResultsWindow()"><img id="MSearchCloseImg" border="0" src="search/close.png" alt=""/></a>
</span>
</div>
</li>
</ul>
</div>
</div><!-- top -->
<!-- window showing the filter options -->
<div id="MSearchSelectWindow"
onmouseover="return searchBox.OnSearchSelectShow()"
onmouseout="return searchBox.OnSearchSelectHide()"
onkeydown="return searchBox.OnSearchSelectKey(event)">
</div>
<!-- iframe showing the search results (closed by default) -->
<div id="MSearchResultsWindow">
<iframe src="javascript:void(0)" frameborder="0"
name="MSearchResults" id="MSearchResults">
</iframe>
</div>
<div class="header">
<div class="summary">
<a href="#typedef-members">Typedefs</a> </div>
<div class="headertitle">
<div class="title">Vector types with precision qualifiers<div class="ingroups"><a class="el" href="a00280.html">Core features</a></div></div> </div>
</div><!--header-->
<div class="contents">
<p>Vector types with precision qualifiers, which may result in varying precision in terms of ULPs.
<a href="#details">More...</a></p>
<table class="memberdecls">
<tr class="heading"><td colspan="2"><h2 class="groupheader"><a name="typedef-members"></a>
Typedefs</h2></td></tr>
<tr class="memitem:gac6c781a85f012d77a75310a3058702c2"><td class="memItemLeft" align="right" valign="top">typedef vec< 2, bool, highp > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#gac6c781a85f012d77a75310a3058702c2">highp_bvec2</a></td></tr>
<tr class="memdesc:gac6c781a85f012d77a75310a3058702c2"><td class="mdescLeft"> </td><td class="mdescRight">2 components vector of high qualifier bool numbers. <a href="a00282.html#gac6c781a85f012d77a75310a3058702c2">More...</a><br /></td></tr>
<tr class="separator:gac6c781a85f012d77a75310a3058702c2"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:gaedb70027d89a0a405046aefda4eabaa6"><td class="memItemLeft" align="right" valign="top">typedef vec< 3, bool, highp > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#gaedb70027d89a0a405046aefda4eabaa6">highp_bvec3</a></td></tr>
<tr class="memdesc:gaedb70027d89a0a405046aefda4eabaa6"><td class="mdescLeft"> </td><td class="mdescRight">3 components vector of high qualifier bool numbers. <a href="a00282.html#gaedb70027d89a0a405046aefda4eabaa6">More...</a><br /></td></tr>
<tr class="separator:gaedb70027d89a0a405046aefda4eabaa6"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:gaee663ff64429443ab07a5327074192f6"><td class="memItemLeft" align="right" valign="top">typedef vec< 4, bool, highp > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#gaee663ff64429443ab07a5327074192f6">highp_bvec4</a></td></tr>
<tr class="memdesc:gaee663ff64429443ab07a5327074192f6"><td class="mdescLeft"> </td><td class="mdescRight">4 components vector of high qualifier bool numbers. <a href="a00282.html#gaee663ff64429443ab07a5327074192f6">More...</a><br /></td></tr>
<tr class="separator:gaee663ff64429443ab07a5327074192f6"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:gab98d77cca255914f5e29697fcbc2d975"><td class="memItemLeft" align="right" valign="top">typedef vec< 2, double, highp > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#gab98d77cca255914f5e29697fcbc2d975">highp_dvec2</a></td></tr>
<tr class="memdesc:gab98d77cca255914f5e29697fcbc2d975"><td class="mdescLeft"> </td><td class="mdescRight">2 components vector of high double-qualifier floating-point numbers. <a href="a00282.html#gab98d77cca255914f5e29697fcbc2d975">More...</a><br /></td></tr>
<tr class="separator:gab98d77cca255914f5e29697fcbc2d975"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:gab24dc20dcdc5b71282634bdbf6b70105"><td class="memItemLeft" align="right" valign="top">typedef vec< 3, double, highp > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#gab24dc20dcdc5b71282634bdbf6b70105">highp_dvec3</a></td></tr>
<tr class="memdesc:gab24dc20dcdc5b71282634bdbf6b70105"><td class="mdescLeft"> </td><td class="mdescRight">3 components vector of high double-qualifier floating-point numbers. <a href="a00282.html#gab24dc20dcdc5b71282634bdbf6b70105">More...</a><br /></td></tr>
<tr class="separator:gab24dc20dcdc5b71282634bdbf6b70105"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:gab654f4ed4a99d64a6cfc65320c2a7590"><td class="memItemLeft" align="right" valign="top">typedef vec< 4, double, highp > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#gab654f4ed4a99d64a6cfc65320c2a7590">highp_dvec4</a></td></tr>
<tr class="memdesc:gab654f4ed4a99d64a6cfc65320c2a7590"><td class="mdescLeft"> </td><td class="mdescRight">4 components vector of high double-qualifier floating-point numbers. <a href="a00282.html#gab654f4ed4a99d64a6cfc65320c2a7590">More...</a><br /></td></tr>
<tr class="separator:gab654f4ed4a99d64a6cfc65320c2a7590"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:gaa18f6b80b41c214f10666948539c1f93"><td class="memItemLeft" align="right" valign="top">typedef vec< 2, int, highp > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#gaa18f6b80b41c214f10666948539c1f93">highp_ivec2</a></td></tr>
<tr class="memdesc:gaa18f6b80b41c214f10666948539c1f93"><td class="mdescLeft"> </td><td class="mdescRight">2 components vector of high qualifier signed integer numbers. <a href="a00282.html#gaa18f6b80b41c214f10666948539c1f93">More...</a><br /></td></tr>
<tr class="separator:gaa18f6b80b41c214f10666948539c1f93"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:ga7dd782c3ef5719bc6d5c3ca826b8ad18"><td class="memItemLeft" align="right" valign="top">typedef vec< 3, int, highp > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#ga7dd782c3ef5719bc6d5c3ca826b8ad18">highp_ivec3</a></td></tr>
<tr class="memdesc:ga7dd782c3ef5719bc6d5c3ca826b8ad18"><td class="mdescLeft"> </td><td class="mdescRight">3 components vector of high qualifier signed integer numbers. <a href="a00282.html#ga7dd782c3ef5719bc6d5c3ca826b8ad18">More...</a><br /></td></tr>
<tr class="separator:ga7dd782c3ef5719bc6d5c3ca826b8ad18"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:gafb84dccdf5d82443df3ffc8428dcaf3e"><td class="memItemLeft" align="right" valign="top">typedef vec< 4, int, highp > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#gafb84dccdf5d82443df3ffc8428dcaf3e">highp_ivec4</a></td></tr>
<tr class="memdesc:gafb84dccdf5d82443df3ffc8428dcaf3e"><td class="mdescLeft"> </td><td class="mdescRight">4 components vector of high qualifier signed integer numbers. <a href="a00282.html#gafb84dccdf5d82443df3ffc8428dcaf3e">More...</a><br /></td></tr>
<tr class="separator:gafb84dccdf5d82443df3ffc8428dcaf3e"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:gad5dd50da9e37387ca6b4e6f9c80fe6f8"><td class="memItemLeft" align="right" valign="top">typedef vec< 2, unsigned int, highp > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#gad5dd50da9e37387ca6b4e6f9c80fe6f8">highp_uvec2</a></td></tr>
<tr class="memdesc:gad5dd50da9e37387ca6b4e6f9c80fe6f8"><td class="mdescLeft"> </td><td class="mdescRight">2 components vector of high qualifier unsigned integer numbers. <a href="a00282.html#gad5dd50da9e37387ca6b4e6f9c80fe6f8">More...</a><br /></td></tr>
<tr class="separator:gad5dd50da9e37387ca6b4e6f9c80fe6f8"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:gaef61508dd40ec523416697982f9ceaae"><td class="memItemLeft" align="right" valign="top">typedef vec< 3, unsigned int, highp > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#gaef61508dd40ec523416697982f9ceaae">highp_uvec3</a></td></tr>
<tr class="memdesc:gaef61508dd40ec523416697982f9ceaae"><td class="mdescLeft"> </td><td class="mdescRight">3 components vector of high qualifier unsigned integer numbers. <a href="a00282.html#gaef61508dd40ec523416697982f9ceaae">More...</a><br /></td></tr>
<tr class="separator:gaef61508dd40ec523416697982f9ceaae"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:gaeebd7dd9f3e678691f8620241e5f9221"><td class="memItemLeft" align="right" valign="top">typedef vec< 4, unsigned int, highp > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#gaeebd7dd9f3e678691f8620241e5f9221">highp_uvec4</a></td></tr>
<tr class="memdesc:gaeebd7dd9f3e678691f8620241e5f9221"><td class="mdescLeft"> </td><td class="mdescRight">4 components vector of high qualifier unsigned integer numbers. <a href="a00282.html#gaeebd7dd9f3e678691f8620241e5f9221">More...</a><br /></td></tr>
<tr class="separator:gaeebd7dd9f3e678691f8620241e5f9221"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:gaa92c1954d71b1e7914874bd787b43d1c"><td class="memItemLeft" align="right" valign="top">typedef vec< 2, float, highp > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#gaa92c1954d71b1e7914874bd787b43d1c">highp_vec2</a></td></tr>
<tr class="memdesc:gaa92c1954d71b1e7914874bd787b43d1c"><td class="mdescLeft"> </td><td class="mdescRight">2 components vector of high single-qualifier floating-point numbers. <a href="a00282.html#gaa92c1954d71b1e7914874bd787b43d1c">More...</a><br /></td></tr>
<tr class="separator:gaa92c1954d71b1e7914874bd787b43d1c"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:gaca61dfaccbf2f58f2d8063a4e76b44a9"><td class="memItemLeft" align="right" valign="top">typedef vec< 3, float, highp > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#gaca61dfaccbf2f58f2d8063a4e76b44a9">highp_vec3</a></td></tr>
<tr class="memdesc:gaca61dfaccbf2f58f2d8063a4e76b44a9"><td class="mdescLeft"> </td><td class="mdescRight">3 components vector of high single-qualifier floating-point numbers. <a href="a00282.html#gaca61dfaccbf2f58f2d8063a4e76b44a9">More...</a><br /></td></tr>
<tr class="separator:gaca61dfaccbf2f58f2d8063a4e76b44a9"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:gad281decae52948b82feb3a9db8f63a7b"><td class="memItemLeft" align="right" valign="top">typedef vec< 4, float, highp > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#gad281decae52948b82feb3a9db8f63a7b">highp_vec4</a></td></tr>
<tr class="memdesc:gad281decae52948b82feb3a9db8f63a7b"><td class="mdescLeft"> </td><td class="mdescRight">4 components vector of high single-qualifier floating-point numbers. <a href="a00282.html#gad281decae52948b82feb3a9db8f63a7b">More...</a><br /></td></tr>
<tr class="separator:gad281decae52948b82feb3a9db8f63a7b"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:ga5a5452140650988b94d5716e4d872465"><td class="memItemLeft" align="right" valign="top">typedef vec< 2, bool, lowp > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#ga5a5452140650988b94d5716e4d872465">lowp_bvec2</a></td></tr>
<tr class="memdesc:ga5a5452140650988b94d5716e4d872465"><td class="mdescLeft"> </td><td class="mdescRight">2 components vector of low qualifier bool numbers. <a href="a00282.html#ga5a5452140650988b94d5716e4d872465">More...</a><br /></td></tr>
<tr class="separator:ga5a5452140650988b94d5716e4d872465"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:ga79e0922a977662a8fd39d7829be3908b"><td class="memItemLeft" align="right" valign="top">typedef vec< 3, bool, lowp > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#ga79e0922a977662a8fd39d7829be3908b">lowp_bvec3</a></td></tr>
<tr class="memdesc:ga79e0922a977662a8fd39d7829be3908b"><td class="mdescLeft"> </td><td class="mdescRight">3 components vector of low qualifier bool numbers. <a href="a00282.html#ga79e0922a977662a8fd39d7829be3908b">More...</a><br /></td></tr>
<tr class="separator:ga79e0922a977662a8fd39d7829be3908b"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:ga15ac87724048ab7169bb5d3572939dd3"><td class="memItemLeft" align="right" valign="top">typedef vec< 4, bool, lowp > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#ga15ac87724048ab7169bb5d3572939dd3">lowp_bvec4</a></td></tr>
<tr class="memdesc:ga15ac87724048ab7169bb5d3572939dd3"><td class="mdescLeft"> </td><td class="mdescRight">4 components vector of low qualifier bool numbers. <a href="a00282.html#ga15ac87724048ab7169bb5d3572939dd3">More...</a><br /></td></tr>
<tr class="separator:ga15ac87724048ab7169bb5d3572939dd3"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:ga108086730d086b7f6f7a033955dfb9c3"><td class="memItemLeft" align="right" valign="top">typedef vec< 2, double, lowp > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#ga108086730d086b7f6f7a033955dfb9c3">lowp_dvec2</a></td></tr>
<tr class="memdesc:ga108086730d086b7f6f7a033955dfb9c3"><td class="mdescLeft"> </td><td class="mdescRight">2 components vector of low double-qualifier floating-point numbers. <a href="a00282.html#ga108086730d086b7f6f7a033955dfb9c3">More...</a><br /></td></tr>
<tr class="separator:ga108086730d086b7f6f7a033955dfb9c3"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:ga42c518b2917e19ce6946a84c64a3a4b2"><td class="memItemLeft" align="right" valign="top">typedef vec< 3, double, lowp > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#ga42c518b2917e19ce6946a84c64a3a4b2">lowp_dvec3</a></td></tr>
<tr class="memdesc:ga42c518b2917e19ce6946a84c64a3a4b2"><td class="mdescLeft"> </td><td class="mdescRight">3 components vector of low double-qualifier floating-point numbers. <a href="a00282.html#ga42c518b2917e19ce6946a84c64a3a4b2">More...</a><br /></td></tr>
<tr class="separator:ga42c518b2917e19ce6946a84c64a3a4b2"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:ga0b4432cb8d910e406576d10d802e190d"><td class="memItemLeft" align="right" valign="top">typedef vec< 4, double, lowp > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#ga0b4432cb8d910e406576d10d802e190d">lowp_dvec4</a></td></tr>
<tr class="memdesc:ga0b4432cb8d910e406576d10d802e190d"><td class="mdescLeft"> </td><td class="mdescRight">4 components vector of low double-qualifier floating-point numbers. <a href="a00282.html#ga0b4432cb8d910e406576d10d802e190d">More...</a><br /></td></tr>
<tr class="separator:ga0b4432cb8d910e406576d10d802e190d"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:ga8433c6c1fdd80c0a83941d94aff73fa0"><td class="memItemLeft" align="right" valign="top">typedef vec< 2, int, lowp > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#ga8433c6c1fdd80c0a83941d94aff73fa0">lowp_ivec2</a></td></tr>
<tr class="memdesc:ga8433c6c1fdd80c0a83941d94aff73fa0"><td class="mdescLeft"> </td><td class="mdescRight">2 components vector of low qualifier signed integer numbers. <a href="a00282.html#ga8433c6c1fdd80c0a83941d94aff73fa0">More...</a><br /></td></tr>
<tr class="separator:ga8433c6c1fdd80c0a83941d94aff73fa0"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:gac1a86a75b3c68ebb704d7094043669d6"><td class="memItemLeft" align="right" valign="top">typedef vec< 3, int, lowp > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#gac1a86a75b3c68ebb704d7094043669d6">lowp_ivec3</a></td></tr>
<tr class="memdesc:gac1a86a75b3c68ebb704d7094043669d6"><td class="mdescLeft"> </td><td class="mdescRight">3 components vector of low qualifier signed integer numbers. <a href="a00282.html#gac1a86a75b3c68ebb704d7094043669d6">More...</a><br /></td></tr>
<tr class="separator:gac1a86a75b3c68ebb704d7094043669d6"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:ga27fc23da61859cd6356326c5f1c796de"><td class="memItemLeft" align="right" valign="top">typedef vec< 4, int, lowp > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#ga27fc23da61859cd6356326c5f1c796de">lowp_ivec4</a></td></tr>
<tr class="memdesc:ga27fc23da61859cd6356326c5f1c796de"><td class="mdescLeft"> </td><td class="mdescRight">4 components vector of low qualifier signed integer numbers. <a href="a00282.html#ga27fc23da61859cd6356326c5f1c796de">More...</a><br /></td></tr>
<tr class="separator:ga27fc23da61859cd6356326c5f1c796de"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:ga752ee45136011301b64afd8c310c47a4"><td class="memItemLeft" align="right" valign="top">typedef vec< 2, unsigned int, lowp > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#ga752ee45136011301b64afd8c310c47a4">lowp_uvec2</a></td></tr>
<tr class="memdesc:ga752ee45136011301b64afd8c310c47a4"><td class="mdescLeft"> </td><td class="mdescRight">2 components vector of low qualifier unsigned integer numbers. <a href="a00282.html#ga752ee45136011301b64afd8c310c47a4">More...</a><br /></td></tr>
<tr class="separator:ga752ee45136011301b64afd8c310c47a4"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:ga7b2efbdd6bdc2f8250c57f3e5dc9a292"><td class="memItemLeft" align="right" valign="top">typedef vec< 3, unsigned int, lowp > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#ga7b2efbdd6bdc2f8250c57f3e5dc9a292">lowp_uvec3</a></td></tr>
<tr class="memdesc:ga7b2efbdd6bdc2f8250c57f3e5dc9a292"><td class="mdescLeft"> </td><td class="mdescRight">3 components vector of low qualifier unsigned integer numbers. <a href="a00282.html#ga7b2efbdd6bdc2f8250c57f3e5dc9a292">More...</a><br /></td></tr>
<tr class="separator:ga7b2efbdd6bdc2f8250c57f3e5dc9a292"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:ga5e6a632ec1165cf9f54ceeaa5e9b2b1e"><td class="memItemLeft" align="right" valign="top">typedef vec< 4, unsigned int, lowp > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#ga5e6a632ec1165cf9f54ceeaa5e9b2b1e">lowp_uvec4</a></td></tr>
<tr class="memdesc:ga5e6a632ec1165cf9f54ceeaa5e9b2b1e"><td class="mdescLeft"> </td><td class="mdescRight">4 components vector of low qualifier unsigned integer numbers. <a href="a00282.html#ga5e6a632ec1165cf9f54ceeaa5e9b2b1e">More...</a><br /></td></tr>
<tr class="separator:ga5e6a632ec1165cf9f54ceeaa5e9b2b1e"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:ga30e8baef5d56d5c166872a2bc00f36e9"><td class="memItemLeft" align="right" valign="top">typedef vec< 2, float, lowp > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#ga30e8baef5d56d5c166872a2bc00f36e9">lowp_vec2</a></td></tr>
<tr class="memdesc:ga30e8baef5d56d5c166872a2bc00f36e9"><td class="mdescLeft"> </td><td class="mdescRight">2 components vector of low single-qualifier floating-point numbers. <a href="a00282.html#ga30e8baef5d56d5c166872a2bc00f36e9">More...</a><br /></td></tr>
<tr class="separator:ga30e8baef5d56d5c166872a2bc00f36e9"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:ga868e8e4470a3ef97c7ee3032bf90dc79"><td class="memItemLeft" align="right" valign="top">typedef vec< 3, float, lowp > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#ga868e8e4470a3ef97c7ee3032bf90dc79">lowp_vec3</a></td></tr>
<tr class="memdesc:ga868e8e4470a3ef97c7ee3032bf90dc79"><td class="mdescLeft"> </td><td class="mdescRight">3 components vector of low single-qualifier floating-point numbers. <a href="a00282.html#ga868e8e4470a3ef97c7ee3032bf90dc79">More...</a><br /></td></tr>
<tr class="separator:ga868e8e4470a3ef97c7ee3032bf90dc79"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:gace3acb313c800552a9411953eb8b2ed7"><td class="memItemLeft" align="right" valign="top">typedef vec< 4, float, lowp > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#gace3acb313c800552a9411953eb8b2ed7">lowp_vec4</a></td></tr>
<tr class="memdesc:gace3acb313c800552a9411953eb8b2ed7"><td class="mdescLeft"> </td><td class="mdescRight">4 components vector of low single-qualifier floating-point numbers. <a href="a00282.html#gace3acb313c800552a9411953eb8b2ed7">More...</a><br /></td></tr>
<tr class="separator:gace3acb313c800552a9411953eb8b2ed7"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:ga1e743764869efa9223c2bcefccedaddc"><td class="memItemLeft" align="right" valign="top">typedef vec< 2, bool, mediump > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#ga1e743764869efa9223c2bcefccedaddc">mediump_bvec2</a></td></tr>
<tr class="memdesc:ga1e743764869efa9223c2bcefccedaddc"><td class="mdescLeft"> </td><td class="mdescRight">2 components vector of medium qualifier bool numbers. <a href="a00282.html#ga1e743764869efa9223c2bcefccedaddc">More...</a><br /></td></tr>
<tr class="separator:ga1e743764869efa9223c2bcefccedaddc"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:ga50c783c25082882ef00fe2e5cddba4aa"><td class="memItemLeft" align="right" valign="top">typedef vec< 3, bool, mediump > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#ga50c783c25082882ef00fe2e5cddba4aa">mediump_bvec3</a></td></tr>
<tr class="memdesc:ga50c783c25082882ef00fe2e5cddba4aa"><td class="mdescLeft"> </td><td class="mdescRight">3 components vector of medium qualifier bool numbers. <a href="a00282.html#ga50c783c25082882ef00fe2e5cddba4aa">More...</a><br /></td></tr>
<tr class="separator:ga50c783c25082882ef00fe2e5cddba4aa"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:ga0be2c682258604a35004f088782a9645"><td class="memItemLeft" align="right" valign="top">typedef vec< 4, bool, mediump > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#ga0be2c682258604a35004f088782a9645">mediump_bvec4</a></td></tr>
<tr class="memdesc:ga0be2c682258604a35004f088782a9645"><td class="mdescLeft"> </td><td class="mdescRight">4 components vector of medium qualifier bool numbers. <a href="a00282.html#ga0be2c682258604a35004f088782a9645">More...</a><br /></td></tr>
<tr class="separator:ga0be2c682258604a35004f088782a9645"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:ga2f4f6e9a69a0281d06940fd0990cafc3"><td class="memItemLeft" align="right" valign="top">typedef vec< 2, double, mediump > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#ga2f4f6e9a69a0281d06940fd0990cafc3">mediump_dvec2</a></td></tr>
<tr class="memdesc:ga2f4f6e9a69a0281d06940fd0990cafc3"><td class="mdescLeft"> </td><td class="mdescRight">2 components vector of medium double-qualifier floating-point numbers. <a href="a00282.html#ga2f4f6e9a69a0281d06940fd0990cafc3">More...</a><br /></td></tr>
<tr class="separator:ga2f4f6e9a69a0281d06940fd0990cafc3"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:ga61c3b1dff4ec7c878af80503141b9f37"><td class="memItemLeft" align="right" valign="top">typedef vec< 3, double, mediump > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#ga61c3b1dff4ec7c878af80503141b9f37">mediump_dvec3</a></td></tr>
<tr class="memdesc:ga61c3b1dff4ec7c878af80503141b9f37"><td class="mdescLeft"> </td><td class="mdescRight">3 components vector of medium double-qualifier floating-point numbers. <a href="a00282.html#ga61c3b1dff4ec7c878af80503141b9f37">More...</a><br /></td></tr>
<tr class="separator:ga61c3b1dff4ec7c878af80503141b9f37"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:ga23a8bca00914a51542bfea13a4778186"><td class="memItemLeft" align="right" valign="top">typedef vec< 4, double, mediump > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#ga23a8bca00914a51542bfea13a4778186">mediump_dvec4</a></td></tr>
<tr class="memdesc:ga23a8bca00914a51542bfea13a4778186"><td class="mdescLeft"> </td><td class="mdescRight">4 components vector of medium double-qualifier floating-point numbers. <a href="a00282.html#ga23a8bca00914a51542bfea13a4778186">More...</a><br /></td></tr>
<tr class="separator:ga23a8bca00914a51542bfea13a4778186"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:gac57496299d276ed97044074097bd5e2c"><td class="memItemLeft" align="right" valign="top">typedef vec< 2, int, mediump > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#gac57496299d276ed97044074097bd5e2c">mediump_ivec2</a></td></tr>
<tr class="memdesc:gac57496299d276ed97044074097bd5e2c"><td class="mdescLeft"> </td><td class="mdescRight">2 components vector of medium qualifier signed integer numbers. <a href="a00282.html#gac57496299d276ed97044074097bd5e2c">More...</a><br /></td></tr>
<tr class="separator:gac57496299d276ed97044074097bd5e2c"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:ga27cfb51e0dbe15bba27a14a8590e8466"><td class="memItemLeft" align="right" valign="top">typedef vec< 3, int, mediump > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#ga27cfb51e0dbe15bba27a14a8590e8466">mediump_ivec3</a></td></tr>
<tr class="memdesc:ga27cfb51e0dbe15bba27a14a8590e8466"><td class="mdescLeft"> </td><td class="mdescRight">3 components vector of medium qualifier signed integer numbers. <a href="a00282.html#ga27cfb51e0dbe15bba27a14a8590e8466">More...</a><br /></td></tr>
<tr class="separator:ga27cfb51e0dbe15bba27a14a8590e8466"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:ga92a204c37e66ac6c1dc7ae91142f2ea5"><td class="memItemLeft" align="right" valign="top">typedef vec< 4, int, mediump > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#ga92a204c37e66ac6c1dc7ae91142f2ea5">mediump_ivec4</a></td></tr>
<tr class="memdesc:ga92a204c37e66ac6c1dc7ae91142f2ea5"><td class="mdescLeft"> </td><td class="mdescRight">4 components vector of medium qualifier signed integer numbers. <a href="a00282.html#ga92a204c37e66ac6c1dc7ae91142f2ea5">More...</a><br /></td></tr>
<tr class="separator:ga92a204c37e66ac6c1dc7ae91142f2ea5"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:gaa3b4f7806dad03d83bb3da0baa1e3b9b"><td class="memItemLeft" align="right" valign="top">typedef vec< 2, unsigned int, mediump > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#gaa3b4f7806dad03d83bb3da0baa1e3b9b">mediump_uvec2</a></td></tr>
<tr class="memdesc:gaa3b4f7806dad03d83bb3da0baa1e3b9b"><td class="mdescLeft"> </td><td class="mdescRight">2 components vector of medium qualifier unsigned integer numbers. <a href="a00282.html#gaa3b4f7806dad03d83bb3da0baa1e3b9b">More...</a><br /></td></tr>
<tr class="separator:gaa3b4f7806dad03d83bb3da0baa1e3b9b"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:ga83b7df38feefbb357f3673d950fafef7"><td class="memItemLeft" align="right" valign="top">typedef vec< 3, unsigned int, mediump > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#ga83b7df38feefbb357f3673d950fafef7">mediump_uvec3</a></td></tr>
<tr class="memdesc:ga83b7df38feefbb357f3673d950fafef7"><td class="mdescLeft"> </td><td class="mdescRight">3 components vector of medium qualifier unsigned integer numbers. <a href="a00282.html#ga83b7df38feefbb357f3673d950fafef7">More...</a><br /></td></tr>
<tr class="separator:ga83b7df38feefbb357f3673d950fafef7"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:ga64ed0deb6573375b7016daf82ffd53a7"><td class="memItemLeft" align="right" valign="top">typedef vec< 4, unsigned int, mediump > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#ga64ed0deb6573375b7016daf82ffd53a7">mediump_uvec4</a></td></tr>
<tr class="memdesc:ga64ed0deb6573375b7016daf82ffd53a7"><td class="mdescLeft"> </td><td class="mdescRight">4 components vector of medium qualifier unsigned integer numbers. <a href="a00282.html#ga64ed0deb6573375b7016daf82ffd53a7">More...</a><br /></td></tr>
<tr class="separator:ga64ed0deb6573375b7016daf82ffd53a7"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:gabc61976261c406520c7a8e4d946dc3f0"><td class="memItemLeft" align="right" valign="top">typedef vec< 2, float, mediump > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#gabc61976261c406520c7a8e4d946dc3f0">mediump_vec2</a></td></tr>
<tr class="memdesc:gabc61976261c406520c7a8e4d946dc3f0"><td class="mdescLeft"> </td><td class="mdescRight">2 components vector of medium single-qualifier floating-point numbers. <a href="a00282.html#gabc61976261c406520c7a8e4d946dc3f0">More...</a><br /></td></tr>
<tr class="separator:gabc61976261c406520c7a8e4d946dc3f0"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:ga2384e263df19f1404b733016eff78fca"><td class="memItemLeft" align="right" valign="top">typedef vec< 3, float, mediump > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#ga2384e263df19f1404b733016eff78fca">mediump_vec3</a></td></tr>
<tr class="memdesc:ga2384e263df19f1404b733016eff78fca"><td class="mdescLeft"> </td><td class="mdescRight">3 components vector of medium single-qualifier floating-point numbers. <a href="a00282.html#ga2384e263df19f1404b733016eff78fca">More...</a><br /></td></tr>
<tr class="separator:ga2384e263df19f1404b733016eff78fca"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:ga5c6978d3ffba06738416a33083853fc0"><td class="memItemLeft" align="right" valign="top">typedef vec< 4, float, mediump > </td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#ga5c6978d3ffba06738416a33083853fc0">mediump_vec4</a></td></tr>
<tr class="memdesc:ga5c6978d3ffba06738416a33083853fc0"><td class="mdescLeft"> </td><td class="mdescRight">4 components vector of medium single-qualifier floating-point numbers. <a href="a00282.html#ga5c6978d3ffba06738416a33083853fc0">More...</a><br /></td></tr>
<tr class="separator:ga5c6978d3ffba06738416a33083853fc0"><td class="memSeparator" colspan="2"> </td></tr>
</table>
<a name="details" id="details"></a><h2 class="groupheader">Detailed Description</h2>
<p>Vector types with precision qualifiers, which may result in varying precision in terms of ULPs. </p>
<p>GLSL allows defining qualifiers for particular variables. With OpenGL's GLSL, these qualifiers have no effect; they are there for compatibility. With OpenGL ES's GLSL, these qualifiers do have an effect.</p>
<p>C++ has no language equivalent to precision qualifiers. So GLM provides the next-best thing: a number of typedefs that use a particular qualifier.</p>
<p>None of these types make any guarantees about the actual qualifier used. </p>
<h2 class="groupheader">Typedef Documentation</h2>
<a class="anchor" id="gac6c781a85f012d77a75310a3058702c2"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 2, bool, highp > highp_bvec2</td>
</tr>
</table>
</div><div class="memdoc">
<p>2 components vector of high qualifier bool numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00192_source.html#l00016">16</a> of file <a class="el" href="a00192_source.html">vector_bool2_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="gaedb70027d89a0a405046aefda4eabaa6"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 3, bool, highp > highp_bvec3</td>
</tr>
</table>
</div><div class="memdoc">
<p>3 components vector of high qualifier bool numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00194_source.html#l00016">16</a> of file <a class="el" href="a00194_source.html">vector_bool3_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="gaee663ff64429443ab07a5327074192f6"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 4, bool, highp > highp_bvec4</td>
</tr>
</table>
</div><div class="memdoc">
<p>4 components vector of high qualifier bool numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00196_source.html#l00016">16</a> of file <a class="el" href="a00196_source.html">vector_bool4_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="gab98d77cca255914f5e29697fcbc2d975"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 2, f64, highp > highp_dvec2</td>
</tr>
</table>
</div><div class="memdoc">
<p>2 components vector of high double-qualifier floating-point numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00201_source.html#l00016">16</a> of file <a class="el" href="a00201_source.html">vector_double2_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="gab24dc20dcdc5b71282634bdbf6b70105"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 3, f64, highp > highp_dvec3</td>
</tr>
</table>
</div><div class="memdoc">
<p>3 components vector of high double-qualifier floating-point numbers. </p>
<p>There is no guarantee on the actual qualifier.</p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00203_source.html#l00017">17</a> of file <a class="el" href="a00203_source.html">vector_double3_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="gab654f4ed4a99d64a6cfc65320c2a7590"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 4, f64, highp > highp_dvec4</td>
</tr>
</table>
</div><div class="memdoc">
<p>4 components vector of high double-qualifier floating-point numbers. </p>
<p>There is no guarantee on the actual qualifier.</p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00205_source.html#l00018">18</a> of file <a class="el" href="a00205_source.html">vector_double4_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="gaa18f6b80b41c214f10666948539c1f93"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 2, i32, highp > highp_ivec2</td>
</tr>
</table>
</div><div class="memdoc">
<p>2 components vector of high qualifier signed integer numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00217_source.html#l00016">16</a> of file <a class="el" href="a00217_source.html">vector_int2_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="ga7dd782c3ef5719bc6d5c3ca826b8ad18"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 3, i32, highp > highp_ivec3</td>
</tr>
</table>
</div><div class="memdoc">
<p>3 components vector of high qualifier signed integer numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00219_source.html#l00016">16</a> of file <a class="el" href="a00219_source.html">vector_int3_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="gafb84dccdf5d82443df3ffc8428dcaf3e"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 4, i32, highp > highp_ivec4</td>
</tr>
</table>
</div><div class="memdoc">
<p>4 components vector of high qualifier signed integer numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00221_source.html#l00016">16</a> of file <a class="el" href="a00221_source.html">vector_int4_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="gad5dd50da9e37387ca6b4e6f9c80fe6f8"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 2, u32, highp > highp_uvec2</td>
</tr>
</table>
</div><div class="memdoc">
<p>2 components vector of high qualifier unsigned integer numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00229_source.html#l00016">16</a> of file <a class="el" href="a00229_source.html">vector_uint2_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="gaef61508dd40ec523416697982f9ceaae"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 3, u32, highp > highp_uvec3</td>
</tr>
</table>
</div><div class="memdoc">
<p>3 components vector of high qualifier unsigned integer numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00231_source.html#l00016">16</a> of file <a class="el" href="a00231_source.html">vector_uint3_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="gaeebd7dd9f3e678691f8620241e5f9221"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 4, u32, highp > highp_uvec4</td>
</tr>
</table>
</div><div class="memdoc">
<p>4 components vector of high qualifier unsigned integer numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00233_source.html#l00016">16</a> of file <a class="el" href="a00233_source.html">vector_uint4_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="gaa92c1954d71b1e7914874bd787b43d1c"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 2, float, highp > highp_vec2</td>
</tr>
</table>
</div><div class="memdoc">
<p>2 components vector of high single-qualifier floating-point numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00209_source.html#l00016">16</a> of file <a class="el" href="a00209_source.html">vector_float2_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="gaca61dfaccbf2f58f2d8063a4e76b44a9"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 3, float, highp > highp_vec3</td>
</tr>
</table>
</div><div class="memdoc">
<p>3 components vector of high single-qualifier floating-point numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00211_source.html#l00016">16</a> of file <a class="el" href="a00211_source.html">vector_float3_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="gad281decae52948b82feb3a9db8f63a7b"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 4, float, highp > highp_vec4</td>
</tr>
</table>
</div><div class="memdoc">
<p>4 components vector of high single-qualifier floating-point numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00213_source.html#l00016">16</a> of file <a class="el" href="a00213_source.html">vector_float4_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="ga5a5452140650988b94d5716e4d872465"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 2, bool, lowp > lowp_bvec2</td>
</tr>
</table>
</div><div class="memdoc">
<p>2 components vector of low qualifier bool numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00192_source.html#l00028">28</a> of file <a class="el" href="a00192_source.html">vector_bool2_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="ga79e0922a977662a8fd39d7829be3908b"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 3, bool, lowp > lowp_bvec3</td>
</tr>
</table>
</div><div class="memdoc">
<p>3 components vector of low qualifier bool numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00194_source.html#l00028">28</a> of file <a class="el" href="a00194_source.html">vector_bool3_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="ga15ac87724048ab7169bb5d3572939dd3"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 4, bool, lowp > lowp_bvec4</td>
</tr>
</table>
</div><div class="memdoc">
<p>4 components vector of low qualifier bool numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00196_source.html#l00028">28</a> of file <a class="el" href="a00196_source.html">vector_bool4_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="ga108086730d086b7f6f7a033955dfb9c3"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 2, f64, lowp > lowp_dvec2</td>
</tr>
</table>
</div><div class="memdoc">
<p>2 components vector of low double-qualifier floating-point numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00201_source.html#l00028">28</a> of file <a class="el" href="a00201_source.html">vector_double2_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="ga42c518b2917e19ce6946a84c64a3a4b2"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 3, f64, lowp > lowp_dvec3</td>
</tr>
</table>
</div><div class="memdoc">
<p>3 components vector of low double-qualifier floating-point numbers. </p>
<p>There is no guarantee on the actual qualifier.</p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00203_source.html#l00031">31</a> of file <a class="el" href="a00203_source.html">vector_double3_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="ga0b4432cb8d910e406576d10d802e190d"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 4, f64, lowp > lowp_dvec4</td>
</tr>
</table>
</div><div class="memdoc">
<p>4 components vector of low double-qualifier floating-point numbers. </p>
<p>There is no guarantee on the actual qualifier.</p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00205_source.html#l00032">32</a> of file <a class="el" href="a00205_source.html">vector_double4_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="ga8433c6c1fdd80c0a83941d94aff73fa0"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 2, i32, lowp > lowp_ivec2</td>
</tr>
</table>
</div><div class="memdoc">
<p>2 components vector of low qualifier signed integer numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00217_source.html#l00028">28</a> of file <a class="el" href="a00217_source.html">vector_int2_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="gac1a86a75b3c68ebb704d7094043669d6"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 3, i32, lowp > lowp_ivec3</td>
</tr>
</table>
</div><div class="memdoc">
<p>3 components vector of low qualifier signed integer numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00219_source.html#l00028">28</a> of file <a class="el" href="a00219_source.html">vector_int3_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="ga27fc23da61859cd6356326c5f1c796de"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 4, i32, lowp > lowp_ivec4</td>
</tr>
</table>
</div><div class="memdoc">
<p>4 components vector of low qualifier signed integer numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00221_source.html#l00028">28</a> of file <a class="el" href="a00221_source.html">vector_int4_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="ga752ee45136011301b64afd8c310c47a4"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 2, u32, lowp > lowp_uvec2</td>
</tr>
</table>
</div><div class="memdoc">
<p>2 components vector of low qualifier unsigned integer numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00229_source.html#l00028">28</a> of file <a class="el" href="a00229_source.html">vector_uint2_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="ga7b2efbdd6bdc2f8250c57f3e5dc9a292"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 3, u32, lowp > lowp_uvec3</td>
</tr>
</table>
</div><div class="memdoc">
<p>3 components vector of low qualifier unsigned integer numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00231_source.html#l00028">28</a> of file <a class="el" href="a00231_source.html">vector_uint3_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="ga5e6a632ec1165cf9f54ceeaa5e9b2b1e"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 4, u32, lowp > lowp_uvec4</td>
</tr>
</table>
</div><div class="memdoc">
<p>4 components vector of low qualifier unsigned integer numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00233_source.html#l00028">28</a> of file <a class="el" href="a00233_source.html">vector_uint4_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="ga30e8baef5d56d5c166872a2bc00f36e9"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 2, float, lowp > lowp_vec2</td>
</tr>
</table>
</div><div class="memdoc">
<p>2 components vector of low single-qualifier floating-point numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00209_source.html#l00028">28</a> of file <a class="el" href="a00209_source.html">vector_float2_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="ga868e8e4470a3ef97c7ee3032bf90dc79"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 3, float, lowp > lowp_vec3</td>
</tr>
</table>
</div><div class="memdoc">
<p>3 components vector of low single-qualifier floating-point numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00211_source.html#l00028">28</a> of file <a class="el" href="a00211_source.html">vector_float3_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="gace3acb313c800552a9411953eb8b2ed7"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 4, float, lowp > lowp_vec4</td>
</tr>
</table>
</div><div class="memdoc">
<p>4 components vector of low single-qualifier floating-point numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00213_source.html#l00028">28</a> of file <a class="el" href="a00213_source.html">vector_float4_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="ga1e743764869efa9223c2bcefccedaddc"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 2, bool, mediump > mediump_bvec2</td>
</tr>
</table>
</div><div class="memdoc">
<p>2 components vector of medium qualifier bool numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00192_source.html#l00022">22</a> of file <a class="el" href="a00192_source.html">vector_bool2_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="ga50c783c25082882ef00fe2e5cddba4aa"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 3, bool, mediump > mediump_bvec3</td>
</tr>
</table>
</div><div class="memdoc">
<p>3 components vector of medium qualifier bool numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00194_source.html#l00022">22</a> of file <a class="el" href="a00194_source.html">vector_bool3_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="ga0be2c682258604a35004f088782a9645"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 4, bool, mediump > mediump_bvec4</td>
</tr>
</table>
</div><div class="memdoc">
<p>4 components vector of medium qualifier bool numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00196_source.html#l00022">22</a> of file <a class="el" href="a00196_source.html">vector_bool4_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="ga2f4f6e9a69a0281d06940fd0990cafc3"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 2, f64, mediump > mediump_dvec2</td>
</tr>
</table>
</div><div class="memdoc">
<p>2 components vector of medium double-qualifier floating-point numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00201_source.html#l00022">22</a> of file <a class="el" href="a00201_source.html">vector_double2_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="ga61c3b1dff4ec7c878af80503141b9f37"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 3, f64, mediump > mediump_dvec3</td>
</tr>
</table>
</div><div class="memdoc">
<p>3 components vector of medium double-qualifier floating-point numbers. </p>
<p>There is no guarantee on the actual qualifier.</p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00203_source.html#l00024">24</a> of file <a class="el" href="a00203_source.html">vector_double3_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="ga23a8bca00914a51542bfea13a4778186"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 4, f64, mediump > mediump_dvec4</td>
</tr>
</table>
</div><div class="memdoc">
<p>4 components vector of medium double-qualifier floating-point numbers. </p>
<p>There is no guarantee on the actual qualifier.</p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00205_source.html#l00025">25</a> of file <a class="el" href="a00205_source.html">vector_double4_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="gac57496299d276ed97044074097bd5e2c"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 2, i32, mediump > mediump_ivec2</td>
</tr>
</table>
</div><div class="memdoc">
<p>2 components vector of medium qualifier signed integer numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00217_source.html#l00022">22</a> of file <a class="el" href="a00217_source.html">vector_int2_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="ga27cfb51e0dbe15bba27a14a8590e8466"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 3, i32, mediump > mediump_ivec3</td>
</tr>
</table>
</div><div class="memdoc">
<p>3 components vector of medium qualifier signed integer numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00219_source.html#l00022">22</a> of file <a class="el" href="a00219_source.html">vector_int3_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="ga92a204c37e66ac6c1dc7ae91142f2ea5"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 4, i32, mediump > mediump_ivec4</td>
</tr>
</table>
</div><div class="memdoc">
<p>4 components vector of medium qualifier signed integer numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00221_source.html#l00022">22</a> of file <a class="el" href="a00221_source.html">vector_int4_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="gaa3b4f7806dad03d83bb3da0baa1e3b9b"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 2, u32, mediump > mediump_uvec2</td>
</tr>
</table>
</div><div class="memdoc">
<p>2 components vector of medium qualifier unsigned integer numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00229_source.html#l00022">22</a> of file <a class="el" href="a00229_source.html">vector_uint2_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="ga83b7df38feefbb357f3673d950fafef7"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 3, u32, mediump > mediump_uvec3</td>
</tr>
</table>
</div><div class="memdoc">
<p>3 components vector of medium qualifier unsigned integer numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00231_source.html#l00022">22</a> of file <a class="el" href="a00231_source.html">vector_uint3_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="ga64ed0deb6573375b7016daf82ffd53a7"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 4, u32, mediump > mediump_uvec4</td>
</tr>
</table>
</div><div class="memdoc">
<p>4 components vector of medium qualifier unsigned integer numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00233_source.html#l00022">22</a> of file <a class="el" href="a00233_source.html">vector_uint4_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="gabc61976261c406520c7a8e4d946dc3f0"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 2, float, mediump > mediump_vec2</td>
</tr>
</table>
</div><div class="memdoc">
<p>2 components vector of medium single-qualifier floating-point numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00209_source.html#l00022">22</a> of file <a class="el" href="a00209_source.html">vector_float2_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="ga2384e263df19f1404b733016eff78fca"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 3, float, mediump > mediump_vec3</td>
</tr>
</table>
</div><div class="memdoc">
<p>3 components vector of medium single-qualifier floating-point numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00211_source.html#l00022">22</a> of file <a class="el" href="a00211_source.html">vector_float3_precision.hpp</a>.</p>
</div>
</div>
<a class="anchor" id="ga5c6978d3ffba06738416a33083853fc0"></a>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">typedef vec< 4, float, mediump > mediump_vec4</td>
</tr>
</table>
</div><div class="memdoc">
<p>4 components vector of medium single-qualifier floating-point numbers. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.1.5 Vectors</a> </dd>
<dd>
<a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier</a> </dd></dl>
<p>Definition at line <a class="el" href="a00213_source.html#l00022">22</a> of file <a class="el" href="a00213_source.html">vector_float4_precision.hpp</a>.</p>
</div>
</div>
</div><!-- contents -->
<!-- start footer part -->
<hr class="footer"/><address class="footer"><small>
Generated by  <a href="http://www.doxygen.org/index.html">
<img class="footer" src="doxygen.png" alt="doxygen"/>
</a> 1.8.10
</small></address>
</body>
</html>
|
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Start Bootstrap - SB Admin Version 2.0 Demo</title>
<!-- Core CSS - Include with every page -->
<link href="css/bootstrap.min.css" rel="stylesheet">
<link href="font-awesome/css/font-awesome.css" rel="stylesheet">
<!-- Page-Level Plugin CSS - Panels and Wells -->
<!-- SB Admin CSS - Include with every page -->
<link href="css/sb-admin.css" rel="stylesheet">
</head>
<body>
<div id="wrapper">
<nav class="navbar navbar-default navbar-fixed-top" role="navigation" style="margin-bottom: 0">
<div class="navbar-header">
<button type="button" class="navbar-toggle" data-toggle="collapse" data-target=".sidebar-collapse">
<span class="sr-only">Toggle navigation</span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
<a class="navbar-brand" href="index.html">SB Admin v2.0</a>
</div>
<!-- /.navbar-header -->
<ul class="nav navbar-top-links navbar-right">
<li class="dropdown">
<a class="dropdown-toggle" data-toggle="dropdown" href="#">
<i class="fa fa-envelope fa-fw"></i> <i class="fa fa-caret-down"></i>
</a>
<ul class="dropdown-menu dropdown-messages">
<li>
<a href="#">
<div>
<strong>John Smith</strong>
<span class="pull-right text-muted">
<em>Yesterday</em>
</span>
</div>
<div>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque eleifend...</div>
</a>
</li>
<li class="divider"></li>
<li>
<a href="#">
<div>
<strong>John Smith</strong>
<span class="pull-right text-muted">
<em>Yesterday</em>
</span>
</div>
<div>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque eleifend...</div>
</a>
</li>
<li class="divider"></li>
<li>
<a href="#">
<div>
<strong>John Smith</strong>
<span class="pull-right text-muted">
<em>Yesterday</em>
</span>
</div>
<div>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque eleifend...</div>
</a>
</li>
<li class="divider"></li>
<li>
<a class="text-center" href="#">
<strong>Read All Messages</strong>
<i class="fa fa-angle-right"></i>
</a>
</li>
</ul>
<!-- /.dropdown-messages -->
</li>
<!-- /.dropdown -->
<li class="dropdown">
<a class="dropdown-toggle" data-toggle="dropdown" href="#">
<i class="fa fa-tasks fa-fw"></i> <i class="fa fa-caret-down"></i>
</a>
<ul class="dropdown-menu dropdown-tasks">
<li>
<a href="#">
<div>
<p>
<strong>Task 1</strong>
<span class="pull-right text-muted">40% Complete</span>
</p>
<div class="progress progress-striped active">
<div class="progress-bar progress-bar-success" role="progressbar" aria-valuenow="40" aria-valuemin="0" aria-valuemax="100" style="width: 40%">
<span class="sr-only">40% Complete (success)</span>
</div>
</div>
</div>
</a>
</li>
<li class="divider"></li>
<li>
<a href="#">
<div>
<p>
<strong>Task 2</strong>
<span class="pull-right text-muted">20% Complete</span>
</p>
<div class="progress progress-striped active">
<div class="progress-bar progress-bar-info" role="progressbar" aria-valuenow="20" aria-valuemin="0" aria-valuemax="100" style="width: 20%">
<span class="sr-only">20% Complete</span>
</div>
</div>
</div>
</a>
</li>
<li class="divider"></li>
<li>
<a href="#">
<div>
<p>
<strong>Task 3</strong>
<span class="pull-right text-muted">60% Complete</span>
</p>
<div class="progress progress-striped active">
<div class="progress-bar progress-bar-warning" role="progressbar" aria-valuenow="60" aria-valuemin="0" aria-valuemax="100" style="width: 60%">
<span class="sr-only">60% Complete (warning)</span>
</div>
</div>
</div>
</a>
</li>
<li class="divider"></li>
<li>
<a href="#">
<div>
<p>
<strong>Task 4</strong>
<span class="pull-right text-muted">80% Complete</span>
</p>
<div class="progress progress-striped active">
<div class="progress-bar progress-bar-danger" role="progressbar" aria-valuenow="80" aria-valuemin="0" aria-valuemax="100" style="width: 80%">
<span class="sr-only">80% Complete (danger)</span>
</div>
</div>
</div>
</a>
</li>
<li class="divider"></li>
<li>
<a class="text-center" href="#">
<strong>See All Tasks</strong>
<i class="fa fa-angle-right"></i>
</a>
</li>
</ul>
<!-- /.dropdown-tasks -->
</li>
<!-- /.dropdown -->
<li class="dropdown">
<a class="dropdown-toggle" data-toggle="dropdown" href="#">
<i class="fa fa-bell fa-fw"></i> <i class="fa fa-caret-down"></i>
</a>
<ul class="dropdown-menu dropdown-alerts">
<li>
<a href="#">
<div>
<i class="fa fa-comment fa-fw"></i> New Comment
<span class="pull-right text-muted small">4 minutes ago</span>
</div>
</a>
</li>
<li class="divider"></li>
<li>
<a href="#">
<div>
<i class="fa fa-twitter fa-fw"></i> 3 New Followers
<span class="pull-right text-muted small">12 minutes ago</span>
</div>
</a>
</li>
<li class="divider"></li>
<li>
<a href="#">
<div>
<i class="fa fa-envelope fa-fw"></i> Message Sent
<span class="pull-right text-muted small">4 minutes ago</span>
</div>
</a>
</li>
<li class="divider"></li>
<li>
<a href="#">
<div>
<i class="fa fa-tasks fa-fw"></i> New Task
<span class="pull-right text-muted small">4 minutes ago</span>
</div>
</a>
</li>
<li class="divider"></li>
<li>
<a href="#">
<div>
<i class="fa fa-upload fa-fw"></i> Server Rebooted
<span class="pull-right text-muted small">4 minutes ago</span>
</div>
</a>
</li>
<li class="divider"></li>
<li>
<a class="text-center" href="#">
<strong>See All Alerts</strong>
<i class="fa fa-angle-right"></i>
</a>
</li>
</ul>
<!-- /.dropdown-alerts -->
</li>
<!-- /.dropdown -->
<li class="dropdown">
<a class="dropdown-toggle" data-toggle="dropdown" href="#">
<i class="fa fa-user fa-fw"></i> <i class="fa fa-caret-down"></i>
</a>
<ul class="dropdown-menu dropdown-user">
<li><a href="#"><i class="fa fa-user fa-fw"></i> User Profile</a>
</li>
<li><a href="#"><i class="fa fa-gear fa-fw"></i> Settings</a>
</li>
<li class="divider"></li>
<li><a href="login.html"><i class="fa fa-sign-out fa-fw"></i> Logout</a>
</li>
</ul>
<!-- /.dropdown-user -->
</li>
<!-- /.dropdown -->
</ul>
<!-- /.navbar-top-links -->
<div class="navbar-default navbar-static-side" role="navigation">
<div class="sidebar-collapse">
<ul class="nav" id="side-menu">
<li class="sidebar-search">
<div class="input-group custom-search-form">
<input type="text" class="form-control" placeholder="Search...">
<span class="input-group-btn">
<button class="btn btn-default" type="button">
<i class="fa fa-search"></i>
</button>
</span>
</div>
<!-- /input-group -->
</li>
<li>
<a href="index.html"><i class="fa fa-dashboard fa-fw"></i> Dashboard</a>
</li>
<li>
<a href="#"><i class="fa fa-bar-chart-o fa-fw"></i> Charts<span class="fa arrow"></span></a>
<ul class="nav nav-second-level">
<li>
<a href="flot.html">Flot Charts</a>
</li>
<li>
<a href="morris.html">Morris.js Charts</a>
</li>
</ul>
<!-- /.nav-second-level -->
</li>
<li>
<a href="tables.html"><i class="fa fa-table fa-fw"></i> Tables</a>
</li>
<li>
<a href="forms.html"><i class="fa fa-edit fa-fw"></i> Forms</a>
</li>
<li>
<a href="#"><i class="fa fa-wrench fa-fw"></i> UI Elements<span class="fa arrow"></span></a>
<ul class="nav nav-second-level">
<li>
<a href="panels-wells.html">Panels and Wells</a>
</li>
<li>
<a href="buttons.html">Buttons</a>
</li>
<li>
<a href="notifications.html">Notifications</a>
</li>
<li>
<a href="typography.html">Typography</a>
</li>
<li>
<a href="grid.html">Grid</a>
</li>
</ul>
<!-- /.nav-second-level -->
</li>
<li>
<a href="#"><i class="fa fa-sitemap fa-fw"></i> Multi-Level Dropdown<span class="fa arrow"></span></a>
<ul class="nav nav-second-level">
<li>
<a href="#">Second Level Item</a>
</li>
<li>
<a href="#">Second Level Item</a>
</li>
<li>
<a href="#">Third Level <span class="fa arrow"></span></a>
<ul class="nav nav-third-level">
<li>
<a href="#">Third Level Item</a>
</li>
<li>
<a href="#">Third Level Item</a>
</li>
<li>
<a href="#">Third Level Item</a>
</li>
<li>
<a href="#">Third Level Item</a>
</li>
</ul>
<!-- /.nav-third-level -->
</li>
</ul>
<!-- /.nav-second-level -->
</li>
<li>
<a href="#"><i class="fa fa-files-o fa-fw"></i> Sample Pages<span class="fa arrow"></span></a>
<ul class="nav nav-second-level">
<li>
<a href="blank.html">Blank Page</a>
</li>
<li>
<a href="login.html">Login Page</a>
</li>
</ul>
<!-- /.nav-second-level -->
</li>
</ul>
<!-- /#side-menu -->
</div>
<!-- /.sidebar-collapse -->
</div>
<!-- /.navbar-static-side -->
</nav>
<div id="page-wrapper">
<div class="row">
<div class="col-lg-12">
<h1 class="page-header">Panels and Wells</h1>
</div>
<!-- /.col-lg-12 -->
</div>
<!-- /.row -->
<div class="row">
<div class="col-lg-4">
<div class="panel panel-default">
<div class="panel-heading">
Default Panel
</div>
<div class="panel-body">
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vestibulum tincidunt est vitae ultrices accumsan. Aliquam ornare lacus adipiscing, posuere lectus et, fringilla augue.</p>
</div>
<div class="panel-footer">
Panel Footer
</div>
</div>
</div>
<!-- /.col-lg-4 -->
<div class="col-lg-4">
<div class="panel panel-primary">
<div class="panel-heading">
Primary Panel
</div>
<div class="panel-body">
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vestibulum tincidunt est vitae ultrices accumsan. Aliquam ornare lacus adipiscing, posuere lectus et, fringilla augue.</p>
</div>
<div class="panel-footer">
Panel Footer
</div>
</div>
</div>
<!-- /.col-lg-4 -->
<div class="col-lg-4">
<div class="panel panel-success">
<div class="panel-heading">
Success Panel
</div>
<div class="panel-body">
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vestibulum tincidunt est vitae ultrices accumsan. Aliquam ornare lacus adipiscing, posuere lectus et, fringilla augue.</p>
</div>
<div class="panel-footer">
Panel Footer
</div>
</div>
</div>
<!-- /.col-lg-4 -->
</div>
<!-- /.row -->
<div class="row">
<div class="col-lg-4">
<div class="panel panel-info">
<div class="panel-heading">
Info Panel
</div>
<div class="panel-body">
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vestibulum tincidunt est vitae ultrices accumsan. Aliquam ornare lacus adipiscing, posuere lectus et, fringilla augue.</p>
</div>
<div class="panel-footer">
Panel Footer
</div>
</div>
</div>
<!-- /.col-lg-4 -->
<div class="col-lg-4">
<div class="panel panel-warning">
<div class="panel-heading">
Warning Panel
</div>
<div class="panel-body">
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vestibulum tincidunt est vitae ultrices accumsan. Aliquam ornare lacus adipiscing, posuere lectus et, fringilla augue.</p>
</div>
<div class="panel-footer">
Panel Footer
</div>
</div>
</div>
<!-- /.col-lg-4 -->
<div class="col-lg-4">
<div class="panel panel-danger">
<div class="panel-heading">
Danger Panel
</div>
<div class="panel-body">
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vestibulum tincidunt est vitae ultrices accumsan. Aliquam ornare lacus adipiscing, posuere lectus et, fringilla augue.</p>
</div>
<div class="panel-footer">
Panel Footer
</div>
</div>
</div>
<!-- /.col-lg-4 -->
</div>
<!-- /.row -->
<div class="row">
<div class="col-lg-12">
<div class="panel panel-default">
<div class="panel-heading">
Collapsible Accordion Panel Group
</div>
<!-- .panel-heading -->
<div class="panel-body">
<div class="panel-group" id="accordion">
<div class="panel panel-default">
<div class="panel-heading">
<h4 class="panel-title">
<a data-toggle="collapse" data-parent="#accordion" href="#collapseOne">Collapsible Group Item #1</a>
</h4>
</div>
<div id="collapseOne" class="panel-collapse collapse in">
<div class="panel-body">
Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
</div>
</div>
</div>
<div class="panel panel-default">
<div class="panel-heading">
<h4 class="panel-title">
<a data-toggle="collapse" data-parent="#accordion" href="#collapseTwo">Collapsible Group Item #2</a>
</h4>
</div>
<div id="collapseTwo" class="panel-collapse collapse">
<div class="panel-body">
Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
</div>
</div>
</div>
<div class="panel panel-default">
<div class="panel-heading">
<h4 class="panel-title">
<a data-toggle="collapse" data-parent="#accordion" href="#collapseThree">Collapsible Group Item #3</a>
</h4>
</div>
<div id="collapseThree" class="panel-collapse collapse">
<div class="panel-body">
Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
</div>
</div>
</div>
</div>
</div>
<!-- .panel-body -->
</div>
<!-- /.panel -->
</div>
<!-- /.col-lg-12 -->
</div>
<!-- /.row -->
<div class="row">
<div class="col-lg-6">
<div class="panel panel-default">
<div class="panel-heading">
Basic Tabs
</div>
<!-- /.panel-heading -->
<div class="panel-body">
<!-- Nav tabs -->
<ul class="nav nav-tabs">
<li class="active"><a href="#home" data-toggle="tab">Home</a>
</li>
<li><a href="#profile" data-toggle="tab">Profile</a>
</li>
<li><a href="#messages" data-toggle="tab">Messages</a>
</li>
<li><a href="#settings" data-toggle="tab">Settings</a>
</li>
</ul>
<!-- Tab panes -->
<div class="tab-content">
<div class="tab-pane fade in active" id="home">
<h4>Home Tab</h4>
<p>Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.</p>
</div>
<div class="tab-pane fade" id="profile">
<h4>Profile Tab</h4>
<p>Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.</p>
</div>
<div class="tab-pane fade" id="messages">
<h4>Messages Tab</h4>
<p>Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.</p>
</div>
<div class="tab-pane fade" id="settings">
<h4>Settings Tab</h4>
<p>Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.</p>
</div>
</div>
</div>
<!-- /.panel-body -->
</div>
<!-- /.panel -->
</div>
<!-- /.col-lg-6 -->
<div class="col-lg-6">
<div class="panel panel-default">
<div class="panel-heading">
Pill Tabs
</div>
<!-- /.panel-heading -->
<div class="panel-body">
<!-- Nav tabs -->
<ul class="nav nav-pills">
<li class="active"><a href="#home-pills" data-toggle="tab">Home</a>
</li>
<li><a href="#profile-pills" data-toggle="tab">Profile</a>
</li>
<li><a href="#messages-pills" data-toggle="tab">Messages</a>
</li>
<li><a href="#settings-pills" data-toggle="tab">Settings</a>
</li>
</ul>
<!-- Tab panes -->
<div class="tab-content">
<div class="tab-pane fade in active" id="home-pills">
<h4>Home Tab</h4>
<p>Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.</p>
</div>
<div class="tab-pane fade" id="profile-pills">
<h4>Profile Tab</h4>
<p>Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.</p>
</div>
<div class="tab-pane fade" id="messages-pills">
<h4>Messages Tab</h4>
<p>Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.</p>
</div>
<div class="tab-pane fade" id="settings-pills">
<h4>Settings Tab</h4>
<p>Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.</p>
</div>
</div>
</div>
<!-- /.panel-body -->
</div>
<!-- /.panel -->
</div>
<!-- /.col-lg-6 -->
</div>
<!-- /.row -->
<div class="row">
<div class="col-lg-4">
<div class="well">
<h4>Normal Well</h4>
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vestibulum tincidunt est vitae ultrices accumsan. Aliquam ornare lacus adipiscing, posuere lectus et, fringilla augue.</p>
</div>
</div>
<!-- /.col-lg-4 -->
<div class="col-lg-4">
<div class="well well-lg">
<h4>Large Well</h4>
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vestibulum tincidunt est vitae ultrices accumsan. Aliquam ornare lacus adipiscing, posuere lectus et, fringilla augue.</p>
</div>
</div>
<!-- /.col-lg-4 -->
<div class="col-lg-4">
<div class="well well-sm">
<h4>Small Well</h4>
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vestibulum tincidunt est vitae ultrices accumsan. Aliquam ornare lacus adipiscing, posuere lectus et, fringilla augue.</p>
</div>
</div>
<!-- /.col-lg-4 -->
</div>
<!-- /.row -->
<div class="row">
<div class="col-lg-12">
<div class="jumbotron">
<h1>Jumbotron</h1>
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vestibulum tincidunt est vitae ultrices accumsan. Aliquam ornare lacus adipiscing, posuere lectus et, fringilla augue. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vestibulum tincidunt est vitae ultrices accumsan. Aliquam ornare lacus adipiscing.</p>
<p><a class="btn btn-primary btn-lg" role="button">Learn more</a>
</p>
</div>
</div>
<!-- /.col-lg-12 -->
</div>
<!-- /.row -->
</div>
<!-- /#page-wrapper -->
</div>
<!-- /#wrapper -->
<!-- Core Scripts - Include with every page -->
<script src="js/jquery-1.10.2.js"></script>
<script src="js/bootstrap.min.js"></script>
<script src="js/plugins/metisMenu/jquery.metisMenu.js"></script>
<!-- Page-Level Plugin Scripts - Panels and Wells -->
<!-- SB Admin Scripts - Include with every page -->
<script src="js/sb-admin.js"></script>
<!-- Page-Level Demo Scripts - Panels and Wells - Use for reference -->
</body>
</html>
|
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<link rel="stylesheet" href="./../../assets/css/combined.css">
<link rel="shortcut icon" href="./../../favicon.ico" />
<script src="http://www.google.com/jsapi" type="text/javascript"></script>
<script type="text/javascript">
var path = './../../';
var class_prefix = "DBUtil::";
</script>
<script src="./../../assets/js/combined.js"></script>
<title>DBUtil - Classes - FuelPHP Documentation</title>
</head>
<body>
<div id="container">
<header id="header">
<div class="table">
<h1>
<a href="http://fuelphp.com"><img height="37px" width="147px" src="./../../assets/img/fuel.png" /></a>
<strong>Documentation</strong>
</h1>
<form id="google_search">
<p>
<span id="search_clear"> </span>
<input type="submit" name="search_submit" id="search_submit" value="search" />
<input type="text" value="" id="search_input" name="search_input" />
</p>
</form>
</div>
<nav>
<div class="clear"></div>
</nav>
<a href="#" id="toc_handle">table of contents</a>
<div class="clear"></div>
</header>
<div id="cse">
<div id="cse_point"></div>
<div id="cse_content"></div>
</div>
<div id="main">
<h2>DBUtil Class</h2>
<p>The DBUtil class allows you to manage and perform routine operations on your databases.</p>
<h3 id="managing">Managing functions</h3>
<p>DBUtil allows you to create, rename, alter and drop database fields.</p>
<article>
<h4 class="method" id="method_set_connection">set_connection($connection)</h4>
<p>The <strong>set_connection</strong> method sets the default DBUtil connection for all methods.</p>
<table class="method">
<tbody>
<tr>
<th>Static</th>
<td>Yes</td>
</tr>
<tr>
<th>Parameters</th>
<td>
<table class="parameters">
<tr>
<th>Param</th>
<th>Default</th>
<th class="description">Description</th>
</tr>
<tr>
<th><kbd>$connection</kbd></th>
<td><strong>required</strong></td>
<td class="description">The database connection.</td>
</tr>
</table>
</td>
</tr>
<tr>
<th>Returns</th>
<td>Returns the number of affected rows.</td>
</tr>
<tr>
<th>Throws</th>
<td>\Database_Exception on failure</td>
</tr>
<tr>
<th>Example</th>
<td>
<pre class="php"><code>// Set an alternative database connection for DBUtil
DBUtil::set_connection('my_database_group');
// Do other DBUtil calls with that connection.
...
// And set it back to default.
DBUtil::set_connection(null);
</code></pre>
</td>
</tr>
</tbody>
</table>
</article>
<article>
<h4 class="method" id="method_create_database">create_database($database, $charset = null, $if_not_exists = true, $db = null)</h4>
<p>The <strong>create_database</strong> method creates a database. Will throw a Database_Exception if it cannot.</p>
<table class="method">
<tbody>
<tr>
<th>Static</th>
<td>Yes</td>
</tr>
<tr>
<th>Parameters</th>
<td>
<table class="parameters">
<tr>
<th>Param</th>
<th>Default</th>
<th class="description">Description</th>
</tr>
<tr>
<th><kbd>$database</kbd></th>
<td><em>required</em></td>
<td>the database name</td>
</tr>
<tr>
<th><kbd>$charset</kbd></th>
<td><em>null</em></td>
<td>the default charset</td>
</tr>
<tr>
<th><kbd>$if_not_exists</kbd></th>
<td><em>true</em></td>
<td>whether to use <em>IF NOT EXISTS</em></td>
</tr>
<tr>
<th><kbd>$db</kbd></th>
<td><pre class="php"><code>null</code></pre></td>
<td class="description">The database connection.</td>
</tr>
</table>
</td>
</tr>
<tr>
<th>Returns</th>
<td>Returns the number of affected rows.</td>
</tr>
<tr>
<th>Throws</th>
<td>\Database_Exception on failure</td>
</tr>
<tr>
<th>Example</th>
<td>
<pre class="php"><code>// Create a database named `my_database`
DBUtil::create_database('my_database');
// Catch the exception
try
{
DBUtil::create_database('my_database');
}
catch(\Database_Exception $e)
{
// Creation failed...
}
// You can also set a default charset.
// CREATE DATABASE IF NOT EXISTS `new_database` DEFAULT CHARACTER SET 'utf8'
DBUtil::create_database('new_database', 'utf8');
// CREATE DATABASE IF NOT EXISTS `new_database` DEFAULT CHARACTER SET 'utf8' COLLATE 'utf8_unicode_ci'
DBUtil::create_database('new_database', 'utf8_unicode_ci');
</code></pre>
</td>
</tr>
</tbody>
</table>
<p>If no charset is provided it will fall back to the charset of the active db group.</p>
</article>
<article>
<h4 class="method" id="method_drop_database">drop_database($database, $db = null)</h4>
<p>The <strong>drop_database</strong> method drops a database. Will throw a Database_Exception if it cannot.</p>
<table class="method">
<tbody>
<tr>
<th>Static</th>
<td>Yes</td>
</tr>
<tr>
<th>Parameters</th>
<td>
<table class="parameters">
<tr>
<th>Param</th>
<th>Default</th>
<th class="description">Description</th>
</tr>
<tr>
<th><kbd>$database</kbd></th>
<td><em>required</em></td>
<td>the database name</td>
</tr>
<tr>
<th><kbd>$db</kbd></th>
<td><pre class="php"><code>null</code></pre></td>
<td class="description">The database connection.</td>
</tr>
</table>
</td>
</tr>
<tr>
<th>Returns</th>
<td>Returns the number of affected rows.</td>
</tr>
<tr>
<th>Throws</th>
<td>\Database_Exception on failure</td>
</tr>
<tr>
<th>Example</th>
<td>
<pre class="php"><code>// Drop a database named `my_database`
DBUtil::drop_database('my_database');
// Catch the exception
try
{
DBUtil::drop_database('my_database');
}
catch(\Database_Exception $e)
{
// Drop failed...
}
</code></pre>
</td>
</tr>
</tbody>
</table>
</article>
<article>
<h4 class="method" id="method_table_exists">table_exists($table, $db = null)</h4>
<p>The <strong>table_exists</strong> method checks if a given table exists.</p>
<table class="method">
<tbody>
<tr>
<th>Static</th>
<td>Yes</td>
</tr>
<tr>
<th>Parameters</th>
<td>
<table class="parameters">
<tr>
<th>Param</th>
<th>Default</th>
<th class="description">Description</th>
</tr>
<tr>
<th><kbd>$table</kbd></th>
<td><em>required</em></td>
<td>the table name</td>
</tr>
<tr>
<th><kbd>$db</kbd></th>
<td><pre class="php"><code>null</code></pre></td>
<td class="description">The database connection.</td>
</tr>
</table>
</td>
</tr>
<tr>
<th>Returns</th>
<td>Returns true if table exists, false if it does not.</td>
</tr>
<tr>
<th>Example</th>
<td>
<pre class="php"><code>// Check if table named 'my_table' exists
if(DBUtil::table_exists('my_table'))
{
// Table exists
} else
{
// Table does NOT exist, create it!
}</code>
</pre>
</td>
</tr>
</tbody>
</table>
</article>
<article>
<h4 class="method" id="method_drop_table">drop_table($table, $db = null)</h4>
<p>The <strong>drop_table</strong> method drops a table. Will throw a Database_Exception if it cannot.</p>
<table class="method">
<tbody>
<tr>
<th>Static</th>
<td>Yes</td>
</tr>
<tr>
<th>Parameters</th>
<td>
<table class="parameters">
<tr>
<th>Param</th>
<th>Default</th>
<th class="description">Description</th>
</tr>
<tr>
<th><kbd>$table</kbd></th>
<td><em>required</em></td>
<td>the table name</td>
</tr>
<tr>
<th><kbd>$db</kbd></th>
<td><pre class="php"><code>null</code></pre></td>
<td class="description">The database connection.</td>
</tr>
</table>
</td>
</tr>
<tr>
<th>Returns</th>
<td>Returns the number of affected rows.</td>
</tr>
<tr>
<th>Throws</th>
<td>\Database_Exception on failure</td>
</tr>
<tr>
<th>Example</th>
<td>
<pre class="php"><code>// Drop a table named `my_table`
DBUtil::drop_table('my_table');
// Catch the exception
try
{
DBUtil::drop_table('my_table');
}
catch(\Database_Exception $e)
{
// Drop failed...
}
</code></pre>
</td>
</tr>
</tbody>
</table>
</article>
<article>
<h4 class="method" id="method_rename_table">rename_table($table, $new_table_name, $db = null)</h4>
<p>The <strong>rename_table</strong> method renames a table.</p>
<table class="method">
<tbody>
<tr>
<th>Static</th>
<td>Yes</td>
</tr>
<tr>
<th>Parameters</th>
<td>
<table class="parameters">
<tr>
<th>Param</th>
<th>Default</th>
<th class="description">Description</th>
</tr>
<tr>
<th><kbd>$table</kbd></th>
<td><em>required</em></td>
<td>The old table name</td>
</tr>
<tr>
<th><kbd>$new_table_name</kbd></th>
<td><em>required</em></td>
<td>The new table name</td>
</tr>
</table>
</td>
</tr>
<tr>
<th>Returns</th>
<td>Returns the number of affected rows.</td>
</tr>
<tr>
<th>Throws</th>
<td>\Database_Exception on failure</td>
</tr>
<tr>
<th>Example</th>
<td>
<pre class="php"><code>// Rename `my_table` to `my_new_table`
DBUtil::rename_table('my_table', 'my_new_table');
// Catch the exception
try
{
DBUtil::rename_table('my_table', 'my_new_table');
}
catch(\Database_Exception $e)
{
// Rename failed...
}
</code></pre>
</td>
</tr>
</tbody>
</table>
</article>
<article>
<h4 class="method" id="method_create_table">create_table($table, $fields, $primary_keys = array(), $if_not_exists = true, $engine = false, $charset = null, $foreign_keys = array(), $db = null)</h4>
<p>The <strong>create_table</strong> method creates a table.</p>
<table class="method">
<tbody>
<tr>
<th>Static</th>
<td>Yes</td>
</tr>
<tr>
<th>Parameters</th>
<td>
<table class="parameters">
<tr>
<th>Param</th>
<th>Default</th>
<th class="description">Description</th>
</tr>
<tr>
<th><kbd>$table</kbd></th>
<td><em>required</em></td>
<td>the table name</td>
</tr>
<tr>
<th><kbd>$fields</kbd></th>
<td><em>required</em></td>
<td>array containing fields</td>
</tr>
<tr>
<th><kbd>$primary_keys</kbd></th>
<td><strong>array()</strong></td>
<td>array containing primary keys</td>
</tr>
<tr>
<th><kbd>$if_not_exists</kbd></th>
<td><strong>true</strong></td>
<td>whether to use <em>IF NOT EXISTS</em></td>
</tr>
<tr>
<th><kbd>$engine</kbd></th>
<td><strong>false</strong></td>
<td>which storage engine to use (MyISAM, InnoDB, ...)</td>
</tr>
<tr>
<th><kbd>$charset</kbd></th>
<td><pre class="php"><code>null</code></pre></td>
<td>the default charset, falls back to the active db group charset</td>
</tr>
<tr>
<th><kbd>$foreign_keys</kbd></th>
<td><strong>array()</strong></td>
<td>array of foreign key constraints definitions. The array keys 'key' and 'reference' are required and throw an error if missing, all others are optional.</td>
</tr>
<tr>
<th><kbd>$db</kbd></th>
<td><pre class="php"><code>null</code></pre></td>
<td class="description">The database connection.</td>
</tr>
</table>
</td>
</tr>
<tr>
<th>Returns</th>
<td>Returns the number of affected rows.</td>
</tr>
<tr>
<th>Throws</th>
<td>\Database_Exception on failure</td>
</tr>
<tr>
<th>Example</th>
<td>
<pre class="php"><code>\DBUtil::create_table(
'users',
array(
'id' => array('constraint' => 11, 'type' => 'int', 'auto_increment' => true),
'name' => array('type' => 'text'),
'email' => array('constraint' => 50, 'type' => 'varchar'),
'title' => array('constraint' => 50, 'type' => 'varchar', 'default' => 'mr.'),
'password' => array('constraint' => 125, 'type' => 'varchar'),
),
array('id'), false, 'InnoDB', 'utf8_unicode_ci',
array(
array(
'constraint' => 'constraintA',
'key' => 'keyA',
'reference' => array(
'table' => 'table',
'column' => 'field',
),
'on_update' => 'CASCADE',
'on_delete' => 'RESTRICT'
),
array(
'key' => 'keyB',
'reference' => array(
'table' => 'table',
'column' => array(
'fieldA',
'fieldB'
),
),
),
),
);
/* produces the following SQL statement:
CREATE TABLE `users` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`name` text NOT NULL,
`email` varchar(50) NOT NULL,
`title` varchar(50) DEFAULT 'mr.' NOT NULL,
`password` varchar(125) NOT NULL,
PRIMARY KEY `id` (`id`),
CONSTRAINT constraintA FOREIGN KEY (keyA) REFERENCES table (field) ON UPDATE CASCADE ON DELETE RESTRICT,
FOREIGN KEY (keyB) REFERENCES table (fieldA, fieldB)
) ENGINE = InnoDB DEFAULT CHARACTER SET utf8 COLLATE utf8_unicode_ci;
*/</code></pre>
</td>
</tr>
<tr>
<th>Possible parameters</th>
<td>
<table class="parameters">
<tr>
<th>Param</th>
<th class="description">Description</th>
</tr>
<tr>
<th><kbd>name</kbd></th>
<td>the field name</td>
</tr>
<tr>
<th><kbd>type</kbd></th>
<td>field type (varchar, enum, text, ...)</td>
</tr>
<tr>
<th><kbd>constraint</kbd></th>
<td>Length or values. Values can be provided in an array</td>
</tr>
<tr>
<th><kbd>charset</kbd></th>
<td>field charset</td>
</tr>
<tr>
<th><kbd>unsigned</kbd></th>
<td>boolean, <em>true</em> if is unsigned, defaults to <em>false</em></td>
</tr>
<tr>
<th><kbd>default</kbd></th>
<td>default value</td>
</tr>
<tr>
<th><kbd>null</kbd></th>
<td>boolean, <em>true</em> if nullable, defaults to <em>false</em></td>
</tr>
<tr>
<th><kbd>auto_increment</kbd></th>
<td>set to <em>true</em> to use auto incrementing</td>
</tr>
<tr>
<th><kbd>comment</kbd></th>
<td>adds a comment to your field</td>
</tr>
</table>
</td>
</tr>
</tbody>
</table>
<p>Default fields values are escaped by default. In some cases you might want not to escape this value. If so, you can use <a href="./db.html#method_expr">DB::expr</a>.</p>
<pre class="php"><code>\DBUtil::create_table('users', array(
'id' => array('constraint' => 11, 'type' => 'int', 'auto_increment' => true),
'name' => array('type' => 'text'),
'email' => array('constraint' => 50, 'type' => 'varchar'),
'title' => array('constraint' => 50, 'type' => 'varchar', 'default' => 'mr.'),
'created' => array('type' => 'timestamp', 'default' => \DB::expr('CURRENT_TIMESTAMP')),
'password' => array('constraint' => 125, 'type' => 'varchar'),
), array('id'));
</code></pre>
<p>Escaping values work the same for DBUtil::add_fields and DBUtil::modify_fields</p>
</article>
<article>
<h4 class="method" id="method_field_exists">field_exists($table, $columns, $db = null)</h4>
<p>The <strong>field_exists</strong> method checks if given field(s) in a given table exists.</p>
<table class="method">
<tbody>
<tr>
<th>Static</th>
<td>Yes</td>
</tr>
<tr>
<th>Parameters</th>
<td>
<table class="parameters">
<tr>
<th>Param</th>
<th>Default</th>
<th class="description">Description</th>
</tr>
<tr>
<th><kbd>$table</kbd></th>
<td><em>required</em></td>
<td>the table name</td>
</tr>
<tr>
<th><kbd>$columns</kbd></th>
<td><em>required</em></td>
<td>array containing fields</td>
</tr>
<tr>
<th><kbd>$db</kbd></th>
<td><pre class="php"><code>null</code></pre></td>
<td class="description">The database connection.</td>
</tr>
</table>
</td>
</tr>
<tr>
<th>Returns</th>
<td>Returns true if field/column exists, false otherwise</td>
</tr>
<tr>
<th>Example</th>
<td>
<pre class="php"><code>
if(DBUtil::field_exists('my_table', array('my_field_or_column')))
{
// Fields exist
}
else
{
// Fields are not available on the table
}</code>
</pre>
</td>
</tr>
</tbody>
</table>
</article>
<article>
<h4 class="method" id="method_add_fields">add_fields($table, $fields, $db = null)</h4>
<p>The <strong>add_fields</strong> method adds fields to a table.</p>
<table class="method">
<tbody>
<tr>
<th>Static</th>
<td>Yes</td>
</tr>
<tr>
<th>Parameters</th>
<td>
<table class="parameters">
<tr>
<th>Param</th>
<th>Default</th>
<th class="description">Description</th>
</tr>
<tr>
<th><kbd>$table</kbd></th>
<td><em>required</em></td>
<td>the table name</td>
</tr>
<tr>
<th><kbd>$fields</kbd></th>
<td><em>required</em></td>
<td>array containing fields</td>
</tr>
<tr>
<th><kbd>$db</kbd></th>
<td><pre class="php"><code>null</code></pre></td>
<td class="description">The database connection.</td>
</tr>
</table>
</td>
</tr>
<tr>
<th>Returns</th>
<td>Returns the number of affected rows.</td>
</tr>
<tr>
<th>Throws</th>
<td>\Database_Exception on failure</td>
</tr>
<tr>
<th>Example</th>
<td>
<pre class="php"><code>\DBUtil::add_fields('users', array(
'surname' => array('constraint' => 100, 'type' => 'varchar'),
'twitter_name' => array('constraint' => 100, 'type' => 'varchar'),
));
</code></pre>
</td>
</tr>
</tbody>
</table>
</article>
<article>
<h4 class="method" id="method_drop_fields">drop_fields($table, $fields, $db = null)</h4>
<p>The <strong>drop_fields</strong> method drops fields from a table.</p>
<table class="method">
<tbody>
<tr>
<th>Static</th>
<td>Yes</td>
</tr>
<tr>
<th>Parameters</th>
<td>
<table class="parameters">
<tr>
<th>Param</th>
<th>Default</th>
<th class="description">Description</th>
</tr>
<tr>
<th><kbd>$table</kbd></th>
<td><em>required</em></td>
<td>string of the table name</td>
</tr>
<tr>
<th><kbd>$fields</kbd></th>
<td><em>required</em></td>
<td>string or array containing fields</td>
</tr>
<tr>
<th><kbd>$db</kbd></th>
<td><pre class="php"><code>null</code></pre></td>
<td class="description">The database connection.</td>
</tr>
</table>
</td>
</tr>
<tr>
<th>Returns</th>
<td>Returns the number of affected rows.</td>
</tr>
<tr>
<th>Throws</th>
<td>\Database_Exception on failure</td>
</tr>
<tr>
<th>Example</th>
<td>
<pre class="php"><code>\DBUtil::drop_fields('users', 'surname');</code></pre>
</td>
</tr>
</tbody>
</table>
</article>
<article>
<h4 class="method" id="method_modify_fields">modify_fields($table, $fields, $db = null)</h4>
<p>The <strong>modify_fields</strong> method alters fields in a table.</p>
<table class="method">
<tbody>
<tr>
<th>Static</th>
<td>Yes</td>
</tr>
<tr>
<th>Parameters</th>
<td>
<table class="parameters">
<tr>
<th>Param</th>
<th>Default</th>
<th class="description">Description</th>
</tr>
<tr>
<th><kbd>$table</kbd></th>
<td><em>required</em></td>
<td>the table name</td>
</tr>
<tr>
<th><kbd>$fields</kbd></th>
<td><em>required</em></td>
<td>array containing fields</td>
</tr>
<tr>
<th><kbd>$db</kbd></th>
<td><pre class="php"><code>null</code></pre></td>
<td class="description">The database connection.</td>
</tr>
</table>
</td>
</tr>
<tr>
<th>Returns</th>
<td>Returns the number of affected rows.</td>
</tr>
<tr>
<th>Throws</th>
<td>\Database_Exception on failure</td>
</tr>
<tr>
<th>Example</th>
<td>
<pre class="php"><code>\DBUtil::modify_fields('users2', array(
'name' => array('constraint' => 100, 'type' => 'varchar', 'charset' => 'utf8_general_ci'),
'title' => array('constraint' => 50, 'type' => 'varchar', 'default' => 'mrs.'),
));
</code></pre>
</td>
</tr>
</tbody>
</table>
</article>
<article>
<h4 class="method" id="method_create_index">create_index($table, $index_columns, $index_name, $index = '', $db = null)</h4>
<p>The <strong>create_index</strong> method allows you to create secondary indexes on a table.</p>
<table class="method">
<tbody>
<tr>
<th>Static</th>
<td>Yes</td>
</tr>
<tr>
<th>Parameters</th>
<td>
<table class="parameters">
<tr>
<th>Param</th>
<th>Default</th>
<th class="description">Description</th>
</tr>
<tr>
<th><kbd>$table</kbd></th>
<td><em>required</em></td>
<td>the table name</td>
</tr>
<tr>
<th><kbd>$index_columns</kbd></th>
<td><em>required</em></td>
<td>mixed, string or array of strings containing fields</td>
</tr>
<tr>
<th><kbd>$index_name</kbd></th>
<td><em>optional</em></td>
<td>name of the index to be created</td>
</tr>
<tr>
<th><kbd>$index</kbd></th>
<td><em>optional</em></td>
<td>type of index to be created.<br />Currently supported: UNIQUE, FULLTEXT, SPATIAL, NONCLUSTERED</td>
</tr>
<tr>
<th><kbd>$db</kbd></th>
<td><pre class="php"><code>null</code></pre></td>
<td class="description">The database connection.</td>
</tr>
</table>
</td>
</tr>
<tr>
<th>Returns</th>
<td>Returns the result of the database operation.</td>
</tr>
<tr>
<th>Throws</th>
<td>\Database_Exception on failure</td>
</tr>
<tr>
<th>Example</th>
<td>
<pre class="php"><code>\DBUtil::create_index('table', 'name');
// produces CREATE INDEX name ON table ( `name` )
\DBUtil::create_index('table', array('nameA', 'nameB'), 'name');
// produces CREATE INDEX name ON table ( `nameA`, `nameB` )
\DBUtil::create_index('table', array('nameA' => 'ASC', 'nameB'), 'name', 'fulltext');
// produces CREATE FULLTEXT INDEX name ON table ( `nameA` ASC, `nameB` )
</code></pre>
</td>
</tr>
</tbody>
</table>
</article>
<article>
<h4 class="method" id="method_drop_index">drop_index($table, $index_name, $db = null)</h4>
<p>The <strong>drop_index</strong> method allows you to drop a secondary index from a table.</p>
<table class="method">
<tbody>
<tr>
<th>Static</th>
<td>Yes</td>
</tr>
<tr>
<th>Parameters</th>
<td>
<table class="parameters">
<tr>
<th>Param</th>
<th>Default</th>
<th class="description">Description</th>
</tr>
<tr>
<th><kbd>$table</kbd></th>
<td><em>required</em></td>
<td>the table name</td>
</tr>
<tr>
<th><kbd>$index_name</kbd></th>
<td><em>required</em></td>
<td>name of the index to be dropped</td>
</tr>
<tr>
<th><kbd>$db</kbd></th>
<td><pre class="php"><code>null</code></pre></td>
<td class="description">The database connection.</td>
</tr>
</table>
</td>
</tr>
<tr>
<th>Returns</th>
<td>Returns the result of the database operation.</td>
</tr>
<tr>
<th>Throws</th>
<td>\Database_Exception on failure</td>
</tr>
<tr>
<th>Example</th>
<td>
<pre class="php"><code>\DBUtil::drop_index('table', 'name');
// produces DROP INDEX name ON table
</code></pre>
</td>
</tr>
</tbody>
</table>
</article>
<article>
<h4 class="method" id="method_add_foregin_key">add_foreign_key($table, $foreign_key)</h4>
<p>The <strong>add_foreign_key</strong> method allows you to add a foreign key to a table after its creation.</p>
<table class="method">
<tbody>
<tr>
<th>Static</th>
<td>Yes</td>
</tr>
<tr>
<th>Parameters</th>
<td>
<table class="parameters">
<tr>
<th>Param</th>
<th>Default</th>
<th class="description">Description</th>
</tr>
<tr>
<th><kbd>$table</kbd></th>
<td><em>required</em></td>
<td>the table name</td>
</tr>
<tr>
<th><kbd>$foreign_key</kbd></th>
<td><em>required</em></td>
<td>array containing the foreign key definition.</td>
</tr>
</table>
</td>
</tr>
<tr>
<th>Returns</th>
<td>Returns the result of the database operation.</td>
</tr>
<tr>
<th>Throws</th>
<td>\InvalidArgumentException on invalid input, \Database_Exception on failure</td>
</tr>
<tr>
<th>Example</th>
<td>
<pre class="php"><code>\DBUtil::add_foreign_key('users', array(
'constraint' => 'constraintA',
'key' => 'keyA',
'reference' => array(
'table' => 'table',
'column' => 'field',
),
'on_update' => 'CASCADE',
'on_delete' => 'RESTRICT'
));</code></pre>
</td>
</tr>
</tbody>
</table>
</article>
<article>
<h4 class="method" id="method_drop_foreign_key">drop_foreign_key($table, $fk_name)</h4>
<p>The <strong>drop_foreign_key</strong> method allows you to drop a foreign key from a table.</p>
<table class="method">
<tbody>
<tr>
<th>Static</th>
<td>Yes</td>
</tr>
<tr>
<th>Parameters</th>
<td>
<table class="parameters">
<tr>
<th>Param</th>
<th>Default</th>
<th class="description">Description</th>
</tr>
<tr>
<th><kbd>$table</kbd></th>
<td><em>required</em></td>
<td>the table name</td>
</tr>
<tr>
<th><kbd>$fk_name</kbd></th>
<td><em>required</em></td>
<td>name of the foreign key to be dropped</td>
</tr>
</table>
</td>
</tr>
<tr>
<th>Returns</th>
<td>Returns the result of the database operation.</td>
</tr>
<tr>
<th>Throws</th>
<td>\Database_Exception on failure</td>
</tr>
<tr>
<th>Example</th>
<td>
<pre class="php"><code>\DBUtil::drop_foreign_key('table', 'name');
// produces ALTER TABLE `table` DROP FOREIGN KEY name
</code></pre>
</td>
</tr>
</tbody>
</table>
</article>
<h3 id="database-operations">Database operations</h3>
<article>
<h4 class="method" id="method_truncate_table">truncate_table($table, $db = null)</h4>
<p>The <strong>truncate_table</strong> method truncates a table.</p>
<table class="method">
<tbody>
<tr>
<th>Static</th>
<td>Yes</td>
</tr>
<tr>
<th>Parameters</th>
<td>
<table class="parameters">
<tr>
<th>Param</th>
<th>Default</th>
<th class="description">Description</th>
</tr>
<tr>
<th><kbd>$table</kbd></th>
<td><em>required</em></td>
<td class="description">the table name.</td>
</tr>
<tr>
<th><kbd>$db</kbd></th>
<td><pre class="php"><code>null</code></pre></td>
<td class="description">The database connection.</td>
</tr>
</table>
</td>
</tr>
<tr>
<th>Returns</th>
<td>Returns the number of affected rows.</td>
</tr>
<tr>
<th>Throws</th>
<td>\Database_Exception when not supported</td>
</tr>
<tr>
<th>Example</th>
<td>
<pre class="php"><code>\DBUtil::truncate_table('my_table');</code></pre>
</td>
</tr>
</tbody>
</table>
</article>
<article>
<h4 class="method" id="method_analyze_table">analyze_table($table, $db = null)</h4>
<p>The <strong>analyze_table</strong> method analyzes a table.</p>
<table class="method">
<tbody>
<tr>
<th>Static</th>
<td>Yes</td>
</tr>
<tr>
<th>Parameters</th>
<td>
<table class="parameters">
<tr>
<th>Param</th>
<th>Default</th>
<th class="description">Description</th>
</tr>
<tr>
<th><kbd>$table</kbd></th>
<td><em>required</em></td>
<td class="description">the table name.</td>
</tr>
<tr>
<th><kbd>$db</kbd></th>
<td><pre class="php"><code>null</code></pre></td>
<td class="description">The database connection.</td>
</tr>
</table>
</td>
</tr>
<tr>
<th>Returns</th>
<td><em>True</em> if the table is OK. <em>False</em> when needs attention. If not supported it logs the error message.</td>
</tr>
<tr>
<th>Example</th>
<td>
<pre class="php"><code>if(\DBUtil::analyze_table('table_name') === false)
{
// Do something
}
</code></pre>
</td>
</tr>
</tbody>
</table>
</article>
<article>
<h4 class="method" id="method_check_table">check_table($table, $db = null)</h4>
<p>The <strong>check_table</strong> method checks a table.</p>
<table class="method">
<tbody>
<tr>
<th>Static</th>
<td>Yes</td>
</tr>
<tr>
<th>Parameters</th>
<td>
<table class="parameters">
<tr>
<th>Param</th>
<th>Default</th>
<th class="description">Description</th>
</tr>
<tr>
<th><kbd>$table</kbd></th>
<td><em>required</em></td>
<td class="description">the table name.</td>
</tr>
<tr>
<th><kbd>$db</kbd></th>
<td><pre class="php"><code>null</code></pre></td>
<td class="description">The database connection.</td>
</tr>
</table>
</td>
</tr>
<tr>
<th>Returns</th>
<td><em>True</em> if the table is OK. <em>False</em> when needs attention. If not supported it logs the error message.</td>
</tr>
<tr>
<th>Example</th>
<td>
<pre class="php"><code>if(\DBUtil::check_table('table_name') === false)
{
// Do something
}
</code></pre>
</td>
</tr>
</tbody>
</table>
</article>
<article>
<h4 class="method" id="method_optimize_table">optimize_table($table, $db = null)</h4>
<p>The <strong>optimize_table</strong> method optimizes a table.</p>
<table class="method">
<tbody>
<tr>
<th>Static</th>
<td>Yes</td>
</tr>
<tr>
<th>Parameters</th>
<td>
<table class="parameters">
<tr>
<th>Param</th>
<th>Default</th>
<th class="description">Description</th>
</tr>
<tr>
<th><kbd>$table</kbd></th>
<td><em>required</em></td>
<td class="description">the table name.</td>
</tr>
<tr>
<th><kbd>$db</kbd></th>
<td><pre class="php"><code>null</code></pre></td>
<td class="description">The database connection.</td>
</tr>
</table>
</td>
</tr>
<tr>
<th>Returns</th>
<td><em>True</em> if the table is OK or optimized. <em>False</em> on failure. If not supported or failed it logs the error message.</td>
</tr>
<tr>
<th>Example</th>
<td>
<pre class="php"><code>if(\DBUtil::optimize_table('table_name') === false)
{
// Do something
}
</code></pre>
</td>
</tr>
</tbody>
</table>
</article>
<article>
<h4 class="method" id="method_repair_table">repair_table($table, $db = null)</h4>
<p>The <strong>repair_table</strong> method repairs a table.</p>
<table class="method">
<tbody>
<tr>
<th>Static</th>
<td>Yes</td>
</tr>
<tr>
<th>Parameters</th>
<td>
<table class="parameters">
<tr>
<th>Param</th>
<th>Default</th>
<th class="description">Description</th>
</tr>
<tr>
<th><kbd>$table</kbd></th>
<td><em>required</em></td>
<td class="description">the table name.</td>
</tr>
<tr>
<th><kbd>$db</kbd></th>
<td><pre class="php"><code>null</code></pre></td>
<td class="description">The database connection.</td>
</tr>
</table>
</td>
</tr>
<tr>
<th>Returns</th>
<td><em>True</em> if the table is OK or repaired. <em>False</em> on failure. If not supported or failed it logs the error message.</td>
</tr>
<tr>
<th>Example</th>
<td>
<pre class="php"><code>if(\DBUtil::repair_table('table_name') === false)
{
// Do something
}
</code></pre>
</td>
</tr>
</tbody>
</table>
</article>
</div>
<footer>
<p>
© FuelPHP Development Team 2010-2016 - <a href="http://fuelphp.com">FuelPHP</a> is released under the MIT license.
</p>
</footer>
</div>
</body>
</html>
|
import Ember from "ember-metal/core";
import EmberObject from "ember-runtime/system/object";
import TargetActionSupport from "ember-runtime/mixins/target_action_support";
// Several tests below install a fake global lookup (used to resolve string
// paths like 'Test.targetObj'), so save and restore Ember.lookup per test.
var originalLookup;
QUnit.module("TargetActionSupport", {
setup() {
// Snapshot the global lookup before each test runs.
originalLookup = Ember.lookup;
},
teardown() {
// Undo any lookup replacement done by the test body.
Ember.lookup = originalLookup;
}
});
// triggerAction() must report failure (false) when the object defines
// neither a target nor an action.
QUnit.test("it should return false if no target or action are specified", function() {
expect(1);
var subject = EmberObject.createWithMixins(TargetActionSupport);
var result = subject.triggerAction();
ok(result === false, "no target or action was specified");
});
// When `action` is a string, triggerAction() should look the method up on
// the target by name and invoke it.
QUnit.test("it should support actions specified as strings", function() {
expect(2);
var obj = EmberObject.createWithMixins(TargetActionSupport, {
target: EmberObject.create({
anEvent() {
ok(true, "anEvent method was called");
}
}),
action: 'anEvent'
});
ok(true === obj.triggerAction(), "a valid target and action were specified");
});
// Targets exposing send() act as dispatchers: triggerAction() should route
// through send(actionName, sender) instead of calling the action directly.
QUnit.test("it should invoke the send() method on objects that implement it", function() {
expect(3);
var obj = EmberObject.createWithMixins(TargetActionSupport, {
target: EmberObject.create({
send(evt, context) {
equal(evt, 'anEvent', "send() method was invoked with correct event name");
equal(context, obj, "send() method was invoked with correct context");
}
}),
action: 'anEvent'
});
ok(true === obj.triggerAction(), "a valid target and action were specified");
});
// A string target like 'Test.targetObj' should be resolved through the
// global lookup object (stubbed here via Ember.lookup).
QUnit.test("it should find targets specified using a property path", function() {
expect(2);
var Test = {};
Ember.lookup = { Test: Test };
Test.targetObj = EmberObject.create({
anEvent() {
ok(true, "anEvent method was called on global object");
}
});
var myObj = EmberObject.createWithMixins(TargetActionSupport, {
target: 'Test.targetObj',
action: 'anEvent'
});
ok(true === myObj.triggerAction(), "a valid target and action were specified");
});
// An actionContext object set directly on the mixin host should be passed
// through to the action as its argument.
QUnit.test("it should use an actionContext object specified as a property on the object", function() {
expect(2);
var obj = EmberObject.createWithMixins(TargetActionSupport, {
action: 'anEvent',
actionContext: {},
target: EmberObject.create({
anEvent(ctx) {
ok(obj.actionContext === ctx, "anEvent method was called with the expected context");
}
})
});
ok(true === obj.triggerAction(), "a valid target and action were specified");
});
// Like targets, actionContext may be a string path resolved via the lookup.
QUnit.test("it should find an actionContext specified as a property path", function() {
expect(2);
var Test = {};
Ember.lookup = { Test: Test };
Test.aContext = {};
var obj = EmberObject.createWithMixins(TargetActionSupport, {
action: 'anEvent',
actionContext: 'Test.aContext',
target: EmberObject.create({
anEvent(ctx) {
ok(Test.aContext === ctx, "anEvent method was called with the expected context");
}
})
});
ok(true === obj.triggerAction(), "a valid target and action were specified");
});
// Options passed to triggerAction() take effect even when the corresponding
// property is not set on the object: here the target comes from the argument.
QUnit.test("it should use the target specified in the argument", function() {
expect(2);
var targetObj = EmberObject.create({
anEvent() {
ok(true, "anEvent method was called");
}
});
var obj = EmberObject.createWithMixins(TargetActionSupport, {
action: 'anEvent'
});
ok(true === obj.triggerAction({ target: targetObj }), "a valid target and action were specified");
});
// The action name may also be supplied via the triggerAction() options hash.
QUnit.test("it should use the action specified in the argument", function() {
expect(2);
var obj = EmberObject.createWithMixins(TargetActionSupport, {
target: EmberObject.create({
anEvent() {
ok(true, "anEvent method was called");
}
})
});
ok(true === obj.triggerAction({ action: 'anEvent' }), "a valid target and action were specified");
});
// An actionContext given in the options hash is forwarded to the action.
QUnit.test("it should use the actionContext specified in the argument", function() {
expect(2);
var context = {};
var obj = EmberObject.createWithMixins(TargetActionSupport, {
target: EmberObject.create({
anEvent(ctx) {
ok(context === ctx, "anEvent method was called with the expected context");
}
}),
action: 'anEvent'
});
ok(true === obj.triggerAction({ actionContext: context }), "a valid target and action were specified");
});
// An array actionContext should be spread across the action's parameters.
QUnit.test("it should allow multiple arguments from actionContext", function() {
expect(3);
var param1 = 'someParam';
var param2 = 'someOtherParam';
var obj = EmberObject.createWithMixins(TargetActionSupport, {
target: EmberObject.create({
anEvent(first, second) {
ok(first === param1, "anEvent method was called with the expected first argument");
ok(second === param2, "anEvent method was called with the expected second argument");
}
}),
action: 'anEvent'
});
ok(true === obj.triggerAction({ actionContext: [param1, param2] }), "a valid target and action were specified");
});
// An explicit null actionContext must be honored, not treated as "absent".
QUnit.test("it should use a null value specified in the actionContext argument", function() {
expect(2);
var obj = EmberObject.createWithMixins(TargetActionSupport, {
target: EmberObject.create({
anEvent(ctx) {
ok(null === ctx, "anEvent method was called with the expected context (null)");
}
}),
action: 'anEvent'
});
ok(true === obj.triggerAction({ actionContext: null }), "a valid target and action were specified");
});
|
<?php
// CLI wrapper around PHP_CodeSniffer: sniffs a single file with two local
// rulesets and prints the collected errors as JSON on stdout.
if(php_sapi_name() != 'cli') {
die("This script must be called from the command line\n");
}
// First CLI argument is the path of the file to check.
if(!empty($_SERVER['argv'][1])) {
$path = $_SERVER['argv'][1];
} else {
die("Usage: php {$_SERVER['argv'][0]} <file>\n");
}
$result = array('comments' => array());
$extension = pathinfo($path, PATHINFO_EXTENSION);
// Whitelist of extensions to check (default phpcs list)
if(in_array($extension, array('php', 'js', 'inc', 'css'))) {
// Run each sniff
// phpcs --encoding=utf-8 --standard=framework/tests/phpcs/tabs.xml
run_sniff('tabs.xml', $path, $result);
// phpcs --encoding=utf-8 --tab-width=4 --standard=framework/tests/phpcs/ruleset.xml
run_sniff('ruleset.xml', $path, $result, '--tab-width=4');
}
// Emit the accumulated comments as JSON for the calling tool.
echo json_encode($result);
/**
 * Run a single PHP_CodeSniffer standard against a file and collect errors.
 *
 * Invokes the `phpcs` binary with the given standard (resolved relative to
 * the local phpcs/ directory) and appends one entry per reported error to
 * $result['comments']: the error line, a stable id built from the standard,
 * path and position, and the message text.
 *
 * @param string $standard   Ruleset filename inside __DIR__/phpcs (e.g. 'tabs.xml')
 * @param string $path       Path of the file to sniff
 * @param array  $result     Accumulator; entries are appended to $result['comments']
 * @param string $extraFlags Additional phpcs CLI flags (e.g. '--tab-width=4')
 * @return void
 */
function run_sniff($standard, $path, array &$result, $extraFlags = '') {
	$sniffPath = escapeshellarg(__DIR__ . '/phpcs/' . $standard);
	$checkPath = escapeshellarg($path);
	exec("phpcs --encoding=utf-8 $extraFlags --standard=$sniffPath --report=xml $checkPath", $output);
	// We can't check the return code as it's non-zero if the sniff finds an error
	if($output) {
		$xml = simplexml_load_string(implode("\n", $output));
		// phpcs can emit non-XML output (e.g. a usage error); calling xpath()
		// on the resulting `false` would be fatal, so bail out instead.
		if($xml === false) {
			return;
		}
		$errors = $xml->xpath('/phpcs/file/error');
		if($errors) {
			// Make the file path safe for embedding in an id string.
			$sanePath = str_replace('/', '_', $path);
			foreach($errors as $error) {
				$attributes = $error->attributes();
				$result['comments'][] = array(
					'line' => (int)strval($attributes->line),
					'id' => $standard . '-' . $sanePath . '-' . $attributes->line . '-' . $attributes->column,
					'message' => strval($error)
				);
			}
		}
	}
}
|
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*
* @format
* @flow
*/
'use strict';
import {BatchedBridge} from 'react-native/Libraries/ReactPrivate/ReactNativePrivateInterface';
// TODO @sema: Adjust types
import type {ReactNativeType} from './ReactNativeTypes';
// Select the Fabric renderer bundle by build type: the -dev bundle keeps
// development checks, the -prod bundle is the stripped production build.
let ReactFabric;
if (__DEV__) {
ReactFabric = require('../implementations/ReactFabric-dev');
} else {
ReactFabric = require('../implementations/ReactFabric-prod');
}
// Register the renderer with the batched bridge so native code can call it.
BatchedBridge.registerCallableModule('ReactFabric', ReactFabric);
module.exports = (ReactFabric: ReactNativeType);
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using System.Linq;
using System.Security.Cryptography.X509Certificates;
using Test.Cryptography;
using Xunit;
namespace System.Security.Cryptography.Pkcs.Tests
{
public static class TimestampTokenTests
{
[Theory]
[InlineData(nameof(TimestampTokenTestData.FreeTsaDotOrg1))]
[InlineData(nameof(TimestampTokenTestData.Symantec1))]
public static void ParseDocument(string testDataName)
{
// A well-formed token should decode fully, consuming exactly its own length.
TimestampTokenTestData testData = TimestampTokenTestData.GetTestData(testDataName);
TestParseDocument(testData.FullTokenBytes, testData, testData.FullTokenBytes.Length);
}
[Theory]
[InlineData(nameof(TimestampTokenTestData.FreeTsaDotOrg1))]
[InlineData(nameof(TimestampTokenTestData.Symantec1))]
public static void ParseDocument_ExcessData(string testDataName)
{
TimestampTokenTestData testData = TimestampTokenTestData.GetTestData(testDataName);
int baseLen = testData.FullTokenBytes.Length;
// Append 30 extra bytes after the token; TryDecode should stop at the end
// of the token and report only baseLen bytes consumed.
byte[] tooMuchData = new byte[baseLen + 30];
testData.FullTokenBytes.CopyTo(tooMuchData);
// Look like an octet string of the remainder of the payload. Should be ignored.
tooMuchData[baseLen] = 0x04;
tooMuchData[baseLen + 1] = 28;
TestParseDocument(tooMuchData, testData, baseLen);
}
// Shared assertion helper: decodes tokenBytes, checks the decoded token
// against the expected test data, and exercises VerifySignatureForData /
// VerifySignatureForHash in both the embedded-certificate and
// external-certificate configurations. expectedBytesRead is null when the
// caller does not care how many bytes were consumed.
private static void TestParseDocument(
ReadOnlyMemory<byte> tokenBytes,
TimestampTokenTestData testData,
int? expectedBytesRead)
{
int bytesRead;
Rfc3161TimestampToken token;
Assert.True(
Rfc3161TimestampToken.TryDecode(tokenBytes, out token, out bytesRead),
"Rfc3161TimestampToken.TryDecode");
if (expectedBytesRead != null)
{
Assert.Equal(expectedBytesRead.Value, bytesRead);
}
Assert.NotNull(token);
TimestampTokenInfoTests.AssertEqual(testData, token.TokenInfo);
// The CMS wrapper must use the TSTInfo content type and carry exactly the
// raw token-info bytes from the test data.
SignedCms signedCms = token.AsSignedCms();
Assert.NotNull(signedCms);
Assert.Equal(Oids.TstInfo, signedCms.ContentInfo.ContentType.Value);
Assert.Equal(
testData.TokenInfoBytes.ByteArrayToHex(),
signedCms.ContentInfo.Content.ByteArrayToHex());
// Signature check: with an embedded certificate the CMS validates on its
// own; otherwise the external certificate must be supplied explicitly.
if (testData.EmbeddedSigningCertificate != null)
{
Assert.NotNull(signedCms.SignerInfos[0].Certificate);
Assert.Equal(
testData.EmbeddedSigningCertificate.Value.ByteArrayToHex(),
signedCms.SignerInfos[0].Certificate.RawData.ByteArrayToHex());
// Assert.NoThrow
signedCms.CheckSignature(true);
}
else
{
Assert.Null(signedCms.SignerInfos[0].Certificate);
using (var signerCert = new X509Certificate2(testData.ExternalCertificateBytes))
{
// Assert.NoThrow
signedCms.CheckSignature(
new X509Certificate2Collection(signerCert),
true);
}
}
X509Certificate2 returnedCert;
ReadOnlySpan<byte> messageContentSpan = testData.MessageContent.Span;
X509Certificate2Collection candidates = null;
if (testData.EmbeddedSigningCertificate != null)
{
Assert.True(
token.VerifySignatureForData(messageContentSpan, out returnedCert),
"token.VerifySignatureForData(correct)");
Assert.NotNull(returnedCert);
Assert.Equal(signedCms.SignerInfos[0].Certificate, returnedCert);
}
else
{
// Without an embedded certificate verification must fail until the
// candidate collection containing the external certificate is provided.
candidates = new X509Certificate2Collection
{
new X509Certificate2(testData.ExternalCertificateBytes),
};
Assert.False(
token.VerifySignatureForData(messageContentSpan, out returnedCert),
"token.VerifySignatureForData(correct, no cert)");
Assert.Null(returnedCert);
Assert.True(
token.VerifySignatureForData(messageContentSpan, out returnedCert, candidates),
"token.VerifySignatureForData(correct, certs)");
Assert.NotNull(returnedCert);
Assert.Equal(candidates[0], returnedCert);
}
X509Certificate2 previousCert = returnedCert;
// Mismatched data (offset by one byte) must fail verification.
Assert.False(
token.VerifySignatureForData(messageContentSpan.Slice(1), out returnedCert, candidates),
"token.VerifySignatureForData(incorrect)");
Assert.Null(returnedCert);
// Hash-based verification: the hash must match AND the declared algorithm
// must agree with the token's; a correct hash under MD5 still fails.
byte[] messageHash = testData.HashBytes.ToArray();
Assert.False(
token.VerifySignatureForHash(messageHash, HashAlgorithmName.MD5, out returnedCert, candidates),
"token.VerifyHash(correct, MD5)");
Assert.Null(returnedCert);
Assert.False(
token.VerifySignatureForHash(messageHash, new Oid(Oids.Md5), out returnedCert, candidates),
"token.VerifyHash(correct, Oid(MD5))");
Assert.Null(returnedCert);
Assert.True(
token.VerifySignatureForHash(messageHash, new Oid(testData.HashAlgorithmId), out returnedCert, candidates),
"token.VerifyHash(correct, Oid(algId))");
Assert.NotNull(returnedCert);
Assert.Equal(previousCert, returnedCert);
// Corrupt one byte of the hash; verification must now fail.
messageHash[0] ^= 0xFF;
Assert.False(
token.VerifySignatureForHash(messageHash, new Oid(testData.HashAlgorithmId), out returnedCert, candidates),
"token.VerifyHash(incorrect, Oid(algId))");
Assert.Null(returnedCert);
}
[Fact]
public static void TryDecode_Fails_SignedCmsOfData()
{
    // An ordinary SignedCms over arbitrary data is not a timestamp token,
    // so TryDecode must reject it and report zero bytes consumed.
    Rfc3161TimestampToken token;
    int bytesRead;
    bool decoded = Rfc3161TimestampToken.TryDecode(
        SignedDocuments.RsaPkcs1OneSignerIssuerAndSerialNumber,
        out token,
        out bytesRead);
    Assert.False(decoded, "Rfc3161TimestampToken.TryDecode");
    Assert.Equal(0, bytesRead);
    Assert.Null(token);
}
[Fact]
public static void TryDecode_Fails_Empty()
{
    // Empty input: decoding must fail without consuming bytes or producing a token.
    bool decoded = Rfc3161TimestampToken.TryDecode(
        ReadOnlyMemory<byte>.Empty,
        out Rfc3161TimestampToken parsedToken,
        out int consumed);

    Assert.False(decoded, "Rfc3161TimestampToken.TryDecode");
    Assert.Null(parsedToken);
    Assert.Equal(0, consumed);
}
[Fact]
public static void TryDecode_Fails_EnvelopedCms()
{
    // An EnvelopedCms message (pkcs7-envelopedData) is structurally not a timestamp
    // token, so decoding must fail cleanly.
    byte[] envelopedMessageBytes =
        ("3082010c06092a864886f70d010703a081fe3081fb0201003181c83081c5020100302e301a311830160603550403130f5253"
        + "414b65795472616e7366657231021031d935fb63e8cfab48a0bf7b397b67c0300d06092a864886f70d010101050004818013"
        + "dc0eb2984a445d04a1f6246b8fe41f1d24507548d449d454d5bb5e0638d75ed101bf78c0155a5d208eb746755fbccbc86923"
        + "8443760a9ae94770d6373e0197be23a6a891f0c522ca96b3e8008bf23547474b7e24e7f32e8134df3862d84f4dea2470548e"
        + "c774dd74f149a56cdd966e141122900d00ad9d10ea1848541294a1302b06092a864886f70d010701301406082a864886f70d"
        + "030704089c8119f6cf6b174c8008bcea3a10d0737eb9").HexToByteArray();

    bool decoded = Rfc3161TimestampToken.TryDecode(
        envelopedMessageBytes,
        out Rfc3161TimestampToken parsedToken,
        out int consumed);

    Assert.False(decoded, "Rfc3161TimestampToken.TryDecode");
    Assert.Null(parsedToken);
    Assert.Equal(0, consumed);
}
[Fact]
public static void TryDecode_Fails_MalformedToken()
{
    // The outer SignedCms is valid and claims a TSTInfo content type, but the payload
    // is a single byte that cannot parse as TSTInfo, so TryDecode must reject it.
    ContentInfo malformedContent = new ContentInfo(
        new Oid(Oids.TstInfo, Oids.TstInfo),
        new byte[] { 1 });

    SignedCms wrapper = new SignedCms(malformedContent);
    using (X509Certificate2 signerCert = Certificates.RSAKeyTransferCapi1.TryGetCertificateWithPrivateKey())
    {
        wrapper.ComputeSignature(new CmsSigner(SubjectIdentifierType.IssuerAndSerialNumber, signerCert));
    }

    bool decoded = Rfc3161TimestampToken.TryDecode(
        wrapper.Encode(),
        out Rfc3161TimestampToken parsedToken,
        out int consumed);

    Assert.False(decoded, "Rfc3161TimestampToken.TryDecode");
    Assert.Null(parsedToken);
    Assert.Equal(0, consumed);
}
// V1-only match: the token carries only a SigningCertificate (ESSCertID) attribute
// with a valid hash; decoding and verification should succeed whether or not the
// optional IssuerSerial name is present, and whether or not certs are embedded.
[Theory]
[InlineData(X509IncludeOption.WholeChain, SigningCertificateOption.ValidHashNoName)]
[InlineData(X509IncludeOption.None, SigningCertificateOption.ValidHashNoName)]
[InlineData(X509IncludeOption.WholeChain, SigningCertificateOption.ValidHashWithName)]
[InlineData(X509IncludeOption.None, SigningCertificateOption.ValidHashWithName)]
public static void MatchV1(X509IncludeOption includeOption, SigningCertificateOption v1Option)
{
    CustomBuild_CertMatch(
        Certificates.ValidLookingTsaCert,
        // Arbitrary fixed timestamp reused by all of the custom-token tests.
        new DateTimeOffset(2018, 1, 10, 17, 21, 11, 802, TimeSpan.Zero),
        v1Option,
        SigningCertificateOption.Omit,
        includeOption: includeOption);
}
// V1-only mismatch: the SigningCertificate attribute's hash does not match the
// signing cert, so the token must fail verification (see CustomBuild_CertMismatch
// for whether it fails at decode or at signature verification).
[Theory]
[InlineData(X509IncludeOption.WholeChain)]
[InlineData(X509IncludeOption.None)]
public static void CertHashMismatchV1(X509IncludeOption includeOption)
{
    CustomBuild_CertMismatch(
        Certificates.ValidLookingTsaCert,
        new DateTimeOffset(2018, 1, 10, 17, 21, 11, 802, TimeSpan.Zero),
        SigningCertificateOption.InvalidHashNoName,
        SigningCertificateOption.Omit,
        includeOption: includeOption);
}
// V1-only mismatch via IssuerSerial: the hash is valid but the attribute's issuer
// name does not match the signer, exercised for both SubjectIdentifierType choices.
[Theory]
[InlineData(
    X509IncludeOption.WholeChain,
    SigningCertificateOption.ValidHashWithInvalidName,
    SubjectIdentifierType.SubjectKeyIdentifier)]
[InlineData(
    X509IncludeOption.None,
    SigningCertificateOption.ValidHashWithInvalidName,
    SubjectIdentifierType.SubjectKeyIdentifier)]
[InlineData(
    X509IncludeOption.WholeChain,
    SigningCertificateOption.ValidHashWithInvalidName,
    SubjectIdentifierType.IssuerAndSerialNumber)]
[InlineData(
    X509IncludeOption.None,
    SigningCertificateOption.ValidHashWithInvalidName,
    SubjectIdentifierType.IssuerAndSerialNumber)]
public static void CertMismatchIssuerAndSerialV1(
    X509IncludeOption includeOption,
    SigningCertificateOption v1Option,
    SubjectIdentifierType identifierType)
{
    CustomBuild_CertMismatch(
        Certificates.ValidLookingTsaCert,
        new DateTimeOffset(2018, 1, 10, 17, 21, 11, 802, TimeSpan.Zero),
        v1Option,
        SigningCertificateOption.Omit,
        includeOption: includeOption,
        identifierType: identifierType);
}
// V2-only match: the token carries only a SigningCertificateV2 (ESSCertIDv2)
// attribute with a valid hash, across several hash algorithms. A null hashAlgName
// means the attribute omits the algorithm (BuildCustomToken then hashes with the
// SHA-256 default).
[Theory]
[InlineData(
    X509IncludeOption.WholeChain,
    SigningCertificateOption.ValidHashNoName,
    null)]
[InlineData(
    X509IncludeOption.None,
    SigningCertificateOption.ValidHashNoName,
    null)]
[InlineData(
    X509IncludeOption.WholeChain,
    SigningCertificateOption.ValidHashWithName,
    "MD5")]
[InlineData(
    X509IncludeOption.None,
    SigningCertificateOption.ValidHashWithName,
    "MD5")]
[InlineData(
    X509IncludeOption.WholeChain,
    SigningCertificateOption.ValidHashWithName,
    "SHA1")]
[InlineData(
    X509IncludeOption.None,
    SigningCertificateOption.ValidHashWithName,
    "SHA1")]
[InlineData(
    X509IncludeOption.WholeChain,
    SigningCertificateOption.ValidHashWithName,
    "SHA384")]
[InlineData(
    X509IncludeOption.None,
    SigningCertificateOption.ValidHashWithName,
    "SHA384")]
public static void MatchV2(
    X509IncludeOption includeOption,
    SigningCertificateOption v2Option,
    string hashAlgName)
{
    CustomBuild_CertMatch(
        Certificates.ValidLookingTsaCert,
        new DateTimeOffset(2018, 1, 10, 17, 21, 11, 802, TimeSpan.Zero),
        SigningCertificateOption.Omit,
        v2Option,
        hashAlgName == null ? default(HashAlgorithmName) : new HashAlgorithmName(hashAlgName),
        includeOption);
}
// V2-only mismatch: the ESSCertIDv2 hash does not match the signing cert for any of
// the tested algorithms (null => default SHA-256), so verification must fail.
[Theory]
[InlineData(X509IncludeOption.WholeChain, null)]
[InlineData(X509IncludeOption.None, null)]
[InlineData(X509IncludeOption.WholeChain, "MD5")]
[InlineData(X509IncludeOption.None, "MD5")]
[InlineData(X509IncludeOption.WholeChain, "SHA1")]
[InlineData(X509IncludeOption.None, "SHA1")]
[InlineData(X509IncludeOption.WholeChain, "SHA384")]
[InlineData(X509IncludeOption.None, "SHA384")]
public static void CertHashMismatchV2(X509IncludeOption includeOption, string hashAlgName)
{
    CustomBuild_CertMismatch(
        Certificates.ValidLookingTsaCert,
        new DateTimeOffset(2018, 1, 10, 17, 21, 11, 802, TimeSpan.Zero),
        SigningCertificateOption.Omit,
        SigningCertificateOption.InvalidHashNoName,
        hashAlgName == null ? default(HashAlgorithmName) : new HashAlgorithmName(hashAlgName),
        includeOption: includeOption);
}
// V2-only mismatch via IssuerSerial: a valid hash but a non-matching issuer name,
// exercised for both SubjectIdentifierType choices and both the default and an
// explicit (SHA384) attribute hash algorithm.
[Theory]
[InlineData(
    X509IncludeOption.WholeChain,
    SigningCertificateOption.ValidHashWithInvalidName,
    SubjectIdentifierType.SubjectKeyIdentifier,
    null)]
[InlineData(
    X509IncludeOption.None,
    SigningCertificateOption.ValidHashWithInvalidName,
    SubjectIdentifierType.SubjectKeyIdentifier,
    null)]
[InlineData(
    X509IncludeOption.WholeChain,
    SigningCertificateOption.ValidHashWithInvalidName,
    SubjectIdentifierType.SubjectKeyIdentifier,
    "SHA384")]
[InlineData(
    X509IncludeOption.None,
    SigningCertificateOption.ValidHashWithInvalidName,
    SubjectIdentifierType.SubjectKeyIdentifier,
    "SHA384")]
[InlineData(
    X509IncludeOption.WholeChain,
    SigningCertificateOption.ValidHashWithInvalidName,
    SubjectIdentifierType.IssuerAndSerialNumber,
    null)]
[InlineData(
    X509IncludeOption.None,
    SigningCertificateOption.ValidHashWithInvalidName,
    SubjectIdentifierType.IssuerAndSerialNumber,
    null)]
[InlineData(
    X509IncludeOption.WholeChain,
    SigningCertificateOption.ValidHashWithInvalidName,
    SubjectIdentifierType.IssuerAndSerialNumber,
    "SHA384")]
[InlineData(
    X509IncludeOption.None,
    SigningCertificateOption.ValidHashWithInvalidName,
    SubjectIdentifierType.IssuerAndSerialNumber,
    "SHA384")]
public static void CertMismatchIssuerAndSerialV2(
    X509IncludeOption includeOption,
    SigningCertificateOption v2Option,
    SubjectIdentifierType identifierType,
    string hashAlgName)
{
    CustomBuild_CertMismatch(
        Certificates.ValidLookingTsaCert,
        new DateTimeOffset(2018, 1, 10, 17, 21, 11, 802, TimeSpan.Zero),
        SigningCertificateOption.Omit,
        v2Option,
        hashAlgName == null ? default(HashAlgorithmName) : new HashAlgorithmName(hashAlgName),
        includeOption: includeOption,
        identifierType: identifierType);
}
// Both V1 and V2 attributes present and both valid, in every combination of
// name presence and attribute hash algorithm (null => default SHA-256 for V2);
// the token must decode and verify.
[Theory]
[InlineData(
    X509IncludeOption.WholeChain,
    SigningCertificateOption.ValidHashNoName,
    SigningCertificateOption.ValidHashNoName,
    null)]
[InlineData(
    X509IncludeOption.WholeChain,
    SigningCertificateOption.ValidHashNoName,
    SigningCertificateOption.ValidHashNoName,
    "SHA512")]
[InlineData(
    X509IncludeOption.None,
    SigningCertificateOption.ValidHashNoName,
    SigningCertificateOption.ValidHashNoName,
    null)]
[InlineData(
    X509IncludeOption.None,
    SigningCertificateOption.ValidHashNoName,
    SigningCertificateOption.ValidHashNoName,
    "SHA512")]
[InlineData(
    X509IncludeOption.WholeChain,
    SigningCertificateOption.ValidHashNoName,
    SigningCertificateOption.ValidHashWithName,
    null)]
[InlineData(
    X509IncludeOption.WholeChain,
    SigningCertificateOption.ValidHashNoName,
    SigningCertificateOption.ValidHashWithName,
    "SHA384")]
[InlineData(
    X509IncludeOption.None,
    SigningCertificateOption.ValidHashNoName,
    SigningCertificateOption.ValidHashWithName,
    null)]
[InlineData(
    X509IncludeOption.None,
    SigningCertificateOption.ValidHashNoName,
    SigningCertificateOption.ValidHashWithName,
    "SHA384")]
[InlineData(
    X509IncludeOption.WholeChain,
    SigningCertificateOption.ValidHashWithName,
    SigningCertificateOption.ValidHashNoName,
    null)]
[InlineData(
    X509IncludeOption.WholeChain,
    SigningCertificateOption.ValidHashWithName,
    SigningCertificateOption.ValidHashNoName,
    "SHA512")]
[InlineData(
    X509IncludeOption.None,
    SigningCertificateOption.ValidHashWithName,
    SigningCertificateOption.ValidHashNoName,
    null)]
[InlineData(
    X509IncludeOption.None,
    SigningCertificateOption.ValidHashWithName,
    SigningCertificateOption.ValidHashNoName,
    "SHA512")]
[InlineData(
    X509IncludeOption.WholeChain,
    SigningCertificateOption.ValidHashWithName,
    SigningCertificateOption.ValidHashWithName,
    null)]
[InlineData(
    X509IncludeOption.WholeChain,
    SigningCertificateOption.ValidHashWithName,
    SigningCertificateOption.ValidHashWithName,
    "SHA384")]
[InlineData(
    X509IncludeOption.None,
    SigningCertificateOption.ValidHashWithName,
    SigningCertificateOption.ValidHashWithName,
    null)]
[InlineData(
    X509IncludeOption.None,
    SigningCertificateOption.ValidHashWithName,
    SigningCertificateOption.ValidHashWithName,
    "SHA384")]
public static void CertMatchV1AndV2(
    X509IncludeOption includeOption,
    SigningCertificateOption v1Option,
    SigningCertificateOption v2Option,
    string hashAlgName)
{
    CustomBuild_CertMatch(
        Certificates.ValidLookingTsaCert,
        new DateTimeOffset(2018, 1, 10, 17, 21, 11, 802, TimeSpan.Zero),
        v1Option,
        v2Option,
        hashAlgName == null ? default(HashAlgorithmName) : new HashAlgorithmName(hashAlgName),
        includeOption);
}
// Both V1 and V2 attributes present but at least one is invalid (bad hash, bad name,
// or bad serial in either attribute); the token must fail verification.
[Theory]
[InlineData(
    X509IncludeOption.None,
    SigningCertificateOption.InvalidHashNoName,
    SigningCertificateOption.ValidHashWithName,
    SubjectIdentifierType.IssuerAndSerialNumber,
    null)]
[InlineData(
    X509IncludeOption.WholeChain,
    SigningCertificateOption.ValidHashWithInvalidSerial,
    SigningCertificateOption.ValidHashWithName,
    SubjectIdentifierType.IssuerAndSerialNumber,
    "SHA384")]
[InlineData(
    X509IncludeOption.WholeChain,
    SigningCertificateOption.ValidHashWithInvalidName,
    SigningCertificateOption.InvalidHashNoName,
    SubjectIdentifierType.SubjectKeyIdentifier,
    null)]
[InlineData(
    X509IncludeOption.None,
    SigningCertificateOption.ValidHashWithName,
    SigningCertificateOption.InvalidHashNoName,
    SubjectIdentifierType.SubjectKeyIdentifier,
    "SHA512")]
[InlineData(
    X509IncludeOption.WholeChain,
    SigningCertificateOption.InvalidHashWithInvalidSerial,
    SigningCertificateOption.ValidHashNoName,
    SubjectIdentifierType.IssuerAndSerialNumber,
    null)]
public static void CertMismatchV1OrV2(
    X509IncludeOption includeOption,
    SigningCertificateOption v1Option,
    SigningCertificateOption v2Option,
    SubjectIdentifierType identifierType,
    string hashAlgName)
{
    CustomBuild_CertMismatch(
        Certificates.ValidLookingTsaCert,
        new DateTimeOffset(2018, 1, 10, 17, 21, 11, 802, TimeSpan.Zero),
        v1Option,
        v2Option,
        hashAlgName == null ? default(HashAlgorithmName) : new HashAlgorithmName(hashAlgName),
        includeOption: includeOption,
        identifierType: identifierType);
}
[Theory]
[InlineData(X509IncludeOption.WholeChain)]
[InlineData(X509IncludeOption.None)]
public static void TimestampTooOld(X509IncludeOption includeOption)
{
    // A timestamp one second before the TSA cert's NotBefore must fail verification,
    // even though the signing-certificate attribute itself is valid.
    CertLoader tsaLoader = Certificates.ValidLookingTsaCert;
    DateTimeOffset beforeValidity;
    using (X509Certificate2 tsaCert = tsaLoader.GetCertificate())
    {
        beforeValidity = tsaCert.NotBefore.AddSeconds(-1);
    }

    CustomBuild_CertMismatch(
        tsaLoader,
        beforeValidity,
        SigningCertificateOption.ValidHashNoName,
        SigningCertificateOption.Omit,
        includeOption: includeOption);
}
[Theory]
[InlineData(X509IncludeOption.WholeChain)]
[InlineData(X509IncludeOption.None)]
public static void TimestampTooNew(X509IncludeOption includeOption)
{
    // A timestamp one second after the TSA cert's NotAfter must fail verification,
    // even though the signing-certificate attribute itself is valid.
    CertLoader tsaLoader = Certificates.ValidLookingTsaCert;
    DateTimeOffset afterValidity;
    using (X509Certificate2 tsaCert = tsaLoader.GetCertificate())
    {
        afterValidity = tsaCert.NotAfter.AddSeconds(1);
    }

    CustomBuild_CertMismatch(
        tsaLoader,
        afterValidity,
        SigningCertificateOption.ValidHashNoName,
        SigningCertificateOption.Omit,
        includeOption: includeOption);
}
[Theory]
[InlineData(X509IncludeOption.WholeChain)]
[InlineData(X509IncludeOption.None)]
public static void NoEkuExtension(X509IncludeOption includeOption)
{
    // A cert with no EKU extension at all cannot be a valid TSA cert, so the token
    // must fail verification even at a time inside the cert's validity period.
    CertLoader certLoader = Certificates.RSA2048SignatureOnly;
    DateTimeOffset withinValidity;
    using (X509Certificate2 candidate = certLoader.GetCertificate())
    {
        withinValidity = candidate.NotAfter.AddDays(-1);

        // Sanity-check the fixture: this cert really has no EKU extension.
        Assert.Equal(0, candidate.Extensions.OfType<X509EnhancedKeyUsageExtension>().Count());
    }

    CustomBuild_CertMismatch(
        certLoader,
        withinValidity,
        SigningCertificateOption.ValidHashNoName,
        SigningCertificateOption.Omit,
        includeOption: includeOption);
}
[Theory]
[InlineData(X509IncludeOption.WholeChain)]
[InlineData(X509IncludeOption.None)]
public static void TwoEkuExtensions(X509IncludeOption includeOption)
{
    // A cert carrying two EKU extensions must be rejected, even though the first
    // extension contains the timestamping purpose.
    CertLoader certLoader = Certificates.TwoEkuTsaCert;
    DateTimeOffset withinValidity;
    using (X509Certificate2 candidate = certLoader.GetCertificate())
    {
        withinValidity = candidate.NotAfter.AddDays(-1);

        var ekuExtensions = candidate.Extensions.OfType<X509EnhancedKeyUsageExtension>().ToList();
        Assert.Equal(2, ekuExtensions.Count);

        // Make sure we're validating that "early success" doesn't happen.
        Assert.Contains(
            Oids.TimeStampingPurpose,
            ekuExtensions[0].EnhancedKeyUsages.OfType<Oid>().Select(o => o.Value));
    }

    CustomBuild_CertMismatch(
        certLoader,
        withinValidity,
        SigningCertificateOption.ValidHashNoName,
        SigningCertificateOption.Omit,
        includeOption: includeOption);
}
[Theory]
[InlineData(X509IncludeOption.WholeChain)]
[InlineData(X509IncludeOption.None)]
public static void NonCriticalEkuExtension(X509IncludeOption includeOption)
{
    // RFC 3161 requires the TSA EKU extension to be critical; a non-critical one
    // must cause verification to fail.
    CertLoader certLoader = Certificates.NonCriticalTsaEku;
    DateTimeOffset withinValidity;
    using (X509Certificate2 candidate = certLoader.GetCertificate())
    {
        withinValidity = candidate.NotAfter.AddDays(-1);

        var ekuExtensions = candidate.Extensions.OfType<X509EnhancedKeyUsageExtension>().ToList();
        Assert.Equal(1, ekuExtensions.Count);
        Assert.False(ekuExtensions[0].Critical, "ekuExts[0].Critical");
    }

    CustomBuild_CertMismatch(
        certLoader,
        withinValidity,
        SigningCertificateOption.ValidHashNoName,
        SigningCertificateOption.Omit,
        includeOption: includeOption);
}
[Theory]
[InlineData(X509IncludeOption.WholeChain)]
[InlineData(X509IncludeOption.None)]
public static void NoTsaEku(X509IncludeOption includeOption)
{
    // A TLS client/server cert has an EKU extension, but not the timestamping
    // purpose, so the token must fail verification.
    CertLoader certLoader = Certificates.TlsClientServerCert;
    DateTimeOffset withinValidity;
    using (X509Certificate2 candidate = certLoader.GetCertificate())
    {
        withinValidity = candidate.NotAfter.AddDays(-1);
    }

    CustomBuild_CertMismatch(
        certLoader,
        withinValidity,
        SigningCertificateOption.ValidHashNoName,
        SigningCertificateOption.Omit,
        includeOption: includeOption);
}
// Builds a custom timestamp token signed by the certificate from <paramref name="loader"/>
// with the requested SigningCertificate / SigningCertificateV2 attributes, then asserts
// that it decodes fully and that signature verification succeeds against that certificate.
private static void CustomBuild_CertMatch(
    CertLoader loader,
    DateTimeOffset referenceTime,
    SigningCertificateOption v1Option,
    SigningCertificateOption v2Option,
    HashAlgorithmName v2AlgorithmName = default,
    X509IncludeOption includeOption = default,
    SubjectIdentifierType identifierType = SubjectIdentifierType.IssuerAndSerialNumber)
{
    byte[] tokenBytes = BuildCustomToken(
        loader,
        referenceTime,
        v1Option,
        v2Option,
        v2AlgorithmName,
        includeOption,
        identifierType);

    Rfc3161TimestampToken token;
    Assert.True(Rfc3161TimestampToken.TryDecode(tokenBytes, out token, out int bytesRead));
    Assert.Equal(tokenBytes.Length, bytesRead);
    Assert.NotNull(token);
    Assert.Equal(referenceTime, token.TokenInfo.Timestamp);

    // Verify against the same loader that produced the token. (Previously this
    // hardcoded Certificates.ValidLookingTsaCert; every existing caller passes that
    // loader, so this is behavior-preserving while making the helper usable with any
    // TSA cert and consistent with CustomBuild_CertMismatch.)
    using (X509Certificate2 cert = loader.GetCertificate())
    {
        Assert.True(
            token.VerifySignatureForHash(
                token.TokenInfo.GetMessageHash().Span,
                token.TokenInfo.HashAlgorithmId,
                out X509Certificate2 signer,
                new X509Certificate2Collection(cert)));

        Assert.Equal(cert, signer);
    }
}
// Builds a custom timestamp token whose signing-certificate attribute(s) should NOT
// match the signer (or whose reference time / EKU is unacceptable), then asserts the
// expected failure mode: TryDecode rejects the token outright, or (when it parses)
// VerifySignatureForHash fails.
private static void CustomBuild_CertMismatch(
    CertLoader loader,
    DateTimeOffset referenceTime,
    SigningCertificateOption v1Option,
    SigningCertificateOption v2Option,
    HashAlgorithmName v2AlgorithmName = default,
    X509IncludeOption includeOption = default,
    SubjectIdentifierType identifierType = SubjectIdentifierType.IssuerAndSerialNumber)
{
    byte[] tokenBytes = BuildCustomToken(
        loader,
        referenceTime,
        v1Option,
        v2Option,
        v2AlgorithmName,
        includeOption,
        identifierType);
    Rfc3161TimestampToken token;
    // With no embedded certs the decoder cannot check the cert hash, so most
    // mismatches only surface later, at signature verification.
    bool willParse = includeOption == X509IncludeOption.None;
    if (willParse && identifierType == SubjectIdentifierType.IssuerAndSerialNumber)
    {
        // Because IASN matches against the ESSCertId(V2) directly it will reject the token.
        switch (v1Option)
        {
            case SigningCertificateOption.ValidHashWithInvalidName:
            case SigningCertificateOption.ValidHashWithInvalidSerial:
            case SigningCertificateOption.InvalidHashWithInvalidName:
            case SigningCertificateOption.InvalidHashWithInvalidSerial:
                willParse = false;
                break;
        }
        switch (v2Option)
        {
            case SigningCertificateOption.ValidHashWithInvalidName:
            case SigningCertificateOption.ValidHashWithInvalidSerial:
            case SigningCertificateOption.InvalidHashWithInvalidName:
            case SigningCertificateOption.InvalidHashWithInvalidSerial:
                willParse = false;
                break;
        }
    }
    if (willParse)
    {
        // Token parses, but verification against the actual signer cert must fail.
        Assert.True(Rfc3161TimestampToken.TryDecode(tokenBytes, out token, out int bytesRead));
        Assert.NotNull(token);
        Assert.Equal(tokenBytes.Length, bytesRead);
        using (X509Certificate2 cert = loader.GetCertificate())
        {
            Assert.False(
                token.VerifySignatureForHash(
                    token.TokenInfo.GetMessageHash().Span,
                    token.TokenInfo.HashAlgorithmId,
                    out X509Certificate2 signer,
                    new X509Certificate2Collection(cert)));
            Assert.Null(signer);
        }
    }
    else
    {
        // Token is rejected at decode time: nothing consumed, no token produced.
        Assert.False(Rfc3161TimestampToken.TryDecode(tokenBytes, out token, out int bytesRead));
        Assert.Null(token);
        Assert.Equal(0, bytesRead);
    }
}
// Hand-builds an RFC 3161 timestamp token (TSTInfo wrapped in a SignedCms) so the
// tests can control exactly how the SigningCertificate (V1, SHA-1 / ESSCertID) and
// SigningCertificateV2 (ESSCertIDv2) signed attributes are formed, including
// deliberately-invalid hashes, issuer names, and serial numbers.
// The DER for both attributes is assembled manually byte-by-byte.
private static byte[] BuildCustomToken(
    CertLoader cert,
    DateTimeOffset timestamp,
    SigningCertificateOption v1Option,
    SigningCertificateOption v2Option,
    HashAlgorithmName v2DigestAlg=default,
    X509IncludeOption includeOption=X509IncludeOption.ExcludeRoot,
    SubjectIdentifierType identifierType=SubjectIdentifierType.IssuerAndSerialNumber)
{
    // One minute of accuracy, expressed in microseconds.
    long accuracyMicroSeconds = (long)(TimeSpan.FromMinutes(1).TotalMilliseconds * 1000);
    // Big-endian tick count makes a convenient unique-ish serial number.
    byte[] serialNumber = BitConverter.GetBytes(DateTimeOffset.UtcNow.Ticks);
    Array.Reverse(serialNumber);
    Rfc3161TimestampTokenInfo info = new Rfc3161TimestampTokenInfo(
        new Oid("0.0", "0.0"),        // placeholder policy OID
        new Oid(Oids.Sha384),
        new byte[384 / 8],            // all-zero "message hash" of SHA-384 length
        serialNumber,
        timestamp,
        accuracyMicroSeconds,
        isOrdering: true);
    ContentInfo contentInfo = new ContentInfo(new Oid(Oids.TstInfo, Oids.TstInfo), info.Encode());
    SignedCms cms = new SignedCms(contentInfo);
    using (X509Certificate2 tsaCert = cert.TryGetCertificateWithPrivateKey())
    {
        CmsSigner signer = new CmsSigner(identifierType, tsaCert)
        {
            IncludeOption = includeOption
        };
        if (v1Option != SigningCertificateOption.Omit)
        {
            ExpandOption(v1Option, out bool validHash, out bool skipIssuerSerial, out bool validName, out bool validSerial);
            // simple SigningCertificate
            // Template: SEQUENCE { SEQUENCE OF { SEQUENCE { OCTET STRING(20 zero bytes) } } }
            // The trailing 20 bytes are overwritten with the real SHA-1 hash below.
            byte[] signingCertificateV1Bytes =
                "301A3018301604140000000000000000000000000000000000000000".HexToByteArray();
            if (validHash)
            {
                using (SHA1 hasher = SHA1.Create())
                {
                    byte[] hash = hasher.ComputeHash(tsaCert.RawData);
                    Buffer.BlockCopy(
                        hash,
                        0,
                        signingCertificateV1Bytes,
                        signingCertificateV1Bytes.Length - hash.Length,
                        hash.Length);
                }
            }
            if (!skipIssuerSerial)
            {
                byte[] footer = BuildIssuerAndSerialNumber(tsaCert, validName, validSerial);
                // Patch the three nested SEQUENCE length bytes to account for the
                // appended IssuerSerial; only works for single-byte DER lengths.
                signingCertificateV1Bytes[1] += (byte)footer.Length;
                signingCertificateV1Bytes[3] += (byte)footer.Length;
                signingCertificateV1Bytes[5] += (byte)footer.Length;
                Assert.InRange(signingCertificateV1Bytes[1], 0, 127);
                signingCertificateV1Bytes = signingCertificateV1Bytes.Concat(footer).ToArray();
            }
            // 1.2.840.113549.1.9.16.2.12 = id-aa-signingCertificate (RFC 2634)
            signer.SignedAttributes.Add(
                new AsnEncodedData("1.2.840.113549.1.9.16.2.12", signingCertificateV1Bytes));
        }
        if (v2Option != SigningCertificateOption.Omit)
        {
            byte[] attrBytes;
            byte[] algBytes = Array.Empty<byte>();
            byte[] hashBytes;
            byte[] issuerNameBytes = Array.Empty<byte>();
            if (v2DigestAlg != default)
            {
                // Pre-encoded AlgorithmIdentifier SEQUENCEs (with NULL parameters).
                switch (v2DigestAlg.Name)
                {
                    case "MD5":
                        algBytes = "300C06082A864886F70D02050500".HexToByteArray();
                        break;
                    case "SHA1":
                        algBytes = "300906052B0E03021A0500".HexToByteArray();
                        break;
                    case "SHA256":
                        // Invalid under DER, because it's the default.
                        algBytes = "300D06096086480165030402010500".HexToByteArray();
                        break;
                    case "SHA384":
                        algBytes = "300D06096086480165030402020500".HexToByteArray();
                        break;
                    case "SHA512":
                        algBytes = "300D06096086480165030402030500".HexToByteArray();
                        break;
                    default:
                        throw new NotSupportedException(v2DigestAlg.Name);
                }
            }
            else
            {
                // ESSCertIDv2's DEFAULT algorithm: hash with SHA-256 but omit algBytes.
                v2DigestAlg = HashAlgorithmName.SHA256;
            }
            hashBytes = tsaCert.GetCertHash(v2DigestAlg);
            ExpandOption(v2Option, out bool validHash, out bool skipIssuerSerial, out bool validName, out bool validSerial);
            if (!validHash)
            {
                // Corrupt one byte so the hash no longer matches the cert.
                hashBytes[0] ^= 0xFF;
            }
            if (!skipIssuerSerial)
            {
                issuerNameBytes = BuildIssuerAndSerialNumber(tsaCert, validName, validSerial);
            }
            // hashBytes hasn't been wrapped in an OCTET STRING yet, so add 2 more.
            int payloadSize = algBytes.Length + hashBytes.Length + issuerNameBytes.Length + 2;
            // Single-byte DER lengths only (127 max, minus the 4 header bytes added below).
            Assert.InRange(payloadSize, 0, 123);
            attrBytes = new byte[payloadSize + 6];
            int index = 0;
            // SEQUENCE (SigningCertificateV2)
            attrBytes[index++] = 0x30;
            attrBytes[index++] = (byte)(payloadSize + 4);
            // SEQUENCE OF => certs
            attrBytes[index++] = 0x30;
            attrBytes[index++] = (byte)(payloadSize + 2);
            // SEQUENCE (ESSCertIdV2)
            attrBytes[index++] = 0x30;
            attrBytes[index++] = (byte)payloadSize;
            Buffer.BlockCopy(algBytes, 0, attrBytes, index, algBytes.Length);
            index += algBytes.Length;
            // OCTET STRING (Hash)
            attrBytes[index++] = 0x04;
            attrBytes[index++] = (byte)hashBytes.Length;
            Buffer.BlockCopy(hashBytes, 0, attrBytes, index, hashBytes.Length);
            index += hashBytes.Length;
            Buffer.BlockCopy(issuerNameBytes, 0, attrBytes, index, issuerNameBytes.Length);
            // 1.2.840.113549.1.9.16.2.47 = id-aa-signingCertificateV2 (RFC 5035)
            signer.SignedAttributes.Add(
                new AsnEncodedData("1.2.840.113549.1.9.16.2.47", attrBytes));
        }
        cms.ComputeSignature(signer);
    }
    return cms.Encode();
}
// Manually DER-encodes an ESS IssuerSerial structure (GeneralNames wrapping the
// issuer's directoryName, plus the certificate serial number), optionally with a
// deliberately wrong issuer name or a deliberately corrupted serial number.
// Only supports single-byte DER lengths, hence the size guard below.
private static byte[] BuildIssuerAndSerialNumber(X509Certificate2 tsaCert, bool validName, bool validSerial)
{
    byte[] issuerNameBytes;
    if (validName)
    {
        issuerNameBytes = tsaCert.IssuerName.RawData;
    }
    else
    {
        // Any syntactically-valid but different name will do for the mismatch case.
        issuerNameBytes = new X500DistinguishedName("CN=No Match").RawData;
    }
    byte[] serialBytes = tsaCert.GetSerialNumber();
    if (validSerial)
    {
        // GetSerialNumber returns little-endian; the attribute wants big-endian.
        Array.Reverse(serialBytes);
    }
    else
    {
        // If the byte sequence was a palindrome it's still a match,
        // so flip some bits.
        serialBytes[0] ^= 0x7F;
    }
    if (issuerNameBytes.Length + serialBytes.Length > 80)
    {
        throw new NotSupportedException(
            "Issuer name and serial length are bigger than this code can handle");
    }
    // SEQUENCE
    //   SEQUENCE
    //     CONTEXT-SPECIFIC 4
    //       [IssuerName]
    //   INTEGER
    //     [SerialNumber, big endian]
    byte[] issuerAndSerialNumber = new byte[issuerNameBytes.Length + serialBytes.Length + 8];
    issuerAndSerialNumber[0] = 0x30;
    issuerAndSerialNumber[1] = (byte)(issuerAndSerialNumber.Length - 2);
    issuerAndSerialNumber[2] = 0x30;
    issuerAndSerialNumber[3] = (byte)(issuerNameBytes.Length + 2);
    // 0xA4 = context-specific, constructed, tag 4 (directoryName in GeneralName).
    issuerAndSerialNumber[4] = 0xA4;
    issuerAndSerialNumber[5] = (byte)(issuerNameBytes.Length);
    Buffer.BlockCopy(issuerNameBytes, 0, issuerAndSerialNumber, 6, issuerNameBytes.Length);
    // 0x02 = INTEGER tag for the serial number.
    issuerAndSerialNumber[issuerNameBytes.Length + 6] = 0x02;
    issuerAndSerialNumber[issuerNameBytes.Length + 7] = (byte)serialBytes.Length;
    Buffer.BlockCopy(serialBytes, 0, issuerAndSerialNumber, issuerNameBytes.Length + 8, serialBytes.Length);
    return issuerAndSerialNumber;
}
// Decodes a SigningCertificateOption into its orthogonal flags: whether the cert
// hash is valid, whether the IssuerSerial portion is omitted entirely, and (when
// present) whether the issuer name and serial number match the signer.
private static void ExpandOption(
    SigningCertificateOption option,
    out bool validHash,
    out bool skipIssuerSerial,
    out bool validName,
    out bool validSerial)
{
    Assert.NotEqual(SigningCertificateOption.Omit, option);

    // Enum members below InvalidHashNoName form the "ValidHash*" family.
    validHash = option < SigningCertificateOption.InvalidHashNoName;

    switch (option)
    {
        case SigningCertificateOption.ValidHashNoName:
        case SigningCertificateOption.InvalidHashNoName:
            // No IssuerSerial is emitted at all, so name/serial validity is moot.
            skipIssuerSerial = true;
            validName = false;
            validSerial = false;
            break;
        case SigningCertificateOption.ValidHashWithName:
        case SigningCertificateOption.InvalidHashWithName:
            skipIssuerSerial = false;
            validName = true;
            validSerial = true;
            break;
        case SigningCertificateOption.ValidHashWithInvalidName:
        case SigningCertificateOption.InvalidHashWithInvalidName:
            skipIssuerSerial = false;
            validName = false;
            validSerial = true;
            break;
        default:
            // ValidHashWithInvalidSerial / InvalidHashWithInvalidSerial.
            skipIssuerSerial = false;
            validName = true;
            validSerial = false;
            break;
    }
}
// Controls how BuildCustomToken forms the ESSCertID / ESSCertIDv2 attribute:
// whether the certificate hash matches the signer, and whether/how the optional
// IssuerSerial (issuer name + serial number) portion matches.
// (See ExpandOption for the exact flag decomposition of each member.)
public enum SigningCertificateOption
{
    // No signing-certificate attribute is emitted at all.
    Omit,
    // Hash matches; IssuerSerial omitted.
    ValidHashNoName,
    // Hash matches; IssuerSerial present with matching name and serial.
    ValidHashWithName,
    // Hash matches; IssuerSerial present but issuer name does not match.
    ValidHashWithInvalidName,
    // Hash matches; IssuerSerial present but serial number does not match.
    ValidHashWithInvalidSerial,
    // Hash mismatched; IssuerSerial omitted.
    InvalidHashNoName,
    // Hash mismatched; IssuerSerial present and matching.
    InvalidHashWithName,
    // Hash mismatched; issuer name also mismatched.
    InvalidHashWithInvalidName,
    // Hash mismatched; serial number also mismatched.
    InvalidHashWithInvalidSerial,
}
}
}
|
version https://git-lfs.github.com/spec/v1
oid sha256:31ab6730ac99c1a9614d4e899add4afcedf26e1841e6560021e88760c8e437e0
size 5268
|
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="robots" content="index, follow, all" />
<title>Thelia\Core\Event\Product\ProductAddAccessoryEvent | Thelia 2 API</title>
<link rel="stylesheet" type="text/css" href="../../../../css/bootstrap.min.css">
<link rel="stylesheet" type="text/css" href="../../../../css/bootstrap-theme.min.css">
<link rel="stylesheet" type="text/css" href="../../../../css/sami.css">
<script src="../../../../js/jquery-1.11.1.min.js"></script>
<script src="../../../../js/bootstrap.min.js"></script>
<script src="../../../../js/typeahead.min.js"></script>
<script src="../../../../sami.js"></script>
<meta name="MobileOptimized" content="width">
<meta name="HandheldFriendly" content="true">
<meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=1">
</head>
<body id="class" data-name="class:Thelia_Core_Event_Product_ProductAddAccessoryEvent" data-root-path="../../../../">
<div id="content">
<div id="left-column">
<div id="control-panel">
<form id="search-form" action="../../../../search.html" method="GET">
<span class="glyphicon glyphicon-search"></span>
<input name="search"
class="typeahead form-control"
type="search"
placeholder="Search">
</form>
</div>
<div id="api-tree"></div>
</div>
<div id="right-column">
<nav id="site-nav" class="navbar navbar-default" role="navigation">
<div class="container-fluid">
<div class="navbar-header">
<button type="button" class="navbar-toggle" data-toggle="collapse" data-target="#navbar-elements">
<span class="sr-only">Toggle navigation</span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
<a class="navbar-brand" href="../../../../index.html">Thelia 2 API</a>
</div>
<div class="collapse navbar-collapse" id="navbar-elements">
<ul class="nav navbar-nav">
<li><a href="../../../../classes.html">Classes</a></li>
<li><a href="../../../../namespaces.html">Namespaces</a></li>
<li><a href="../../../../interfaces.html">Interfaces</a></li>
<li><a href="../../../../traits.html">Traits</a></li>
<li><a href="../../../../doc-index.html">Index</a></li>
<li><a href="../../../../search.html">Search</a></li>
</ul>
</div>
</div>
</nav>
<div class="namespace-breadcrumbs">
<ol class="breadcrumb">
<li><span class="label label-default">class</span></li>
<li><a href="../../../../Thelia.html">Thelia</a></li>
<li><a href="../../../../Thelia/Core.html">Core</a></li>
<li><a href="../../../../Thelia/Core/Event.html">Event</a></li>
<li><a href="../../../../Thelia/Core/Event/Product.html">Product</a></li>
<li>ProductAddAccessoryEvent</li>
</ol>
</div>
<div id="page-content">
<div class="page-header">
<h1>ProductAddAccessoryEvent</h1>
</div>
<p> class
<strong>ProductAddAccessoryEvent</strong> extends <a href="../../../../Thelia/Core/Event/Product/ProductEvent.html"><abbr title="Thelia\Core\Event\Product\ProductEvent">ProductEvent</abbr></a>
</p>
<h2>Properties</h2>
<table class="table table-condensed">
<tr>
<td class="type" id="property_product">
</td>
<td>$product</td>
<td class="last"></td>
<td><small>from
<a href="../../../../Thelia/Core/Event/Product/ProductEvent.html#property_product"><abbr title="Thelia\Core\Event\Product\ProductEvent">ProductEvent</abbr></a></small></td>
</tr>
</table>
<h2>Methods</h2>
<div class="container-fluid underlined">
<div class="row">
<div class="col-md-2 type">
</div>
<div class="col-md-8 type">
<a href="#method___set">__set</a>($name, $value)
<p class="no-description">No description</p>
</div>
<div class="col-md-2"><small>from
<a href="../../../../Thelia/Core/Event/ActionEvent.html#method___set"><abbr title="Thelia\Core\Event\ActionEvent">ActionEvent</abbr></a></small></div>
</div>
<div class="row">
<div class="col-md-2 type">
</div>
<div class="col-md-8 type">
<a href="#method___get">__get</a>($name)
<p class="no-description">No description</p>
</div>
<div class="col-md-2"><small>from
<a href="../../../../Thelia/Core/Event/ActionEvent.html#method___get"><abbr title="Thelia\Core\Event\ActionEvent">ActionEvent</abbr></a></small></div>
</div>
<div class="row">
<div class="col-md-2 type">
</div>
<div class="col-md-8 type">
<a href="#method_bindForm">bindForm</a>(
<abbr title="Thelia\Core\Event\Symfony\Component\Form\Form">Form</abbr> $form)
<p class="no-description">No description</p>
</div>
<div class="col-md-2"><small>from
<a href="../../../../Thelia/Core/Event/ActionEvent.html#method_bindForm"><abbr title="Thelia\Core\Event\ActionEvent">ActionEvent</abbr></a></small></div>
</div>
<div class="row">
<div class="col-md-2 type">
</div>
<div class="col-md-8 type">
<a href="#method___construct">__construct</a>(
<abbr title="Thelia\Core\Event\Product\Thelia\Model\Product">Product</abbr> $product, $accessory_id)
<p class="no-description">No description</p>
</div>
<div class="col-md-2"></div>
</div>
<div class="row">
<div class="col-md-2 type">
</div>
<div class="col-md-8 type">
<a href="#method_hasProduct">hasProduct</a>()
<p class="no-description">No description</p>
</div>
<div class="col-md-2"><small>from
<a href="../../../../Thelia/Core/Event/Product/ProductEvent.html#method_hasProduct"><abbr title="Thelia\Core\Event\Product\ProductEvent">ProductEvent</abbr></a></small></div>
</div>
<div class="row">
<div class="col-md-2 type">
</div>
<div class="col-md-8 type">
<a href="#method_getProduct">getProduct</a>()
<p class="no-description">No description</p>
</div>
<div class="col-md-2"><small>from
<a href="../../../../Thelia/Core/Event/Product/ProductEvent.html#method_getProduct"><abbr title="Thelia\Core\Event\Product\ProductEvent">ProductEvent</abbr></a></small></div>
</div>
<div class="row">
<div class="col-md-2 type">
</div>
<div class="col-md-8 type">
<a href="#method_setProduct">setProduct</a>(
<abbr title="Thelia\Core\Event\Product\Thelia\Model\Product">Product</abbr> $product)
<p class="no-description">No description</p>
</div>
<div class="col-md-2"><small>from
<a href="../../../../Thelia/Core/Event/Product/ProductEvent.html#method_setProduct"><abbr title="Thelia\Core\Event\Product\ProductEvent">ProductEvent</abbr></a></small></div>
</div>
<div class="row">
<div class="col-md-2 type">
</div>
<div class="col-md-8 type">
<a href="#method_getAccessoryId">getAccessoryId</a>()
<p class="no-description">No description</p>
</div>
<div class="col-md-2"></div>
</div>
<div class="row">
<div class="col-md-2 type">
</div>
<div class="col-md-8 type">
<a href="#method_setAccessoryId">setAccessoryId</a>($accessory_id)
<p class="no-description">No description</p>
</div>
<div class="col-md-2"></div>
</div>
</div>
<h2>Details</h2>
<div id="method-details">
<div class="method-item">
<h3 id="method___set">
<div class="location">in
<a href="../../../../Thelia/Core/Event/ActionEvent.html#method___set"><abbr title="Thelia\Core\Event\ActionEvent">ActionEvent</abbr></a> at line 30</div>
<code>
<strong>__set</strong>($name, $value)</code>
</h3>
<div class="details">
<div class="tags">
<h4>Parameters</h4>
<table class="table table-condensed">
<tr>
<td></td>
<td>$name</td>
<td></td>
</tr>
<tr>
<td></td>
<td>$value</td>
<td></td>
</tr>
</table>
</div>
</div>
</div>
<div class="method-item">
<h3 id="method___get">
<div class="location">in
<a href="../../../../Thelia/Core/Event/ActionEvent.html#method___get"><abbr title="Thelia\Core\Event\ActionEvent">ActionEvent</abbr></a> at line 35</div>
<code>
<strong>__get</strong>($name)</code>
</h3>
<div class="details">
<div class="tags">
<h4>Parameters</h4>
<table class="table table-condensed">
<tr>
<td></td>
<td>$name</td>
<td></td>
</tr>
</table>
</div>
</div>
</div>
<div class="method-item">
<h3 id="method_bindForm">
<div class="location">in
<a href="../../../../Thelia/Core/Event/ActionEvent.html#method_bindForm"><abbr title="Thelia\Core\Event\ActionEvent">ActionEvent</abbr></a> at line 44</div>
<code>
<strong>bindForm</strong>(
<abbr title="Thelia\Core\Event\Symfony\Component\Form\Form">Form</abbr> $form)</code>
</h3>
<div class="details">
<div class="tags">
<h4>Parameters</h4>
<table class="table table-condensed">
<tr>
<td>
<abbr title="Thelia\Core\Event\Symfony\Component\Form\Form">Form</abbr></td>
<td>$form</td>
<td></td>
</tr>
</table>
</div>
</div>
</div>
<div class="method-item">
<h3 id="method___construct">
<div class="location">at line 21</div>
<code>
<strong>__construct</strong>(
<abbr title="Thelia\Core\Event\Product\Thelia\Model\Product">Product</abbr> $product, $accessory_id)</code>
</h3>
<div class="details">
<div class="tags">
<h4>Parameters</h4>
<table class="table table-condensed">
<tr>
<td>
<abbr title="Thelia\Core\Event\Product\Thelia\Model\Product">Product</abbr></td>
<td>$product</td>
<td></td>
</tr>
<tr>
<td></td>
<td>$accessory_id</td>
<td></td>
</tr>
</table>
</div>
</div>
</div>
<div class="method-item">
<h3 id="method_hasProduct">
<div class="location">in
<a href="../../../../Thelia/Core/Event/Product/ProductEvent.html#method_hasProduct"><abbr title="Thelia\Core\Event\Product\ProductEvent">ProductEvent</abbr></a> at line 27</div>
<code>
<strong>hasProduct</strong>()</code>
</h3>
<div class="details">
<div class="tags">
</div>
</div>
</div>
<div class="method-item">
<h3 id="method_getProduct">
<div class="location">in
<a href="../../../../Thelia/Core/Event/Product/ProductEvent.html#method_getProduct"><abbr title="Thelia\Core\Event\Product\ProductEvent">ProductEvent</abbr></a> at line 32</div>
<code>
<strong>getProduct</strong>()</code>
</h3>
<div class="details">
<div class="tags">
</div>
</div>
</div>
<div class="method-item">
<h3 id="method_setProduct">
<div class="location">in
<a href="../../../../Thelia/Core/Event/Product/ProductEvent.html#method_setProduct"><abbr title="Thelia\Core\Event\Product\ProductEvent">ProductEvent</abbr></a> at line 37</div>
<code>
<strong>setProduct</strong>(
<abbr title="Thelia\Core\Event\Product\Thelia\Model\Product">Product</abbr> $product)</code>
</h3>
<div class="details">
<div class="tags">
<h4>Parameters</h4>
<table class="table table-condensed">
<tr>
<td>
<abbr title="Thelia\Core\Event\Product\Thelia\Model\Product">Product</abbr></td>
<td>$product</td>
<td></td>
</tr>
</table>
</div>
</div>
</div>
<div class="method-item">
<h3 id="method_getAccessoryId">
<div class="location">at line 28</div>
<code>
<strong>getAccessoryId</strong>()</code>
</h3>
<div class="details">
<div class="tags">
</div>
</div>
</div>
<div class="method-item">
<h3 id="method_setAccessoryId">
<div class="location">at line 33</div>
<code>
<strong>setAccessoryId</strong>($accessory_id)</code>
</h3>
<div class="details">
<div class="tags">
<h4>Parameters</h4>
<table class="table table-condensed">
<tr>
<td></td>
<td>$accessory_id</td>
<td></td>
</tr>
</table>
</div>
</div>
</div>
</div>
</div>
<div id="footer">
Generated by <a href="http://sami.sensiolabs.org/">Sami, the API Documentation Generator</a>.
</div>
</div>
</div>
</body>
</html>
|
# Copyright Niall Douglas 2005.
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
"""
>>> from voidptr_ext import *
Check for correct conversion
>>> use(get())
Check that None is converted to a NULL void pointer
>>> useany(get())
1
>>> useany(None)
0
Check that we don't lose type information by converting NULL
opaque pointers to None
>>> assert getnull() is None
>>> useany(getnull())
0
Check that there is no conversion from integers ...
>>> try: use(0)
... except TypeError: pass
... else: print 'expected a TypeError'
... and from strings to opaque objects
>>> try: use("")
... except TypeError: pass
... else: print 'expected a TypeError'
"""
def run(args=None):
    """Execute this module's doctests via :func:`doctest.testmod`.

    :param args: optional replacement for ``sys.argv`` (a list of strings);
        when ``None``, the current ``sys.argv`` is left untouched.
    :return: the ``doctest.TestResults`` pair ``(failed, attempted)``.
    """
    import doctest
    import sys

    if args is not None:
        sys.argv = args
    # Look the module up by name so doctest sees it exactly as imported.
    return doctest.testmod(sys.modules.get(__name__))
# Script entry point: run the doctests and exit with the number of failures
# (0 on success).  NOTE: the print statements are Python 2 syntax — this file
# targets Python 2 and will not parse under Python 3.
if __name__ == '__main__':
    print "running..."
    import sys
    status = run()[0]
    if (status == 0): print "Done."
    sys.exit(status)
|
/*******************************************************/
/* "C" Language Integrated Production System */
/* */
/* CLIPS Version 6.20 01/31/02 */
/* */
/* DEFMODULE CONSTRUCT COMPILER HEADER FILE */
/*******************************************************/
/*************************************************************/
/* Purpose: Implements the constructs-to-c feature for the */
/* defmodule construct. */
/* */
/* Principal Programmer(s): */
/* Gary D. Riley */
/* */
/* Contributing Programmer(s): */
/* */
/* Revision History: */
/* */
/*************************************************************/
#ifndef _H_modulcmp
#define _H_modulcmp
#ifndef _STDIO_INCLUDED_
#define _STDIO_INCLUDED_
#include <stdio.h>
#endif
#ifndef _H_moduldef
#include "moduldef.h"
#endif
/* LOCALE expands to nothing inside the implementation translation unit
   (_MODULCMP_SOURCE_) and to `extern` everywhere else, so these prototypes
   double as declarations for all other files. */
#ifdef LOCALE
#undef LOCALE
#endif
#ifdef _MODULCMP_SOURCE_
#define LOCALE
#else
#define LOCALE extern
#endif
/* Registers the defmodule construct with the constructs-to-C compiler. */
LOCALE void DefmoduleCompilerSetup(void *);
/* Emits a C source reference to the given defmodule into the output file. */
LOCALE void PrintDefmoduleReference(void *,FILE *,struct defmodule *);
#endif
|
<?php
// DS: short alias for the platform directory separator, used by the classmap below
if(!defined('DS')) define('DS', DIRECTORY_SEPARATOR);
// MB: 1 when the mbstring extension is available, 0 otherwise
if(!defined('MB')) define('MB', (int)function_exists('mb_get_info'));
// BOM: the UTF-8 byte-order mark, handy for stripping from file contents
if(!defined('BOM')) define('BOM', "\xEF\xBB\xBF");
// polyfill for new sort flag
// NOTE(review): defined as a string, not an int — presumably only used as a
// sentinel compared with ===, never passed to sort(); verify against callers.
if(!defined('SORT_NATURAL')) define('SORT_NATURAL', 'SORT_NATURAL');
// a super simple autoloader
/**
 * Register a case-insensitive classmap-based autoloader.
 *
 * @param array       $classmap map of lowercase class name => file path
 * @param string|null $base     optional base directory prepended to each path
 */
function load($classmap, $base = null) {
  spl_autoload_register(function($class) use ($classmap, $base) {
    $key = strtolower($class);
    if(!isset($classmap[$key])) return false;
    // Prefix with the base directory when one was supplied.
    $file = $base ? $base . DS . $classmap[$key] : $classmap[$key];
    include($file);
  });
}
// auto-load all toolkit classes
load(array(
// classes
'a' => __DIR__ . DS . 'lib' . DS . 'a.php',
'bitmask' => __DIR__ . DS . 'lib' . DS . 'bitmask.php',
'brick' => __DIR__ . DS . 'lib' . DS . 'brick.php',
'c' => __DIR__ . DS . 'lib' . DS . 'c.php',
'cookie' => __DIR__ . DS . 'lib' . DS . 'cookie.php',
'cache' => __DIR__ . DS . 'lib' . DS . 'cache.php',
'cache\\driver' => __DIR__ . DS . 'lib' . DS . 'cache' . DS . 'driver.php',
'cache\\driver\\apc' => __DIR__ . DS . 'lib' . DS . 'cache' . DS . 'driver' . DS . 'apc.php',
'cache\\driver\\file' => __DIR__ . DS . 'lib' . DS . 'cache' . DS . 'driver' . DS . 'file.php',
'cache\\driver\\memcached' => __DIR__ . DS . 'lib' . DS . 'cache' . DS . 'driver' . DS . 'memcached.php',
'cache\\driver\\mock' => __DIR__ . DS . 'lib' . DS . 'cache' . DS . 'driver' . DS . 'mock.php',
'cache\\driver\\session' => __DIR__ . DS . 'lib' . DS . 'cache' . DS . 'driver' . DS . 'session.php',
'cache\\value' => __DIR__ . DS . 'lib' . DS . 'cache' . DS . 'value.php',
'collection' => __DIR__ . DS . 'lib' . DS . 'collection.php',
'crypt' => __DIR__ . DS . 'lib' . DS . 'crypt.php',
'data' => __DIR__ . DS . 'lib' . DS . 'data.php',
'database' => __DIR__ . DS . 'lib' . DS . 'database.php',
'database\\query' => __DIR__ . DS . 'lib' . DS . 'database' . DS . 'query.php',
'db' => __DIR__ . DS . 'lib' . DS . 'db.php',
'detect' => __DIR__ . DS . 'lib' . DS . 'detect.php',
'dimensions' => __DIR__ . DS . 'lib' . DS . 'dimensions.php',
'dir' => __DIR__ . DS . 'lib' . DS . 'dir.php',
'email' => __DIR__ . DS . 'lib' . DS . 'email.php',
'embed' => __DIR__ . DS . 'lib' . DS . 'embed.php',
'error' => __DIR__ . DS . 'lib' . DS . 'error.php',
'errorreporting' => __DIR__ . DS . 'lib' . DS . 'errorreporting.php',
'escape' => __DIR__ . DS . 'lib' . DS . 'escape.php',
'exif' => __DIR__ . DS . 'lib' . DS . 'exif.php',
'exif\\camera' => __DIR__ . DS . 'lib' . DS . 'exif' . DS . 'camera.php',
'exif\\location' => __DIR__ . DS . 'lib' . DS . 'exif' . DS . 'location.php',
'f' => __DIR__ . DS . 'lib' . DS . 'f.php',
'folder' => __DIR__ . DS . 'lib' . DS . 'folder.php',
'header' => __DIR__ . DS . 'lib' . DS . 'header.php',
'html' => __DIR__ . DS . 'lib' . DS . 'html.php',
'i' => __DIR__ . DS . 'lib' . DS . 'i.php',
'l' => __DIR__ . DS . 'lib' . DS . 'l.php',
'media' => __DIR__ . DS . 'lib' . DS . 'media.php',
'obj' => __DIR__ . DS . 'lib' . DS . 'obj.php',
'pagination' => __DIR__ . DS . 'lib' . DS . 'pagination.php',
'password' => __DIR__ . DS . 'lib' . DS . 'password.php',
'r' => __DIR__ . DS . 'lib' . DS . 'r.php',
'redirect' => __DIR__ . DS . 'lib' . DS . 'redirect.php',
'remote' => __DIR__ . DS . 'lib' . DS . 'remote.php',
'response' => __DIR__ . DS . 'lib' . DS . 'response.php',
'router' => __DIR__ . DS . 'lib' . DS . 'router.php',
's' => __DIR__ . DS . 'lib' . DS . 's.php',
'server' => __DIR__ . DS . 'lib' . DS . 'server.php',
'silo' => __DIR__ . DS . 'lib' . DS . 'silo.php',
'sql' => __DIR__ . DS . 'lib' . DS . 'sql.php',
'str' => __DIR__ . DS . 'lib' . DS . 'str.php',
'system' => __DIR__ . DS . 'lib' . DS . 'system.php',
'thumb' => __DIR__ . DS . 'lib' . DS . 'thumb.php',
'timer' => __DIR__ . DS . 'lib' . DS . 'timer.php',
'toolkit' => __DIR__ . DS . 'lib' . DS . 'toolkit.php',
'tpl' => __DIR__ . DS . 'lib' . DS . 'tpl.php',
'upload' => __DIR__ . DS . 'lib' . DS . 'upload.php',
'url' => __DIR__ . DS . 'lib' . DS . 'url.php',
'v' => __DIR__ . DS . 'lib' . DS . 'v.php',
'visitor' => __DIR__ . DS . 'lib' . DS . 'visitor.php',
'xml' => __DIR__ . DS . 'lib' . DS . 'xml.php',
'yaml' => __DIR__ . DS . 'lib' . DS . 'yaml.php',
// vendors
'spyc' => __DIR__ . DS . 'vendors' . DS . 'yaml' . DS . 'yaml.php',
'abeautifulsite\\simpleimage' => __DIR__ . DS . 'vendors' . DS . 'abeautifulsite' . DS . 'SimpleImage.php',
'mimereader' => __DIR__ . DS . 'vendors' . DS . 'mimereader' . DS . 'mimereader.php',
));
// load all helpers
include(__DIR__ . DS . 'helpers.php'); |
/**
* Copyright (c) 2014-2017 by the respective copyright holders.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*/
package org.eclipse.smarthome.core.events;
/**
* An {@link EventFilter} can be provided by an {@link EventSubscriber} in order
* to receive specific {@link Event}s by an {@link EventPublisher} if the filter applies.
*
* @author Stefan Bußweiler - Initial contribution
*/
public interface EventFilter {
    /**
     * Apply the filter on an event. <p> This method is called for each subscribed {@link Event} of an
     * {@link EventSubscriber}. If the filter applies, the event will be dispatched to the
     * {@link EventSubscriber#receive(Event)} method.
     *
     * @param event the event to check (not null)
     * @return true if the filter criterion applies and the event should be delivered, false otherwise
     */
    boolean apply(Event event);
}
|
/*
* hx8369 TFT-LCD Panel Driver for the Samsung Universal board
*
* Derived from drivers/video/samsung/s3cfb_hx8369.c
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/wait.h>
#include <linux/fb.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/spi/spi.h>
#include <linux/lcd.h>
#include <linux/backlight.h>
#include <plat/gpio-cfg.h>
#include "s3cfb.h"
#include "hx8369.h"
#if defined(CONFIG_MACH_VENTURI)
#include "s3cfb_mdnie.h"
#endif
// Brightness Level
#define DIM_BL 20
#define MIN_BL 30
#define MAX_BL 255
#define MAX_GAMMA_VALUE 24 // Venturi_?
#define CRITICAL_BATTERY_LEVEL 5
// Venturi_MustDelete
#define GAMMASET_CONTROL //for 1.9/2.2 gamma control from platform
#if defined(CONFIG_MACH_VENTURI)
#define MAX_BRIGHTNESS_LEVEL 255
#define LOW_BRIGHTNESS_LEVEL 30
#define MAX_BACKLIGHT_VALUE_SONY 183 //180 // 190
#define LOW_BACKLIGHT_VALUE_SONY 13 //8 //39 // 50
#define DIM_BACKLIGHT_VALUE_SONY 13 //8 //23 // 30
#endif
// Backlight Level
typedef enum {
BACKLIGHT_LEVEL_OFF = 0,
BACKLIGHT_LEVEL_DIMMING = 1,
BACKLIGHT_LEVEL_NORMAL = 6
} backlight_level_t;
/*********** for debug **********************************************************/
#if 0
#define gprintk(fmt, x... ) printk( "%s(%d): " fmt, __FUNCTION__ ,__LINE__, ## x)
#else
#define gprintk(x...) do { } while (0)
#endif
/*******************************************************************************/
/* CABC ************************************************************************/
typedef enum
{
CABC_OFF,
CABC_UI,
CABC_IMAGE,
CABC_VIDEO,
}LCD_CABC;
static LCD_CABC cur_cabc = CABC_OFF;
/*******************************************************************************/
static int ldi_enable = 0;
backlight_level_t backlight_level = BACKLIGHT_LEVEL_OFF;
static int bd_brightness = 0;
static int on_19gamma = 0;
static DEFINE_MUTEX(spi_use);
#if defined(CONFIG_MACH_VENTURI)
#define PWM_REG_OFFSET 1
static unsigned short brightness_setting_table[] = {
0x051, 0x17f,
ENDDEF, 0x0000
};
#endif
struct s5p_lcd {
struct spi_device *g_spi;
struct lcd_device *lcd_dev;
struct backlight_device *bl_dev;
struct early_suspend early_suspend;
};
#ifdef GAMMASET_CONTROL
struct class *gammaset_class;
struct device *switch_gammaset_dev;
#endif
#ifdef CONFIG_FB_S3C_HX8369_ACL
static int acl_enable = 0;
struct class *acl_class;
struct device *switch_aclset_dev;
#endif
#ifdef CONFIG_FB_S3C_MDNIE
extern void init_mdnie_class(void);
#endif
static struct s5p_lcd lcd;
/*
* Venturi LCD Spec
* DOTCLK = FrameRate x (HSW+HBP+XRES+HFP) x (VSW+VBP+800+VFP)
* = 60 x (8+24+480+24) x (8+24+800+24) = 27,528,960 = 27.52MHz
*/
static struct s3cfb_lcd hx8369 = {
.width = 480,
.height = 800,
.p_width = 52, // Venturi_? height of lcd in mm
.p_height = 86, // Venturi_? width of lcd in mm
.bpp = 24,
.freq = 60, // Venturi_?
.timing = {
.h_fp = 32,
.h_bp = 32,
.h_sw = 14,
.v_fp = 12,
.v_fpe = 1,
.v_bp = 12,
.v_bpe = 1,
.v_sw = 8,
},
.polarity = {
.rise_vclk = 0, // video data fetch at DOTCLK falling edge
.inv_hsync = 1, // low active
.inv_vsync = 1, // low active
.inv_vden = 0, // data is vaild when DEpin is high
},
};
static void wait_ldi_enable(void);
static int hx8369_spi_write_driver(int reg)
{
u16 buf[1];
int ret;
struct spi_message msg;
struct spi_transfer xfer = {
.len = 2,
.tx_buf = buf,
};
buf[0] = reg;
spi_message_init(&msg);
spi_message_add_tail(&xfer, &msg);
ret = spi_sync(lcd.g_spi, &msg);
if (ret < 0)
pr_err("%s::%d -> spi_sync failed Err=%d\n",__func__,__LINE__,ret);
return ret ;
}
static void hx8369_spi_write(unsigned short reg)
{
hx8369_spi_write_driver(reg);
}
/*
 * Play back a command/delay table over SPI.
 *
 * The table is a flat array of 16-bit words: plain words are written to the
 * panel, a SLEEPMSEC marker means "sleep for the number of ms in the next
 * word", and an ENDDEF marker terminates the table.  The whole sequence is
 * serialised under spi_use so concurrent senders cannot interleave.
 */
static void hx8369_panel_send_sequence(const unsigned short *wbuf)
{
	int idx = 0;

	mutex_lock(&spi_use);

	gprintk("#################SPI start##########################\n");
	for (;;) {
		unsigned short op = wbuf[idx] & DEFMASK;

		if (op == ENDDEF)
			break;
		if (op == SLEEPMSEC) {
			/* delay entry: next word holds the sleep time in ms */
			msleep(wbuf[idx + 1]);
			idx += 2;
		} else {
			hx8369_spi_write(wbuf[idx]);
			idx += 1;
		}
	}
	gprintk("#################SPI end##########################\n");

	mutex_unlock(&spi_use);
}
int IsLDIEnabled(void)
{
return ldi_enable;
}
EXPORT_SYMBOL(IsLDIEnabled);
static void SetLDIEnabledFlag(int OnOff)
{
ldi_enable = OnOff;
}
#if defined(CONFIG_MACH_VENTURI)
extern Lcd_mDNIe_UI current_mDNIe_UI;
void on_cabc(void)
{
if(acl_enable == 0)
return;
// ACL ON
switch(current_mDNIe_UI)
{
case mDNIe_UI_MODE:
#if 0 //2010.12.19 disabled CABC_UI Mode, requested by younghyup.kim
if(cur_cabc != CABC_UI)
{
hx8369_panel_send_sequence(hx8369_SEQ_CABC_UI);
cur_cabc = CABC_UI;
gprintk("set hx8369_SEQ_CABC_UI\n");
}
#else
if(cur_cabc != CABC_OFF)
{
hx8369_panel_send_sequence(hx8369_SEQ_CABC_OFF);
cur_cabc = CABC_OFF;
gprintk("set hx8369_SEQ_CABC_OFF\n");
}
#endif
break;
case mDNIe_VIDEO_MODE:
case mDNIe_VIDEO_WARM_MODE:
case mDNIe_VIDEO_COLD_MODE:
case mDNIe_CAMERA_MODE:
case mDNIe_DMB_MODE:
case mDNIe_DMB_WARM_MODE:
case mDNIe_DMB_COLD_MODE:
if(cur_cabc != CABC_VIDEO)
{
hx8369_panel_send_sequence(hx8369_SEQ_CABC_VIDEO);
cur_cabc = CABC_VIDEO;
gprintk("set hx8369_SEQ_CABC_VIDEO\n");
}
break;
case mDNIe_NAVI:
if(cur_cabc != CABC_IMAGE)
{
hx8369_panel_send_sequence(hx8369_SEQ_CABC_IMAGE);
cur_cabc = CABC_IMAGE;
gprintk("set hx8369_SEQ_CABC_IMAGE\n");
}
break;
default:
#if 0 //2010.12.19 disabled CABC_UI Mode, requested by younghyup.kim
if(cur_cabc != CABC_UI)
{
hx8369_panel_send_sequence(hx8369_SEQ_CABC_UI);
cur_cabc = CABC_UI;
gprintk("set hx8369_SEQ_CABC_UI-\n");
}
#else
if(cur_cabc != CABC_OFF)
{
hx8369_panel_send_sequence(hx8369_SEQ_CABC_OFF);
cur_cabc = CABC_OFF;
gprintk("set hx8369_SEQ_CABC_OFF-\n");
}
#endif
break;
}
}
void off_cabc(void)
{
// check LDI Status
wait_ldi_enable();
if (!IsLDIEnabled())
{
printk("<<<<<<<<<<< [off_cabc] failt to CABC Mode Setting >>>>>>>>>>>>>>>>\n");
return;
}
// ACL OFF
gprintk("set hx8369_SEQ_CABC_OFF\n");
cur_cabc = CABC_OFF;
hx8369_panel_send_sequence(hx8369_SEQ_CABC_OFF);
}
//#define CABC_TEST
#ifdef CABC_TEST
static int cabc_mode = 0;
static ssize_t test_cabc_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
gprintk("[%s] cabc_mode = %d\n",__func__, cabc_mode);
return sprintf(buf,"%u\n", cabc_mode);
}
/*
 * sysfs store handler for the CABC test node.
 *
 * Parses an integer mode from @buf and sends the matching CABC sequence:
 * 1 = UI, 2 = IMAGE, 3 = VIDEO, anything else = OFF.  Updates cabc_mode
 * to mirror the mode actually sent.  Always consumes the whole write.
 */
static ssize_t test_cabc_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	int value;

	sscanf(buf, "%d", &value);

	switch(value)
	{
		case 1:
			hx8369_panel_send_sequence(hx8369_SEQ_CABC_UI); cabc_mode = 1; gprintk("[%s] set CABC_UI\n", __func__);
			break;
		case 2:
			hx8369_panel_send_sequence(hx8369_SEQ_CABC_IMAGE); cabc_mode = 2; gprintk("[%s] set CABC_IMAGE\n", __func__);
			break;
		case 3:
			hx8369_panel_send_sequence(hx8369_SEQ_CABC_VIDEO); cabc_mode = 3; gprintk("[%s] set CABC_VIDEO\n", __func__);
			/* Bug fix: the missing break here fell through into the
			 * default case, which immediately sent CABC_OFF and reset
			 * cabc_mode to 0, making mode 3 impossible to select. */
			break;
		default:
			hx8369_panel_send_sequence(hx8369_SEQ_CABC_OFF); cabc_mode = 0; gprintk("[%s] set CABC_OFF\n", __func__);
			break;
	}
	return size;
}
static DEVICE_ATTR(test_cabc,0664, test_cabc_show, test_cabc_store);
#endif
#endif
/*
 * Bring the LCD driver IC out of reset state: send the panel init and
 * backlight setting sequences, then mark the LDI as enabled so that
 * wait_ldi_enable() callers may proceed.
 */
void hx8369_ldi_init(void)
{
	gprintk("[%s]\n", __func__);
	// msleep(120); // add by SMD
	hx8369_panel_send_sequence(hx8369_SEQ_SETTING);
	hx8369_panel_send_sequence(hx8369_BACKLIGHT_SETTING);

	SetLDIEnabledFlag(1);

#ifdef CONFIG_FB_S3C_HX8369_ACL
	/* Fix: acl_enable (and on_cabc) are only declared when
	 * CONFIG_FB_S3C_HX8369_ACL is set; referencing them unguarded
	 * broke the build for configs without ACL support. */
	if(acl_enable)
		on_cabc();
#endif
}
void hx8369_ldi_enable(void)
{
}
void hx8369_ldi_disable(void)
{
hx8369_panel_send_sequence(hx8369_SEQ_DISPLAY_OFF);
SetLDIEnabledFlag(0);
printk(KERN_DEBUG "LDI disable ok\n");
//pr_info("%s::%d -> ldi disabled\n",__func__,__LINE__);
}
void s3cfb_set_lcd_info(struct s3cfb_global *ctrl)
{
hx8369.init_ldi = NULL;
ctrl->lcd = &hx8369;
}
// Venturi
#if defined (CONFIG_MACH_VENTURI)
/*
 * Translate a platform brightness level (0..MAX_BRIGHTNESS_LEVEL) into the
 * PWM duty value expected by the panel.
 *
 * Levels at or above LOW_BRIGHTNESS_LEVEL are mapped linearly onto
 * [LOW_BACKLIGHT_VALUE_SONY, MAX_BACKLIGHT_VALUE_SONY]; levels between 1 and
 * LOW_BRIGHTNESS_LEVEL-1 all use the dim value; 0 passes through as 0.
 * The result is clamped to MAX_BACKLIGHT_VALUE_SONY and never rounds a
 * non-zero request down to a fully-off PWM value.
 */
static int get_pwm_value_from_bl(int level)
{
	int pwm;

	/* SMD LCD: clamp requests above the supported ceiling. */
	if(level > MAX_BRIGHTNESS_LEVEL)
		level = MAX_BRIGHTNESS_LEVEL;

	if(level >= LOW_BRIGHTNESS_LEVEL) {
		/* linear interpolation between the low and max PWM values */
		pwm = (level - LOW_BRIGHTNESS_LEVEL)
			* (MAX_BACKLIGHT_VALUE_SONY - LOW_BACKLIGHT_VALUE_SONY)
			/ (MAX_BRIGHTNESS_LEVEL - LOW_BRIGHTNESS_LEVEL)
			+ LOW_BACKLIGHT_VALUE_SONY;
	} else if(level > 0) {
		pwm = DIM_BACKLIGHT_VALUE_SONY;
	} else {
		pwm = level;
	}

	/* led_val must be less than or equal to MAX_BACKLIGHT_VALUE */
	if(pwm > MAX_BACKLIGHT_VALUE_SONY)
		pwm = MAX_BACKLIGHT_VALUE_SONY;

	/* keep a non-zero brightness request from producing a dark panel */
	if(level && !pwm)
		pwm = 1;

	return pwm;
}
static int update_brightness(int level)
{
unsigned int led_val;
// check LDI Status
wait_ldi_enable();
if (!IsLDIEnabled())
{
printk("<<<<<<<<<<< [update_brightness] brightness setting error >>>>>>>>>>>>>>>>\n");
return 0;
}
led_val = get_pwm_value_from_bl(level);
brightness_setting_table[PWM_REG_OFFSET] = 0x100 | (led_val & 0xff);
gprintk("[bl]%d(%d)\n", level, brightness_setting_table[PWM_REG_OFFSET]&0xff);
hx8369_panel_send_sequence(brightness_setting_table);
return 0;
}
void backlight_onoff(backlight_level_t f_onoff)
{
gprintk("[%s]=%d\n", __func__, f_onoff);
if(f_onoff)
{
// on
if (gpio_is_valid(GPIO_BACKLIGHT_EN))
{
if (gpio_request(GPIO_BACKLIGHT_EN, "GPD0"))
printk("Failed to request GPIO_BACKLIGHT_EN!\n");
s3c_gpio_cfgpin(GPIO_BACKLIGHT_EN, S3C_GPIO_OUTPUT);
gpio_direction_output(GPIO_BACKLIGHT_EN, (int)1);
}
s3c_gpio_setpull(GPIO_BACKLIGHT_EN, S3C_GPIO_PULL_NONE);
gpio_free(GPIO_BACKLIGHT_EN);
backlight_level = BACKLIGHT_LEVEL_NORMAL;
//printk("[VIBETONZ] ENABLE\n");
}
else
{
// off
if (gpio_is_valid(GPIO_BACKLIGHT_EN))
{
if (gpio_request(GPIO_BACKLIGHT_EN, "GPD0"))
printk("Failed to request GPIO_BACKLIGHT_EN!\n");
s3c_gpio_cfgpin(GPIO_BACKLIGHT_EN, S3C_GPIO_OUTPUT);
gpio_direction_output(GPIO_BACKLIGHT_EN, (int)0);
}
s3c_gpio_setpull(GPIO_BACKLIGHT_EN, S3C_GPIO_PULL_NONE);
gpio_free(GPIO_BACKLIGHT_EN);
backlight_level = BACKLIGHT_LEVEL_OFF;
}
}
EXPORT_SYMBOL(backlight_onoff);
void hx8369_backlight_init(void)
{
backlight_onoff(BACKLIGHT_LEVEL_NORMAL);
}
void hx8369_backlight_resume(void)
{
hx8369_ldi_init();
backlight_onoff(BACKLIGHT_LEVEL_NORMAL);
printk(KERN_DEBUG "LDI enable ok\n");
pr_info("%s::%d -> ldi initialized\n",__func__,__LINE__);
}
void hx8369_backlight_suspend(void)
{
backlight_onoff(BACKLIGHT_LEVEL_OFF);
hx8369_ldi_disable();
printk(KERN_DEBUG "LDI disable ok\n");
pr_info("%s::%d -> ldi disabled\n",__func__,__LINE__);
}
#endif
int s5p_lcd_set_powerOnOff(int power)
{
printk("[%s][minhyodebug]=================\n", __func__);
if (power)
{
printk("[%s] = on\n", __func__);
// msleep(120); // add by SMD
hx8369_panel_send_sequence(hx8369_SEQ_SETTING);
hx8369_panel_send_sequence(hx8369_BACKLIGHT_SETTING);
backlight_onoff(BACKLIGHT_LEVEL_NORMAL);
SetLDIEnabledFlag(1);
printk(KERN_DEBUG "LDI enable ok\n");
pr_info("%s::%d -> ldi initialized\n",__func__,__LINE__);
}
else
{
hx8369_panel_send_sequence(hx8369_SEQ_DISPLAY_OFF);
backlight_onoff(BACKLIGHT_LEVEL_OFF);
SetLDIEnabledFlag(0);
printk(KERN_DEBUG "LDI disable ok\n");
pr_info("%s::%d -> ldi disabled\n",__func__,__LINE__);
}
return 0;
}
// LCD ON/OFF function
/*
 * lcd_ops power hook: delegates to s5p_lcd_set_powerOnOff().
 * Fix: the printk format lacked a terminating newline, which merged this
 * message with the next line in the kernel log.
 */
static int s5p_lcd_set_power(struct lcd_device *ld, int power)
{
	printk("s5p_lcd_set_power is called:============================== %d\n", power);
	s5p_lcd_set_powerOnOff(power);
	return 0;
}
static int s5p_lcd_check_fb(struct lcd_device *lcddev, struct fb_info *fi)
{
return 0;
}
static struct lcd_ops s5p_lcd_ops = {
.set_power = s5p_lcd_set_power,
.check_fb = s5p_lcd_check_fb,
};
#ifdef GAMMASET_CONTROL //for 1.9/2.2 gamma control from platform
static ssize_t gammaset_file_cmd_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
gprintk("called %s \n",__func__);
return sprintf(buf,"%u\n", bd_brightness);
}
static ssize_t gammaset_file_cmd_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
int value;
sscanf(buf, "%d", &value);
// not support for TFT-LCD
return size;
}
static DEVICE_ATTR(gammaset_file_cmd,0664, gammaset_file_cmd_show, gammaset_file_cmd_store);
#endif
#ifdef CONFIG_FB_S3C_HX8369_ACL
static ssize_t aclset_file_cmd_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf(buf,"%u\n", acl_enable);
}
static ssize_t aclset_file_cmd_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size)
{
int value;
sscanf(buf, "%d", &value);
gprintk("CABC = %d\n", value );
if (!IsLDIEnabled()) {
printk(KERN_DEBUG "[acl set] return because LDI is disabled, input value = %d \n", value);
return size;
}
if ((value != 0) && (value != 1)) {
gprintk(KERN_DEBUG "\naclset_file_cmd_store value is same : value(%d)\n", value);
return size;
}
if (acl_enable != value)
{
acl_enable = value;
if (acl_enable == 1)
on_cabc();
else
off_cabc();
}
return size;
}
static DEVICE_ATTR(aclset_file_cmd,0664, aclset_file_cmd_show, aclset_file_cmd_store);
#endif
#ifdef CONFIG_FB_S3C_MDNIE_TUNINGMODE_FOR_BACKLIGHT
extern void mDNIe_Mode_set_for_backlight(u16 *buf);
extern u16 *pmDNIe_Gamma_set[];
extern int pre_val;
extern int autobrightness_mode;
#endif
/*
 * Busy-wait (with 10ms sleeps) until the LDI reports enabled, giving up
 * after 100 attempts (~1 second).  Callers must still check IsLDIEnabled()
 * afterwards — a timeout here does not guarantee the panel is up.
 */
static void wait_ldi_enable(void)
{
	int attempts = 100;

	while (attempts-- > 0) {
		gprintk("ldi_enable : %d \n", ldi_enable);
		if (IsLDIEnabled())
			break;
		msleep(10);
	}
}
#if 0
static void off_display(void)
{
msleep(20);
hx8369_panel_send_sequence(hx8369_SEQ_DISPLAY_OFF);
bd_brightness = 0;
backlight_level = BACKLIGHT_LEVEL_OFF;
SetLDIEnabledFlag(0);
printk(KERN_DEBUG "LDI disable ok\n");
pr_info("%s::%d -> ldi disabled\n",__func__,__LINE__);
}
#endif
static int s5p_bl_update_status(struct backlight_device* bd)
{
int br = bd->props.brightness; // Brightness Level
backlight_level_t level = BACKLIGHT_LEVEL_OFF; // Brightness Mode
int bl = 0; // Backlight Level
// check brightness level
if(br < 0)
return 0;
// check LDI Status
wait_ldi_enable();
if (!IsLDIEnabled())
return 0;
// decide Brightness Mode & Backlight Level
if(br == 0) { level = BACKLIGHT_LEVEL_OFF; }
else if(br < MIN_BL) { level = BACKLIGHT_LEVEL_DIMMING; }
else { level = BACKLIGHT_LEVEL_NORMAL; }
update_brightness(br);
// update Backlight & Brightness & Screen Mode
bd_brightness = br; // Brightness Level
backlight_level = level; // Backlight Level
gprintk("[%s] br = %d, bl = %d\n", __func__, br, bl);
return 0;
}
static int s5p_bl_get_brightness(struct backlight_device* bd)
{
gprintk("[%s] Brightness Read = %d\n", __func__, bd_brightness);
return bd_brightness;
}
static struct backlight_ops s5p_bl_ops = {
.update_status = s5p_bl_update_status,
.get_brightness = s5p_bl_get_brightness,
};
void hx8396_early_suspend(struct early_suspend *h)
{
hx8369_backlight_suspend();
return ;
}
void hx8396_late_resume(struct early_suspend *h)
{
hx8369_backlight_resume();
return ;
}
/*
 * SPI probe: configure the bus, register the lcd/backlight devices and the
 * gammaset/aclset sysfs nodes, bring up the backlight, and hook early
 * suspend/resume.
 */
static int __init hx8396_probe(struct spi_device *spi)
{
	int ret;

	printk("hx8396_probe INIT ..........\n");
	gprintk("[%s] \n", __func__);

	spi->bits_per_word = 9;
	ret = spi_setup(spi);
	/*
	 * Bug fix: spi_setup() failure was previously only checked at the
	 * very end of probe — after every device and sysfs node had already
	 * been registered — and the error path then returned 0 (success).
	 * Bail out early and propagate the real error code instead.
	 */
	if (ret < 0) {
		pr_err("%s::%d-> hx8369 probe failed Err=%d\n",__func__,__LINE__,ret);
		return ret;
	}

	lcd.g_spi = spi;
	lcd.lcd_dev = lcd_device_register("s5p_lcd",&spi->dev,&lcd,&s5p_lcd_ops);
	lcd.bl_dev = backlight_device_register("s5p_bl",&spi->dev,&lcd,&s5p_bl_ops,NULL);
	lcd.bl_dev->props.max_brightness = 255;
	dev_set_drvdata(&spi->dev,&lcd);

	SetLDIEnabledFlag(1);

#ifdef GAMMASET_CONTROL //for 1.9/2.2 gamma control from platform
	gammaset_class = class_create(THIS_MODULE, "gammaset");
	if (IS_ERR(gammaset_class))
		pr_err("Failed to create class(gammaset_class)!\n");

	switch_gammaset_dev = device_create(gammaset_class, NULL, 0, NULL, "switch_gammaset");
	if (IS_ERR(switch_gammaset_dev))
		pr_err("Failed to create device(switch_gammaset_dev)!\n");

	if (device_create_file(switch_gammaset_dev, &dev_attr_gammaset_file_cmd) < 0)
		pr_err("Failed to create device file(%s)!\n", dev_attr_gammaset_file_cmd.attr.name);
#endif

#ifdef CONFIG_FB_S3C_HX8369_ACL //ACL On,Off
	acl_class = class_create(THIS_MODULE, "aclset");
	if (IS_ERR(acl_class))
		pr_err("Failed to create class(acl_class)!\n");

	switch_aclset_dev = device_create(acl_class, NULL, 0, NULL, "switch_aclset");
	if (IS_ERR(switch_aclset_dev))
		pr_err("Failed to create device(switch_aclset_dev)!\n");

	if (device_create_file(switch_aclset_dev, &dev_attr_aclset_file_cmd) < 0)
		pr_err("Failed to create device file(%s)!\n", dev_attr_aclset_file_cmd.attr.name);
#endif

	hx8369_backlight_init();

#ifdef CONFIG_FB_S3C_MDNIE
	init_mdnie_class(); //set mDNIe UI mode, Outdoormode
#endif

#ifdef CONFIG_HAS_EARLYSUSPEND
	lcd.early_suspend.suspend = hx8396_early_suspend;
	lcd.early_suspend.resume = hx8396_late_resume;
	lcd.early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB - 1;
	register_early_suspend(&lcd.early_suspend);
#endif

	pr_info("%s::%d->hx8369 probed successfuly\n",__func__,__LINE__);
	return 0;
}
#if 0
#ifdef CONFIG_PM // add by ksoo (2009.09.07)
int hx8369_suspend(struct platform_device *pdev, pm_message_t state)
{
pr_info("%s::%d->hx8369 suspend called\n",__func__,__LINE__);
hx8369_ldi_disable();
return 0;
}
int hx8369_resume(struct platform_device *pdev, pm_message_t state)
{
pr_info("%s::%d -> hx8369 resume called\n",__func__,__LINE__);
hx8369_ldi_init();
hx8369_ldi_enable();
return 0;
}
#endif /* CONFIG_PM */
#endif
static struct spi_driver hx8369_driver = {
.driver = {
.name = "hx8369",
.owner = THIS_MODULE,
},
.probe = hx8396_probe,
.remove = __exit_p(hx8369_remove),
};
static int __init hx8369_init(void)
{
printk("hx8369_init INIT ..........\n");
return spi_register_driver(&hx8369_driver);
}
static void __exit hx8369_exit(void)
{
spi_unregister_driver(&hx8369_driver);
}
module_init(hx8369_init);
module_exit(hx8369_exit);
MODULE_AUTHOR("SAMSUNG");
MODULE_DESCRIPTION("hx8369 LDI driver");
MODULE_LICENSE("GPL");
|
// Default ECO Layout
// KLE here : http://www.keyboard-layout-editor.com/#/gists/0733eca6b4cb88ff9d7de746803f4039
#include QMK_KEYBOARD_H
extern keymap_config_t keymap_config;
// Each layer gets a name for readability, which is then used in the keymap matrix below.
// The underscores don't mean anything - you can have a layer called STUFF or any other name.
// Layer names don't all need to be of the same length, obviously, and you can also skip them
// entirely and just use numbers.
#define _QWERTY 0
#define _FN1 2
#define _FN2 3
enum eco_keycodes {
QWERTY = SAFE_RANGE
};
// Defines for task manager and such
#define CALTDEL LCTL(LALT(KC_DEL))
#define TSKMGR LCTL(LSFT(KC_ESC))
const uint16_t PROGMEM keymaps[][MATRIX_ROWS][MATRIX_COLS] = {
/* Qwerty
* ,-------------------------------------------------------------------------------------------------.
* | Esc | Q | W | E | R | T | Y | U | I | O | P | [ | ] | Bksp |
* |------+------+------+------+------+------+------+------+------+------+------+------+------+------|
* | Tab | A | S | D | F | G | H | J | K | L | ; | ' | Enter| \ |
* |------+------+------+------+------+------+------+------+------+------+------+------+------+------|
* | Shift| Z | X | C | V | B | N | M | , | . | / | Shift| Up | Del |
* |------+------+------+------+------+------+------+------+------+------+------+------+------+------|
* | Ctrl | GUI | Alt | Del | FN1 | FN1 | Space| Space| FN2 | FN2 | Ctrl | Left | Down | Right|
* `-------------------------------------------------------------------------------------------------'
*/
[_QWERTY] = LAYOUT(
KC_ESC, KC_Q, KC_W, KC_E, KC_R, KC_T, KC_Y, KC_U, KC_I, KC_O, KC_P, KC_LBRC, KC_RBRC, KC_BSPC,
KC_TAB, KC_A, KC_S, KC_D, KC_F, KC_G, KC_H, KC_J, KC_K, KC_L, KC_SCLN, KC_QUOT, KC_ENT, KC_BSLS,
KC_LSFT, KC_Z, KC_X, KC_C, KC_V, KC_B, KC_N, KC_M, KC_COMM, KC_DOT, KC_SLSH, KC_RSFT, KC_UP, KC_DEL,
KC_LCTL, KC_LGUI,KC_LALT, KC_DEL, MO(_FN1), MO(_FN1), KC_SPC, KC_SPC, MO(_FN2), MO(_FN2), KC_RCTL, KC_LEFT, KC_DOWN, KC_RGHT
),
/* FN1
* ,-------------------------------------------------------------------------------------------------.
* | | ! | @ | # | $ | % | ^ | & | * | ( | ) | _ | + | |
* |------+------+------+------+------+------+------+------+------+------+------+------+------+------|
* |caltde| F1 | F2 | F3 | F4 | F5 | F6 | F7 | F8 | F9 | F10 | F11 | F12 | |
* |------+------+------+------+------+------+------+------+------+------+------+------+------+------|
* | | | | | | | | | | | | | | |
* |------+------+------+------+------+------+------+------+------+------+------+------+------+------|
* | | | | | | | | | | | | | | RESET|
* `-------------------------------------------------------------------------------------------------'
*/
[_FN1] = LAYOUT(
_______, KC_EXLM, KC_AT, KC_HASH, KC_DLR, KC_PERC, KC_CIRC, KC_AMPR, KC_ASTR, KC_LPRN, KC_RPRN, KC_UNDS, KC_PLUS, _______,
CALTDEL, KC_F1, KC_F2, KC_F3, KC_F4, KC_F5, KC_F6, KC_F7, KC_F8, KC_F9, KC_F10, KC_F11, KC_F12, _______,
_______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______,
_______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, RESET
),
/* Raise
* ,-------------------------------------------------------------------------------------------------.
* | | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 0 | - | = | |
* |------+------+------+------+------+------+------+------+------+------+------+------+------+------|
* |Taskmg| F1 | F2 | F3 | F4 | F5 | F6 | F7 | F8 | F9 | F10 | F11 | F12 | |
* |------+------+------+------+------+------+------+------+------+------+------+------+------+------|
* | | | | | | | | | | | | | | |
* |------+------+------+------+------+------+------+------+------+------+------+------+------+------|
* | | | | | | | | | | | | | | |
* `-------------------------------------------------------------------------------------------------'
*/
[_FN2] = LAYOUT(
_______, KC_1, KC_2, KC_3, KC_4, KC_5, KC_6, KC_7, KC_8, KC_9, KC_0, KC_MINS, KC_EQL, _______,
TSKMGR, KC_F1, KC_F2, KC_F3, KC_F4, KC_F5, KC_F6, KC_F7, KC_F8, KC_F9, KC_F10, KC_F11, KC_F12, _______,
_______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______,
_______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______, _______
)
};
/* Persist the default layer to EEPROM and apply it to the live keymap
 * state.  (Name keeps the historical "persistant" spelling because
 * callers elsewhere use it.) */
void persistant_default_layer_set(uint16_t default_layer) {
  eeconfig_update_default_layer(default_layer);
  default_layer_set(default_layer);
}
/* Handle custom keycodes on key events.
 *
 * Returns false when the keycode was fully handled here (stops further
 * QMK processing), true to let default processing continue. */
bool process_record_user(uint16_t keycode, keyrecord_t *record) {
  switch (keycode) {
    case QWERTY:
      /* Act on press only, not on release. */
      if (record->event.pressed) {
        persistant_default_layer_set(1UL<<_QWERTY);
      }
      return false;  /* unreachable `break` after return removed */
  }
  return true;
}
|
#!/bin/sh
#
# Copyright (c) 2009, 2010 David Aguilar
#
test_description='git-difftool
Testing basic diff tool invocation
'
. ./test-lib.sh
# A lone newline, used below to splice line breaks into expected strings.
LF='
'
remove_config_vars()
{
	# Clear every configuration variable consulted by git-difftool.
	# --unset failures (variable not set) are deliberately ignored.
	for config_var in diff.tool diff.guitool difftool.test-tool.cmd \
		difftool.prompt merge.tool mergetool.test-tool.cmd \
		mergetool.prompt
	do
		git config --unset $config_var
	done
	return 0
}
restore_test_defaults()
{
	# Restores the test defaults used by several tests.
	# The git config calls are &&-chained so a failure of any of them
	# propagates as this function's exit status (previously the last
	# two were unchained and could fail silently).
	remove_config_vars
	unset GIT_DIFF_TOOL
	unset GIT_DIFFTOOL_PROMPT
	unset GIT_DIFFTOOL_NO_PROMPT
	git config diff.tool test-tool &&
	git config difftool.test-tool.cmd 'cat $LOCAL' &&
	git config difftool.bogus-tool.cmd false
}
prompt_given()
{
	# Succeed iff $1 is exactly the prompt difftool prints for test-tool.
	case "$1" in
	"Hit return to launch 'test-tool': branch")
		return 0
		;;
	*)
		return 1
		;;
	esac
}
# Create a file on master and change it on branch
test_expect_success PERL 'setup' '
	echo master >file &&
	git add file &&
	git commit -m "added file" &&
	git checkout -b branch master &&
	echo branch >file &&
	git commit -a -m "branch changed file" &&
	git checkout master
'
# Configure a custom difftool.<tool>.cmd and use it.
# $LOCAL is the branch-side content, $REMOTE the worktree side, so the
# first invocation shows "master" and the restored default shows "branch".
test_expect_success PERL 'custom commands' '
	restore_test_defaults &&
	git config difftool.test-tool.cmd "cat \$REMOTE" &&
	diff=$(git difftool --no-prompt branch) &&
	test "$diff" = "master" &&
	restore_test_defaults &&
	diff=$(git difftool --no-prompt branch) &&
	test "$diff" = "branch"
'
# Ensures that git-difftool ignores bogus --tool values.
# Note: no && after the difftool invocation — it is expected to fail
# and its exit status is checked explicitly via $? on the next line.
test_expect_success PERL 'difftool ignores bad --tool values' '
	diff=$(git difftool --no-prompt --tool=bad-tool branch)
	test "$?" = 1 &&
	test "$diff" = ""
'
# --gui must select diff.guitool over diff.tool and merge.tool
test_expect_success PERL 'difftool honors --gui' '
	git config merge.tool bogus-tool &&
	git config diff.tool bogus-tool &&
	git config diff.guitool test-tool &&
	diff=$(git difftool --no-prompt --gui branch) &&
	test "$diff" = "branch" &&
	restore_test_defaults
'
# --gui falls back to diff.tool when diff.guitool is not configured
test_expect_success PERL 'difftool --gui works without configured diff.guitool' '
	git config diff.tool test-tool &&
	diff=$(git difftool --no-prompt --gui branch) &&
	test "$diff" = "branch" &&
	restore_test_defaults
'
# Specify the diff tool using $GIT_DIFF_TOOL
test_expect_success PERL 'GIT_DIFF_TOOL variable' '
	test_might_fail git config --unset diff.tool &&
	GIT_DIFF_TOOL=test-tool &&
	export GIT_DIFF_TOOL &&
	diff=$(git difftool --no-prompt branch) &&
	test "$diff" = "branch" &&
	restore_test_defaults
'
# Test the $GIT_*_TOOL variables and ensure
# that $GIT_DIFF_TOOL always wins unless --tool is specified
test_expect_success PERL 'GIT_DIFF_TOOL overrides' '
	git config diff.tool bogus-tool &&
	git config merge.tool bogus-tool &&
	GIT_DIFF_TOOL=test-tool &&
	export GIT_DIFF_TOOL &&
	diff=$(git difftool --no-prompt branch) &&
	test "$diff" = "branch" &&
	GIT_DIFF_TOOL=bogus-tool &&
	export GIT_DIFF_TOOL &&
	diff=$(git difftool --no-prompt --tool=test-tool branch) &&
	test "$diff" = "branch" &&
	restore_test_defaults
'
# Test that we don't have to pass --no-prompt to difftool
# when $GIT_DIFFTOOL_NO_PROMPT is true
test_expect_success PERL 'GIT_DIFFTOOL_NO_PROMPT variable' '
	GIT_DIFFTOOL_NO_PROMPT=true &&
	export GIT_DIFFTOOL_NO_PROMPT &&
	diff=$(git difftool branch) &&
	test "$diff" = "branch" &&
	restore_test_defaults
'
# git-difftool supports the difftool.prompt variable.
# Test that GIT_DIFFTOOL_PROMPT can override difftool.prompt = false
test_expect_success PERL 'GIT_DIFFTOOL_PROMPT variable' '
	git config difftool.prompt false &&
	GIT_DIFFTOOL_PROMPT=true &&
	export GIT_DIFFTOOL_PROMPT &&
	prompt=$(echo | git difftool branch | tail -1) &&
	prompt_given "$prompt" &&
	restore_test_defaults
'
# Test that we don't have to pass --no-prompt when difftool.prompt is false
test_expect_success PERL 'difftool.prompt config variable is false' '
	git config difftool.prompt false &&
	diff=$(git difftool branch) &&
	test "$diff" = "branch" &&
	restore_test_defaults
'
# Test that we don't have to pass --no-prompt when mergetool.prompt is false
test_expect_success PERL 'difftool merge.prompt = false' '
	test_might_fail git config --unset difftool.prompt &&
	git config mergetool.prompt false &&
	diff=$(git difftool branch) &&
	test "$diff" = "branch" &&
	restore_test_defaults
'
# Test that the -y flag can override difftool.prompt = true
test_expect_success PERL 'difftool.prompt can overridden with -y' '
	git config difftool.prompt true &&
	diff=$(git difftool -y branch) &&
	test "$diff" = "branch" &&
	restore_test_defaults
'
# Test that the --prompt flag can override difftool.prompt = false
test_expect_success PERL 'difftool.prompt can overridden with --prompt' '
	git config difftool.prompt false &&
	prompt=$(echo | git difftool --prompt branch | tail -1) &&
	prompt_given "$prompt" &&
	restore_test_defaults
'
# Test that the last flag passed on the command-line wins
test_expect_success PERL 'difftool last flag wins' '
	diff=$(git difftool --prompt --no-prompt branch) &&
	test "$diff" = "branch" &&
	restore_test_defaults &&
	prompt=$(echo | git difftool --no-prompt --prompt branch | tail -1) &&
	prompt_given "$prompt" &&
	restore_test_defaults
'
# git-difftool falls back to git-mergetool config variables
# so test that behavior here
test_expect_success PERL 'difftool + mergetool config variables' '
	remove_config_vars &&
	git config merge.tool test-tool &&
	git config mergetool.test-tool.cmd "cat \$LOCAL" &&
	diff=$(git difftool --no-prompt branch) &&
	test "$diff" = "branch" &&
	# set merge.tool to something bogus, diff.tool to test-tool
	git config merge.tool bogus-tool &&
	git config diff.tool test-tool &&
	diff=$(git difftool --no-prompt branch) &&
	test "$diff" = "branch" &&
	restore_test_defaults
'
# difftool.<tool>.path should override where the tool executable lives;
# using echo makes the tool print its file arguments instead of diffing.
test_expect_success PERL 'difftool.<tool>.path' '
	git config difftool.tkdiff.path echo &&
	diff=$(git difftool --tool=tkdiff --no-prompt branch) &&
	git config --unset difftool.tkdiff.path &&
	lines=$(echo "$diff" | grep file | wc -l) &&
	test "$lines" -eq 1 &&
	restore_test_defaults
'
# --extcmd/-x bypass the configured tools entirely; the given command is
# invoked with the two files to compare as its arguments.
test_expect_success PERL 'difftool --extcmd=cat' '
	diff=$(git difftool --no-prompt --extcmd=cat branch) &&
	test "$diff" = branch"$LF"master
'
test_expect_success PERL 'difftool --extcmd cat' '
	diff=$(git difftool --no-prompt --extcmd cat branch) &&
	test "$diff" = branch"$LF"master
'
test_expect_success PERL 'difftool -x cat' '
	diff=$(git difftool --no-prompt -x cat branch) &&
	test "$diff" = branch"$LF"master
'
# The extcmd arguments: $1 is the path name, then the two sides follow.
test_expect_success PERL 'difftool --extcmd echo arg1' '
	diff=$(git difftool --no-prompt --extcmd sh\ -c\ \"echo\ \$1\" branch) &&
	test "$diff" = file
'
test_expect_success PERL 'difftool --extcmd cat arg1' '
	diff=$(git difftool --no-prompt --extcmd sh\ -c\ \"cat\ \$1\" branch) &&
	test "$diff" = master
'
test_expect_success PERL 'difftool --extcmd cat arg2' '
	diff=$(git difftool --no-prompt --extcmd sh\ -c\ \"cat\ \$2\" branch) &&
	test "$diff" = branch
'
test_done
|
<?php
namespace Drupal\FunctionalTests\Rest;
use Drupal\Tests\rest\Functional\BasicAuthResourceTestTrait;
use Drupal\Tests\rest\Functional\EntityResource\XmlEntityNormalizationQuirksTrait;
/**
* @group rest
*/
class BaseFieldOverrideXmlBasicAuthTest extends BaseFieldOverrideResourceTestBase {
  // Send credentials via HTTP Basic authentication on REST requests.
  use BasicAuthResourceTestTrait;
  // Compensate for quirks of the XML normalization when comparing entities.
  use XmlEntityNormalizationQuirksTrait;
  /**
   * Modules to install; basic_auth provides the authentication provider.
   *
   * {@inheritdoc}
   */
  public static $modules = ['basic_auth'];
  /**
   * The serialization format under test.
   *
   * {@inheritdoc}
   */
  protected static $format = 'xml';
  /**
   * The MIME type sent/expected in Content-Type headers.
   *
   * {@inheritdoc}
   */
  protected static $mimeType = 'text/xml; charset=UTF-8';
  /**
   * {@inheritdoc}
   */
  protected $defaultTheme = 'stark';
  /**
   * The authentication mechanism exercised by this test.
   *
   * {@inheritdoc}
   */
  protected static $auth = 'basic_auth';
}
|
<?php
// Generic handler for admin POST requests: plugins process their form
// submissions by hooking the dynamic "admin_post_{action}" action.
define('WP_ADMIN', TRUE);
// Bootstrap WordPress; ABSPATH is already defined when loaded from
// within an existing WordPress context.
if ( defined('ABSPATH') )
	require_once(ABSPATH . 'wp-load.php');
else
	require_once('../wp-load.php');
require_once(ABSPATH . 'wp-admin/includes/admin.php');
// Only authenticated users may reach the admin action handlers.
if ( !wp_validate_auth_cookie() )
	wp_die(__('Cheatin’ uh?'));
nocache_headers();
do_action('admin_init');
// Build the hook name from the (unvalidated) request "action" parameter;
// handlers registered on admin_post_{action} are responsible for their
// own capability and nonce checks.
$action = 'admin_post';
if ( !empty($_REQUEST['action']) )
	$action .= '_' . $_REQUEST['action'];
do_action($action);
?> |
/*
* kernel/workqueue.c - generic async execution with shared worker pool
*
* Copyright (C) 2002 Ingo Molnar
*
* Derived from the taskqueue/keventd code by:
* David Woodhouse <dwmw2@infradead.org>
* Andrew Morton
* Kai Petzke <wpp@marie.physik.tu-berlin.de>
* Theodore Ts'o <tytso@mit.edu>
*
* Made to use alloc_percpu by Christoph Lameter.
*
* Copyright (C) 2010 SUSE Linux Products GmbH
* Copyright (C) 2010 Tejun Heo <tj@kernel.org>
*
 * This is the generic async execution mechanism.  Work items are
* executed in process context. The worker pool is shared and
* automatically managed. There is one worker pool for each CPU and
* one extra for works which are better served by workers which are
* not bound to any specific CPU.
*
* Please read Documentation/workqueue.txt for details.
*/
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>
#include <mach/sec_debug.h>
#include "workqueue_sched.h"
enum {
	/* global_cwq flags */
	GCWQ_MANAGE_WORKERS	= 1 << 0,	/* need to manage workers */
	GCWQ_MANAGING_WORKERS	= 1 << 1,	/* managing workers */
	GCWQ_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
	GCWQ_FREEZING		= 1 << 3,	/* freeze in progress */
	GCWQ_HIGHPRI_PENDING	= 1 << 4,	/* highpri works on queue */
	/* worker flags */
	WORKER_STARTED		= 1 << 0,	/* started */
	WORKER_DIE		= 1 << 1,	/* die die die */
	WORKER_IDLE		= 1 << 2,	/* is idle */
	WORKER_PREP		= 1 << 3,	/* preparing to run works */
	WORKER_ROGUE		= 1 << 4,	/* not bound to any cpu */
	WORKER_REBIND		= 1 << 5,	/* mom is home, come back */
	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */
	/* a worker carrying any of these flags is not counted as running */
	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
				  WORKER_CPU_INTENSIVE | WORKER_UNBOUND,
	/* gcwq->trustee_state */
	TRUSTEE_START		= 0,		/* start */
	TRUSTEE_IN_CHARGE	= 1,		/* trustee in charge of gcwq */
	TRUSTEE_BUTCHER		= 2,		/* butcher workers */
	TRUSTEE_RELEASE		= 3,		/* release workers */
	TRUSTEE_DONE		= 4,		/* trustee is done */
	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
	BUSY_WORKER_HASH_SIZE	= 1 << BUSY_WORKER_HASH_ORDER,
	BUSY_WORKER_HASH_MASK	= BUSY_WORKER_HASH_SIZE - 1,
	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */
	MAYDAY_INITIAL_TIMEOUT	= HZ / 100 >= 2 ? HZ / 100 : 2,
						/* call for help after 10ms
						   (min two ticks) */
	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
	CREATE_COOLDOWN		= HZ,		/* time to breathe after fail */
	TRUSTEE_COOLDOWN	= HZ / 10,	/* for trustee draining */
	/*
	 * Rescue workers are used only on emergencies and shared by
	 * all cpus.  Give -20.
	 */
	RESCUER_NICE_LEVEL	= -20,
};
/*
* Structure fields follow one of the following exclusion rules.
*
* I: Modifiable by initialization/destruction paths and read-only for
* everyone else.
*
* P: Preemption protected. Disabling preemption is enough and should
* only be modified and accessed from the local cpu.
*
* L: gcwq->lock protected. Access with gcwq->lock held.
*
* X: During normal operation, modification requires gcwq->lock and
* should be done only from local cpu. Either disabling preemption
* on local cpu or grabbing gcwq->lock is enough for read access.
* If GCWQ_DISASSOCIATED is set, it's identical to L.
*
* F: wq->flush_mutex protected.
*
* W: workqueue_lock protected.
*/
struct global_cwq;
/*
* The poor guys doing the actual heavy lifting. All on-duty workers
* are either serving the manager role, on idle list or on busy hash.
*/
struct worker {
	/* on idle list while idle, on busy hash table while busy;
	 * never both, so the two link types can share storage */
	union {
		struct list_head	entry;	/* L: while idle */
		struct hlist_node	hentry;	/* L: while busy */
	};
	struct work_struct	*current_work;	/* L: work being processed */
	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
	struct list_head	scheduled;	/* L: scheduled works */
	struct task_struct	*task;		/* I: worker task */
	struct global_cwq	*gcwq;		/* I: the associated gcwq */
	/* 64 bytes boundary on 64bit, 32 on 32bit */
	unsigned long		last_active;	/* L: last active timestamp */
	unsigned int		flags;		/* X: flags */
	int			id;		/* I: worker id */
	struct work_struct	rebind_work;	/* L: rebind worker to cpu */
};
/*
* Global per-cpu workqueue. There's one and only one for each cpu
* and all works are queued and processed here regardless of their
* target workqueues.
*/
struct global_cwq {
	spinlock_t		lock;		/* the gcwq lock */
	struct list_head	worklist;	/* L: list of pending works */
	unsigned int		cpu;		/* I: the associated cpu */
	unsigned int		flags;		/* L: GCWQ_* flags */
	int			nr_workers;	/* L: total number of workers */
	int			nr_idle;	/* L: currently idle ones */
	/* workers are chained either in the idle_list or busy_hash */
	struct list_head	idle_list;	/* X: list of idle workers */
	struct hlist_head	busy_hash[BUSY_WORKER_HASH_SIZE];
						/* L: hash of busy workers */
	struct timer_list	idle_timer;	/* L: worker idle timeout */
	struct timer_list	mayday_timer;	/* L: SOS timer for workers */
	struct ida		worker_ida;	/* L: for worker IDs */
	struct task_struct	*trustee;	/* L: for gcwq shutdown */
	unsigned int		trustee_state;	/* L: trustee state */
	wait_queue_head_t	trustee_wait;	/* trustee wait */
	struct worker		*first_idle;	/* L: first idle worker */
} ____cacheline_aligned_in_smp;
/*
* The per-CPU workqueue. The lower WORK_STRUCT_FLAG_BITS of
* work_struct->data are used for flags and thus cwqs need to be
* aligned at two's power of the number of flag bits.
*/
struct cpu_workqueue_struct {
	struct global_cwq	*gcwq;		/* I: the associated gcwq */
	struct workqueue_struct *wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works,
						   indexed by flush color */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* L: max active works */
	struct list_head	delayed_works;	/* L: delayed works */
};
/*
* Structure used to wait for workqueue flush.
*/
struct wq_flusher {
	/* "F:" fields are protected by wq->flush_mutex */
	struct list_head	list;		/* F: list of flushers */
	int			flush_color;	/* F: flush color waiting for */
	struct completion	done;		/* flush completion */
};
/*
* All cpumasks are assumed to be always set on UP and thus can't be
* used to determine whether there's something to be done.
*/
#ifdef CONFIG_SMP
/* SMP: the mayday mask is a real cpumask */
typedef cpumask_var_t mayday_mask_t;
#define mayday_test_and_set_cpu(cpu, mask)	\
	cpumask_test_and_set_cpu((cpu), (mask))
#define mayday_clear_cpu(cpu, mask)		cpumask_clear_cpu((cpu), (mask))
#define for_each_mayday_cpu(cpu, mask)		for_each_cpu((cpu), (mask))
#define alloc_mayday_mask(maskp, gfp)		zalloc_cpumask_var((maskp), (gfp))
#define free_mayday_mask(mask)			free_cpumask_var((mask))
#else
/* UP: degenerates to a single bit in an unsigned long */
typedef unsigned long mayday_mask_t;
#define mayday_test_and_set_cpu(cpu, mask)	test_and_set_bit(0, &(mask))
#define mayday_clear_cpu(cpu, mask)		clear_bit(0, &(mask))
/* the comma expression deliberately assigns 0 to (cpu) before the test */
#define for_each_mayday_cpu(cpu, mask)		if ((cpu) = 0, (mask))
#define alloc_mayday_mask(maskp, gfp)		true
#define free_mayday_mask(mask)			do { } while (0)
#endif
/*
* The externally visible workqueue abstraction is an array of
* per-CPU workqueues:
*/
struct workqueue_struct {
	unsigned int		flags;		/* W: WQ_* flags */
	/* bound wqs use the per-cpu pointer, unbound wqs the single one */
	union {
		struct cpu_workqueue_struct __percpu	*pcpu;
		struct cpu_workqueue_struct		*single;
		unsigned long				v;
	} cpu_wq;				/* I: cwq's */
	struct list_head	list;		/* W: list of all workqueues */
	struct mutex		flush_mutex;	/* protects wq flushing */
	int			work_color;	/* F: current work color */
	int			flush_color;	/* F: current flush color */
	atomic_t		nr_cwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* F: first flusher */
	struct list_head	flusher_queue;	/* F: flush waiters */
	struct list_head	flusher_overflow; /* F: flush overflow list */
	mayday_mask_t		mayday_mask;	/* cpus requesting rescue */
	struct worker		*rescuer;	/* I: rescue worker */
	int			nr_drainers;	/* W: drain in progress */
	int			saved_max_active; /* W: saved cwq max_active */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map	lockdep_map;
#endif
	char			name[];		/* I: workqueue name (flexible array) */
};
/* System-wide workqueues, available to all users. */
struct workqueue_struct *system_wq __read_mostly;
struct workqueue_struct *system_long_wq __read_mostly;
struct workqueue_struct *system_nrt_wq __read_mostly;
struct workqueue_struct *system_unbound_wq __read_mostly;
struct workqueue_struct *system_freezable_wq __read_mostly;
struct workqueue_struct *system_nrt_freezable_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_wq);
EXPORT_SYMBOL_GPL(system_long_wq);
EXPORT_SYMBOL_GPL(system_nrt_wq);
EXPORT_SYMBOL_GPL(system_unbound_wq);
EXPORT_SYMBOL_GPL(system_freezable_wq);
EXPORT_SYMBOL_GPL(system_nrt_freezable_wq);
/* instantiate the tracepoints declared in trace/events/workqueue.h */
#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>
/* iterate over every busy worker of @gcwq; @i and @pos are scratch vars */
#define for_each_busy_worker(worker, i, pos, gcwq)			\
	for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)			\
		hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
/*
 * Return the next gcwq cpu after @cpu.  @sw selects which sets to
 * visit: bit 0 = the cpus in @mask, bit 1 = the special
 * WORK_CPU_UNBOUND slot.  Returns WORK_CPU_NONE when exhausted.
 */
static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
				  unsigned int sw)
{
	if (cpu < nr_cpu_ids) {
		if (sw & 1) {
			cpu = cpumask_next(cpu, mask);
			if (cpu < nr_cpu_ids)
				return cpu;
		}
		/* real cpus exhausted; visit the unbound slot if requested */
		if (sw & 2)
			return WORK_CPU_UNBOUND;
	}
	return WORK_CPU_NONE;
}
/*
 * Next cpu for @wq: bound workqueues walk @mask, unbound workqueues
 * only ever visit the special WORK_CPU_UNBOUND slot.
 */
static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
				struct workqueue_struct *wq)
{
	unsigned int sw = (wq->flags & WQ_UNBOUND) ? 2 : 1;

	return __next_gcwq_cpu(cpu, mask, sw);
}
/*
* CPU iterators
*
* An extra gcwq is defined for an invalid cpu number
* (WORK_CPU_UNBOUND) to host workqueues which are not bound to any
* specific CPU. The following iterators are similar to
* for_each_*_cpu() iterators but also considers the unbound gcwq.
*
* for_each_gcwq_cpu() : possible CPUs + WORK_CPU_UNBOUND
* for_each_online_gcwq_cpu() : online CPUs + WORK_CPU_UNBOUND
* for_each_cwq_cpu() : possible CPUs for bound workqueues,
* WORK_CPU_UNBOUND for unbound workqueues
*/
/* possible CPUs + WORK_CPU_UNBOUND */
#define for_each_gcwq_cpu(cpu)						\
	for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3);		\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))
/* online CPUs + WORK_CPU_UNBOUND */
#define for_each_online_gcwq_cpu(cpu)					\
	for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3);		\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))
/* possible CPUs for bound @wq, WORK_CPU_UNBOUND for unbound @wq */
#define for_each_cwq_cpu(cpu, wq)					\
	for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq));	\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
#ifdef CONFIG_DEBUG_OBJECTS_WORK
static struct debug_obj_descr work_debug_descr;
/* debugobjects hint: identify a work item by its callback function. */
static void *work_debug_hint(void *addr)
{
	struct work_struct *work = addr;

	return work->func;
}
/*
* fixup_init is called when:
* - an active object is initialized
*/
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	if (state != ODEBUG_STATE_ACTIVE)
		return 0;

	/* re-initializing an active work: stop it before wiping its state */
	cancel_work_sync(work);
	debug_object_init(work, &work_debug_descr);
	return 1;
}
/*
* fixup_activate is called when:
* - an active object is activated
* - an unknown object is activated (might be a statically initialized object)
*/
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;
	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The work struct was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
			debug_object_init(work, &work_debug_descr);
			debug_object_activate(work, &work_debug_descr);
			return 0;
		}
		/* activating an uninitialized, non-static work is a bug */
		WARN_ON_ONCE(1);
		return 0;
	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);
		/* fall through */
	default:
		return 0;
	}
}
/*
* fixup_free is called when:
* - an active object is freed
*/
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	if (state != ODEBUG_STATE_ACTIVE)
		return 0;

	/* freeing an active work: stop it before dropping the tracking */
	cancel_work_sync(work);
	debug_object_free(work, &work_debug_descr);
	return 1;
}
/* debugobjects descriptor for work_structs; see the work_fixup_*() above */
static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.debug_hint	= work_debug_hint,
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};
/* notify debugobjects that @work is being queued */
static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}
/* notify debugobjects that @work is no longer queued */
static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}
void __init_work(struct work_struct *work, int onstack)
{
	/* on-stack works need separate tracking: their storage is transient */
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);
/* counterpart of __init_work() for on-stack works going out of scope */
void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);
#else
/* CONFIG_DEBUG_OBJECTS_WORK=n: the debug hooks compile to nothing */
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif
/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);		/* W: list of all workqueues */
static bool workqueue_freezing;		/* W: have wqs started freezing? */
/*
 * The almighty global cpu workqueues.  nr_running is the only field
 * which is expected to be used frequently by other cpus via
 * try_to_wake_up().  Put it in a separate cacheline.
 */
static DEFINE_PER_CPU(struct global_cwq, global_cwq);
static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, gcwq_nr_running);
/*
 * Global cpu workqueue and nr_running counter for unbound gcwq.  The
 * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its
 * workers have WORKER_UNBOUND set.  Looked up together with the
 * per-cpu instances via get_gcwq()/get_gcwq_nr_running() below.
 */
static struct global_cwq unbound_global_cwq;
static atomic_t unbound_gcwq_nr_running = ATOMIC_INIT(0);	/* always 0 */
static int worker_thread(void *__worker);
/* Map @cpu to its gcwq; WORK_CPU_UNBOUND selects the unbound gcwq. */
static struct global_cwq *get_gcwq(unsigned int cpu)
{
	if (cpu == WORK_CPU_UNBOUND)
		return &unbound_global_cwq;
	return &per_cpu(global_cwq, cpu);
}
/* Map @cpu to its nr_running counter, mirroring get_gcwq(). */
static atomic_t *get_gcwq_nr_running(unsigned int cpu)
{
	if (cpu == WORK_CPU_UNBOUND)
		return &unbound_gcwq_nr_running;
	return &per_cpu(gcwq_nr_running, cpu);
}
static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
					    struct workqueue_struct *wq)
{
	/* bound wqs have one cwq per cpu; unbound wqs a single cwq */
	if (!(wq->flags & WQ_UNBOUND)) {
		if (likely(cpu < nr_cpu_ids))
			return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
	} else if (likely(cpu == WORK_CPU_UNBOUND))
		return wq->cpu_wq.single;
	return NULL;
}
static unsigned int work_color_to_flags(int color)
{
	/* encode @color into the WORK_STRUCT color flag bits */
	return color << WORK_STRUCT_COLOR_SHIFT;
}
static int get_work_color(struct work_struct *work)
{
	/* extract the color encoded by work_color_to_flags() */
	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}
/* Return the flush color following @color, wrapping at WORK_NR_COLORS. */
static int work_next_color(int color)
{
	int next = (color + 1) % WORK_NR_COLORS;

	return next;
}
/*
* A work's data points to the cwq with WORK_STRUCT_CWQ set while the
* work is on queue. Once execution starts, WORK_STRUCT_CWQ is
* cleared and the work data contains the cpu number it was last on.
*
* set_work_{cwq|cpu}() and clear_work_data() can be used to set the
* cwq, cpu or clear work->data. These functions should only be
* called while the work is owned - ie. while the PENDING bit is set.
*
* get_work_[g]cwq() can be used to obtain the gcwq or cwq
* corresponding to a work. gcwq is available once the work has been
* queued anywhere after initialization. cwq is available only from
* queueing until execution starts.
*/
static inline void set_work_data(struct work_struct *work, unsigned long data,
				 unsigned long flags)
{
	/* caller must own the work, ie. the PENDING bit must be set */
	BUG_ON(!work_pending(work));
	atomic_long_set(&work->data, data | flags | work_static(work));
}
static void set_work_cwq(struct work_struct *work,
			 struct cpu_workqueue_struct *cwq,
			 unsigned long extra_flags)
{
	/* point work->data at @cwq and mark the work pending/queued */
	set_work_data(work, (unsigned long)cwq,
		      WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
}
static void set_work_cpu(struct work_struct *work, unsigned int cpu)
{
	/* record the last cpu in work->data; WORK_STRUCT_CWQ ends up clear */
	set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING);
}
static void clear_work_data(struct work_struct *work)
{
	/* reset work->data to the "no cpu" state with all flags cleared */
	set_work_data(work, WORK_STRUCT_NO_CPU, 0);
}
/* Return the cwq recorded in @work's data, or NULL if it isn't queued. */
static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	if (!(data & WORK_STRUCT_CWQ))
		return NULL;
	return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
}
static struct global_cwq *get_work_gcwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);
	unsigned int cpu;
	/* while queued, data points at the cwq, which knows its gcwq */
	if (data & WORK_STRUCT_CWQ)
		return ((struct cpu_workqueue_struct *)
			(data & WORK_STRUCT_WQ_DATA_MASK))->gcwq;
	/* otherwise data holds the cpu the work last ran on */
	cpu = data >> WORK_STRUCT_FLAG_BITS;
	if (cpu == WORK_CPU_NONE)
		return NULL;
	BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND);
	return get_gcwq(cpu);
}
/*
* Policy functions. These define the policies on how the global
* worker pool is managed. Unless noted otherwise, these functions
* assume that they're being called with gcwq->lock held.
*/
static bool __need_more_worker(struct global_cwq *gcwq)
{
	/* true when nobody is running on this gcwq or highpri work waits */
	return !atomic_read(get_gcwq_nr_running(gcwq->cpu)) ||
		gcwq->flags & GCWQ_HIGHPRI_PENDING;
}
/*
* Need to wake up a worker? Called from anything but currently
* running workers.
*/
static bool need_more_worker(struct global_cwq *gcwq)
{
	/* only meaningful while there is actually pending work */
	return !list_empty(&gcwq->worklist) && __need_more_worker(gcwq);
}
/* Can I start working? Called from busy but !running workers. */
static bool may_start_working(struct global_cwq *gcwq)
{
	/* nonzero iff at least one idle worker is available */
	return gcwq->nr_idle;
}
/* Do I need to keep working? Called from currently running workers. */
static bool keep_working(struct global_cwq *gcwq)
{
	atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
	/* keep going while work is pending and either we're (nearly) the
	 * last running worker or a high priority work is queued */
	return !list_empty(&gcwq->worklist) &&
		(atomic_read(nr_running) <= 1 ||
		 gcwq->flags & GCWQ_HIGHPRI_PENDING);
}
/* Do we need a new worker? Called from manager. */
static bool need_to_create_worker(struct global_cwq *gcwq)
{
	/* work is pending but no idle worker exists to pick it up */
	return need_more_worker(gcwq) && !may_start_working(gcwq);
}
/* Do I need to be the manager? */
static bool need_to_manage_workers(struct global_cwq *gcwq)
{
	/* manage when workers must be created or management was requested */
	return need_to_create_worker(gcwq) || gcwq->flags & GCWQ_MANAGE_WORKERS;
}
/* Do we have too many workers and should some go away? */
static bool too_many_workers(struct global_cwq *gcwq)
{
	bool managing = gcwq->flags & GCWQ_MANAGING_WORKERS;
	int nr_idle = gcwq->nr_idle + managing; /* manager is considered idle */
	int nr_busy = gcwq->nr_workers - nr_idle;
	/* always keep two idle workers; beyond that allow one idle worker
	 * per MAX_IDLE_WORKERS_RATIO busy ones */
	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}
/*
* Wake up functions.
*/
/* Return the first worker. Safe with preemption disabled */
static struct worker *first_worker(struct global_cwq *gcwq)
{
	/* NULL when no worker is idle */
	if (unlikely(list_empty(&gcwq->idle_list)))
		return NULL;
	return list_first_entry(&gcwq->idle_list, struct worker, entry);
}
/**
* wake_up_worker - wake up an idle worker
* @gcwq: gcwq to wake worker for
*
* Wake up the first idle worker of @gcwq.
*
* CONTEXT:
* spin_lock_irq(gcwq->lock).
*/
static void wake_up_worker(struct global_cwq *gcwq)
{
	struct worker *worker = first_worker(gcwq);
	/* nothing to do when no worker is idle */
	if (likely(worker))
		wake_up_process(worker->task);
}
/**
* wq_worker_waking_up - a worker is waking up
* @task: task waking up
* @cpu: CPU @task is waking up to
*
* This function is called during try_to_wake_up() when a worker is
* being awoken.
*
* CONTEXT:
* spin_lock_irq(rq->lock)
*/
void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
{
	struct worker *worker = kthread_data(task);
	/* only workers without any NOT_RUNNING flag count toward nr_running */
	if (!(worker->flags & WORKER_NOT_RUNNING))
		atomic_inc(get_gcwq_nr_running(cpu));
}
/**
* wq_worker_sleeping - a worker is going to sleep
* @task: task going to sleep
* @cpu: CPU in question, must be the current CPU number
*
* This function is called during schedule() when a busy worker is
* going to sleep. Worker on the same cpu can be woken up by
* returning pointer to its task.
*
* CONTEXT:
* spin_lock_irq(rq->lock)
*
* RETURNS:
* Worker task on @cpu to wake up, %NULL if none.
*/
struct task_struct *wq_worker_sleeping(struct task_struct *task,
				       unsigned int cpu)
{
	struct worker *worker = kthread_data(task), *to_wakeup = NULL;
	struct global_cwq *gcwq = get_gcwq(cpu);
	atomic_t *nr_running = get_gcwq_nr_running(cpu);
	/* workers carrying a NOT_RUNNING flag are not tracked in nr_running */
	if (worker->flags & WORKER_NOT_RUNNING)
		return NULL;
	/* this can only happen on the local cpu */
	BUG_ON(cpu != raw_smp_processor_id());
	/*
	 * The counterpart of the following dec_and_test, implied mb,
	 * worklist not empty test sequence is in insert_work().
	 * Please read comment there.
	 *
	 * NOT_RUNNING is clear.  This means that trustee is not in
	 * charge and we're running on the local cpu w/ rq lock held
	 * and preemption disabled, which in turn means that none else
	 * could be manipulating idle_list, so dereferencing idle_list
	 * without gcwq lock is safe.
	 */
	if (atomic_dec_and_test(nr_running) && !list_empty(&gcwq->worklist))
		to_wakeup = first_worker(gcwq);
	return to_wakeup ? to_wakeup->task : NULL;
}
/**
* worker_set_flags - set worker flags and adjust nr_running accordingly
* @worker: self
* @flags: flags to set
* @wakeup: wakeup an idle worker if necessary
*
* Set @flags in @worker->flags and adjust nr_running accordingly. If
* nr_running becomes zero and @wakeup is %true, an idle worker is
* woken up.
*
* CONTEXT:
* spin_lock_irq(gcwq->lock)
*/
static inline void worker_set_flags(struct worker *worker, unsigned int flags,
				    bool wakeup)
{
	struct global_cwq *gcwq = worker->gcwq;
	WARN_ON_ONCE(worker->task != current);
	/*
	 * If transitioning into NOT_RUNNING, adjust nr_running and
	 * wake up an idle worker as necessary if requested by
	 * @wakeup.
	 */
	if ((flags & WORKER_NOT_RUNNING) &&
	    !(worker->flags & WORKER_NOT_RUNNING)) {
		atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
		if (wakeup) {
			/* last runner leaving with work pending: hand over */
			if (atomic_dec_and_test(nr_running) &&
			    !list_empty(&gcwq->worklist))
				wake_up_worker(gcwq);
		} else
			atomic_dec(nr_running);
	}
	worker->flags |= flags;
}
/**
* worker_clr_flags - clear worker flags and adjust nr_running accordingly
* @worker: self
* @flags: flags to clear
*
* Clear @flags in @worker->flags and adjust nr_running accordingly.
*
* CONTEXT:
* spin_lock_irq(gcwq->lock)
*/
static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
{
	struct global_cwq *gcwq = worker->gcwq;
	unsigned int oflags = worker->flags;
	WARN_ON_ONCE(worker->task != current);
	worker->flags &= ~flags;
	/*
	 * If transitioning out of NOT_RUNNING, increment nr_running.  Note
	 * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is mask
	 * of multiple flags, not a single flag.
	 */
	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
		/* count as running only when no NOT_RUNNING flag remains */
		if (!(worker->flags & WORKER_NOT_RUNNING))
			atomic_inc(get_gcwq_nr_running(gcwq->cpu));
}
/**
* busy_worker_head - return the busy hash head for a work
* @gcwq: gcwq of interest
* @work: work to be hashed
*
* Return hash head of @gcwq for @work.
*
* CONTEXT:
* spin_lock_irq(gcwq->lock).
*
* RETURNS:
* Pointer to the hash head.
*/
static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
					   struct work_struct *work)
{
	const int base_shift = ilog2(sizeof(struct work_struct));
	unsigned long v = (unsigned long)work;
	/* simple shift and fold hash, do we need something better? */
	v >>= base_shift;	/* drop always-zero low bits of the address */
	v += v >> BUSY_WORKER_HASH_ORDER;
	v &= BUSY_WORKER_HASH_MASK;
	return &gcwq->busy_hash[v];
}
/**
* __find_worker_executing_work - find worker which is executing a work
* @gcwq: gcwq of interest
* @bwh: hash head as returned by busy_worker_head()
* @work: work to find worker for
*
* Find a worker which is executing @work on @gcwq. @bwh should be
* the hash head obtained by calling busy_worker_head() with the same
* work.
*
* CONTEXT:
* spin_lock_irq(gcwq->lock).
*
* RETURNS:
* Pointer to worker which is executing @work if found, NULL
* otherwise.
*/
static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
						   struct hlist_head *bwh,
						   struct work_struct *work)
{
	struct worker *worker;
	struct hlist_node *tmp;
	/* linear scan of the single hash bucket */
	hlist_for_each_entry(worker, tmp, bwh, hentry)
		if (worker->current_work == work)
			return worker;
	return NULL;
}
/**
* find_worker_executing_work - find worker which is executing a work
* @gcwq: gcwq of interest
* @work: work to find worker for
*
* Find a worker which is executing @work on @gcwq. This function is
* identical to __find_worker_executing_work() except that this
* function calculates @bwh itself.
*
* CONTEXT:
* spin_lock_irq(gcwq->lock).
*
* RETURNS:
* Pointer to worker which is executing @work if found, NULL
* otherwise.
*/
static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
						 struct work_struct *work)
{
	/* hash @work to its busy bucket, then scan that bucket */
	struct hlist_head *bwh = busy_worker_head(gcwq, work);

	return __find_worker_executing_work(gcwq, bwh, work);
}
/**
* gcwq_determine_ins_pos - find insertion position
* @gcwq: gcwq of interest
* @cwq: cwq a work is being queued for
*
* A work for @cwq is about to be queued on @gcwq, determine insertion
* position for the work. If @cwq is for HIGHPRI wq, the work is
* queued at the head of the queue but in FIFO order with respect to
* other HIGHPRI works; otherwise, at the end of the queue. This
* function also sets GCWQ_HIGHPRI_PENDING flag to hint @gcwq that
* there are HIGHPRI works pending.
*
* CONTEXT:
* spin_lock_irq(gcwq->lock).
*
* RETURNS:
 * Pointer to insertion position.
*/
static inline struct list_head *gcwq_determine_ins_pos(struct global_cwq *gcwq,
						struct cpu_workqueue_struct *cwq)
{
	struct work_struct *twork;

	/* normal works simply go to the tail of the shared worklist */
	if (likely(!(cwq->wq->flags & WQ_HIGHPRI)))
		return &gcwq->worklist;

	/*
	 * HIGHPRI: skip over existing HIGHPRI works so the new one is
	 * FIFO among HIGHPRI works but still ahead of all normal works.
	 */
	list_for_each_entry(twork, &gcwq->worklist, entry) {
		struct cpu_workqueue_struct *tcwq = get_work_cwq(twork);

		if (!(tcwq->wq->flags & WQ_HIGHPRI))
			break;
	}

	gcwq->flags |= GCWQ_HIGHPRI_PENDING;
	return &twork->entry;
}
/**
* insert_work - insert a work into gcwq
* @cwq: cwq @work belongs to
* @work: work to insert
* @head: insertion point
* @extra_flags: extra WORK_STRUCT_* flags to set
*
* Insert @work which belongs to @cwq into @gcwq after @head.
* @extra_flags is or'd to work_struct flags.
*
* CONTEXT:
* spin_lock_irq(gcwq->lock).
*/
static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head,
			unsigned int extra_flags)
{
	struct global_cwq *gcwq = cwq->gcwq;

	/* we own @work, set data and link */
	set_work_cwq(work, cwq, extra_flags);

	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();

	list_add_tail(&work->entry, head);

	/*
	 * Ensure either worker_sched_deactivated() sees the above
	 * list_add_tail() or we see zero nr_running to avoid workers
	 * lying around lazily while there are works to be processed.
	 */
	smp_mb();

	/* kick a worker if the new work would otherwise sit idle */
	if (__need_more_worker(gcwq))
		wake_up_worker(gcwq);
}
/*
* Test whether @work is being queued from another work executing on the
* same workqueue. This is rather expensive and should only be used from
* cold paths.
*/
static bool is_chained_work(struct workqueue_struct *wq)
{
	unsigned long flags;
	unsigned int cpu;

	/* scan every gcwq's busy hash for a worker whose task is us */
	for_each_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);
		struct worker *worker;
		struct hlist_node *pos;
		int i;

		spin_lock_irqsave(&gcwq->lock, flags);
		for_each_busy_worker(worker, i, pos, gcwq) {
			if (worker->task != current)
				continue;
			spin_unlock_irqrestore(&gcwq->lock, flags);
			/*
			 * I'm @worker, no locking necessary.  See if @work
			 * is headed to the same workqueue.
			 */
			return worker->current_cwq->wq == wq;
		}
		spin_unlock_irqrestore(&gcwq->lock, flags);
	}
	/* current is not executing any work - not a chained queueing */
	return false;
}
/*
 * Queue @work on @wq.  The caller has already set WORK_STRUCT_PENDING
 * (see queue_work_on()).  Picks the target gcwq - honoring WQ_UNBOUND
 * and WQ_NON_REENTRANT - then either activates the work immediately or
 * parks it on the cwq's delayed list if max_active is exhausted.
 */
static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
			 struct work_struct *work)
{
	struct global_cwq *gcwq;
	struct cpu_workqueue_struct *cwq;
	struct list_head *worklist;
	unsigned int work_flags;
	unsigned long flags;

	debug_work_activate(work);

	/* if dying, only works from the same workqueue are allowed */
	if (unlikely(wq->flags & WQ_DRAINING) &&
	    WARN_ON_ONCE(!is_chained_work(wq)))
		return;

	/* determine gcwq to use */
	if (!(wq->flags & WQ_UNBOUND)) {
		struct global_cwq *last_gcwq;

		if (unlikely(cpu == WORK_CPU_UNBOUND))
			cpu = raw_smp_processor_id();

		/*
		 * It's multi cpu.  If @wq is non-reentrant and @work
		 * was previously on a different cpu, it might still
		 * be running there, in which case the work needs to
		 * be queued on that cpu to guarantee non-reentrance.
		 */
		gcwq = get_gcwq(cpu);
		if (wq->flags & WQ_NON_REENTRANT &&
		    (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
			struct worker *worker;

			spin_lock_irqsave(&last_gcwq->lock, flags);

			worker = find_worker_executing_work(last_gcwq, work);

			if (worker && worker->current_cwq->wq == wq)
				gcwq = last_gcwq;
			else {
				/* meh... not running there, queue here */
				spin_unlock_irqrestore(&last_gcwq->lock, flags);
				spin_lock_irqsave(&gcwq->lock, flags);
			}
		} else
			spin_lock_irqsave(&gcwq->lock, flags);
	} else {
		gcwq = get_gcwq(WORK_CPU_UNBOUND);
		spin_lock_irqsave(&gcwq->lock, flags);
	}

	/* gcwq determined, get cwq and queue */
	cwq = get_cwq(gcwq->cpu, wq);
	trace_workqueue_queue_work(cpu, cwq, work);

	BUG_ON(!list_empty(&work->entry));

	cwq->nr_in_flight[cwq->work_color]++;
	work_flags = work_color_to_flags(cwq->work_color);

	if (likely(cwq->nr_active < cwq->max_active)) {
		/* room under max_active - make the work runnable now */
		trace_workqueue_activate_work(work);
		cwq->nr_active++;
		worklist = gcwq_determine_ins_pos(gcwq, cwq);
	} else {
		/* over max_active - park on the delayed list for later */
		work_flags |= WORK_STRUCT_DELAYED;
		worklist = &cwq->delayed_works;
	}

	insert_work(cwq, work, worklist, work_flags);

	spin_unlock_irqrestore(&gcwq->lock, flags);
}
/**
* queue_work - queue work on a workqueue
* @wq: workqueue to use
* @work: work to queue
*
* Returns 0 if @work was already on a queue, non-zero otherwise.
*
* We queue the work to the CPU on which it was submitted, but if the CPU dies
* it can be processed by another CPU.
*/
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	/* pin ourselves to the current cpu while queueing on it */
	int cpu = get_cpu();
	int queued = queue_work_on(cpu, wq, work);

	put_cpu();
	return queued;
}
EXPORT_SYMBOL_GPL(queue_work);
/**
* queue_work_on - queue work on specific cpu
* @cpu: CPU number to execute work on
* @wq: workqueue to use
* @work: work to queue
*
* Returns 0 if @work was already on a queue, non-zero otherwise.
*
* We queue the work to a specific CPU, the caller must ensure it
* can't go away.
*/
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	/* whoever wins PENDING owns the queueing; losers report 0 */
	if (test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
		return 0;

	__queue_work(cpu, wq, work);
	return 1;
}
EXPORT_SYMBOL_GPL(queue_work_on);
/* timer callback for delayed works - requeue the work on the local cpu */
static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	/* cwq was stashed in work->data by queue_delayed_work_on() */
	struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);

	__queue_work(smp_processor_id(), cwq->wq, &dwork->work);
}
/**
* queue_delayed_work - queue work on a workqueue after delay
* @wq: workqueue to use
* @dwork: delayable work to queue
* @delay: number of jiffies to wait before queueing
*
* Returns 0 if @work was already on a queue, non-zero otherwise.
*/
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	/* cpu -1 lets queue_delayed_work_on() use a plain add_timer() */
	if (delay)
		return queue_delayed_work_on(-1, wq, dwork, delay);

	/* zero delay - skip the timer and queue immediately */
	return queue_work(wq, &dwork->work);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
/**
* queue_delayed_work_on - queue work on specific CPU after delay
* @cpu: CPU number to execute work on
* @wq: workqueue to use
* @dwork: work to queue
* @delay: number of jiffies to wait before queueing
*
* Returns 0 if @work was already on a queue, non-zero otherwise.
*/
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		unsigned int lcpu;

		WARN_ON_ONCE(timer_pending(timer));
		WARN_ON_ONCE(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/*
		 * This stores cwq for the moment, for the timer_fn.
		 * Note that the work's gcwq is preserved to allow
		 * reentrance detection for delayed works.
		 */
		if (!(wq->flags & WQ_UNBOUND)) {
			struct global_cwq *gcwq = get_work_gcwq(work);

			/* prefer the cpu the work last ran on, if still bound */
			if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND)
				lcpu = gcwq->cpu;
			else
				lcpu = raw_smp_processor_id();
		} else
			lcpu = WORK_CPU_UNBOUND;

		set_work_cwq(work, get_cwq(lcpu, wq), 0);

		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		/* negative cpu (e.g. -1 from queue_delayed_work()) = any cpu */
		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
/**
* worker_enter_idle - enter idle state
* @worker: worker which is entering idle state
*
* @worker is entering idle state. Update stats and idle timer if
* necessary.
*
* LOCKING:
* spin_lock_irq(gcwq->lock).
*/
static void worker_enter_idle(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;

	BUG_ON(worker->flags & WORKER_IDLE);
	BUG_ON(!list_empty(&worker->entry) &&
	       (worker->hentry.next || worker->hentry.pprev));

	/* can't use worker_set_flags(), also called from start_worker() */
	worker->flags |= WORKER_IDLE;
	gcwq->nr_idle++;
	worker->last_active = jiffies;	/* timestamp for idle-timeout reaping */

	/* idle_list is LIFO */
	list_add(&worker->entry, &gcwq->idle_list);

	if (likely(!(worker->flags & WORKER_ROGUE))) {
		/* arm the idle timer if we might now have too many workers */
		if (too_many_workers(gcwq) && !timer_pending(&gcwq->idle_timer))
			mod_timer(&gcwq->idle_timer,
				  jiffies + IDLE_WORKER_TIMEOUT);
	} else
		/* a rogue worker went idle - the trustee may be waiting */
		wake_up_all(&gcwq->trustee_wait);

	/*
	 * Sanity check nr_running. Because trustee releases gcwq->lock
	 * between setting %WORKER_ROGUE and zapping nr_running, the
	 * warning may trigger spuriously. Check iff trustee is idle.
	 */
	WARN_ON_ONCE(gcwq->trustee_state == TRUSTEE_DONE &&
		     gcwq->nr_workers == gcwq->nr_idle &&
		     atomic_read(get_gcwq_nr_running(gcwq->cpu)));
}
/**
* worker_leave_idle - leave idle state
* @worker: worker which is leaving idle state
*
* @worker is leaving idle state. Update stats.
*
* LOCKING:
* spin_lock_irq(gcwq->lock).
*/
static void worker_leave_idle(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;

	BUG_ON(!(worker->flags & WORKER_IDLE));
	/* may bump nr_running if IDLE was the last NOT_RUNNING flag */
	worker_clr_flags(worker, WORKER_IDLE);
	gcwq->nr_idle--;
	list_del_init(&worker->entry);	/* unlink from gcwq->idle_list */
}
/**
* worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq
* @worker: self
*
* Works which are scheduled while the cpu is online must at least be
* scheduled to a worker which is bound to the cpu so that if they are
* flushed from cpu callbacks while cpu is going down, they are
* guaranteed to execute on the cpu.
*
* This function is to be used by rogue workers and rescuers to bind
* themselves to the target cpu and may race with cpu going down or
* coming online. kthread_bind() can't be used because it may put the
* worker to already dead cpu and set_cpus_allowed_ptr() can't be used
* verbatim as it's best effort and blocking and gcwq may be
* [dis]associated in the meantime.
*
* This function tries set_cpus_allowed() and locks gcwq and verifies
* the binding against GCWQ_DISASSOCIATED which is set during
* CPU_DYING and cleared during CPU_ONLINE, so if the worker enters
* idle state or fetches works without dropping lock, it can guarantee
* the scheduling requirement described in the first paragraph.
*
* CONTEXT:
* Might sleep. Called without any lock but returns with gcwq->lock
* held.
*
* RETURNS:
* %true if the associated gcwq is online (@worker is successfully
* bound), %false if offline.
*/
static bool worker_maybe_bind_and_lock(struct worker *worker)
__acquires(&gcwq->lock)
{
	struct global_cwq *gcwq = worker->gcwq;
	struct task_struct *task = worker->task;

	while (true) {
		/*
		 * The following call may fail, succeed or succeed
		 * without actually migrating the task to the cpu if
		 * it races with cpu hotunplug operation. Verify
		 * against GCWQ_DISASSOCIATED.
		 */
		if (!(gcwq->flags & GCWQ_DISASSOCIATED))
			set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));

		spin_lock_irq(&gcwq->lock);
		if (gcwq->flags & GCWQ_DISASSOCIATED)
			return false;	/* cpu went down; lock still held */
		if (task_cpu(task) == gcwq->cpu &&
		    cpumask_equal(&current->cpus_allowed,
				  get_cpu_mask(gcwq->cpu)))
			return true;	/* successfully bound; lock held */
		spin_unlock_irq(&gcwq->lock);

		/*
		 * We've raced with CPU hot[un]plug. Give it a breather
		 * and retry migration. cond_resched() is required here;
		 * otherwise, we might deadlock against cpu_stop trying to
		 * bring down the CPU on non-preemptive kernel.
		 */
		cpu_relax();
		cond_resched();
	}
}
/*
* Function for worker->rebind_work used to rebind rogue busy workers
* to the associated cpu which is coming back online. This is
* scheduled by cpu up but can race with other cpu hotplug operations
* and may be executed twice without intervening cpu down.
*/
static void worker_rebind_fn(struct work_struct *work)
{
	struct worker *worker = container_of(work, struct worker, rebind_work);
	struct global_cwq *gcwq = worker->gcwq;

	/* clear REBIND only if we actually made it back onto our cpu */
	if (worker_maybe_bind_and_lock(worker))
		worker_clr_flags(worker, WORKER_REBIND);

	/* worker_maybe_bind_and_lock() returns with gcwq->lock held */
	spin_unlock_irq(&gcwq->lock);
}
static struct worker *alloc_worker(void)
{
struct worker *worker;
worker = kzalloc(sizeof(*worker), GFP_KERNEL);
if (worker) {
INIT_LIST_HEAD(&worker->entry);
INIT_LIST_HEAD(&worker->scheduled);
INIT_WORK(&worker->rebind_work, worker_rebind_fn);
/* on creation a worker is in !idle && prep state */
worker->flags = WORKER_PREP;
}
return worker;
}
/**
* create_worker - create a new workqueue worker
* @gcwq: gcwq the new worker will belong to
* @bind: whether to set affinity to @cpu or not
*
* Create a new worker which is bound to @gcwq. The returned worker
* can be started by calling start_worker() or destroyed using
* destroy_worker().
*
* CONTEXT:
* Might sleep. Does GFP_KERNEL allocations.
*
* RETURNS:
* Pointer to the newly created worker.
*/
static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
{
	bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND;
	struct worker *worker = NULL;
	int id = -1;

	/* grab a worker id; ida_pre_get() may sleep so drop the lock for it */
	spin_lock_irq(&gcwq->lock);
	while (ida_get_new(&gcwq->worker_ida, &id)) {
		spin_unlock_irq(&gcwq->lock);
		if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
			goto fail;
		spin_lock_irq(&gcwq->lock);
	}
	spin_unlock_irq(&gcwq->lock);

	worker = alloc_worker();
	if (!worker)
		goto fail;

	worker->gcwq = gcwq;
	worker->id = id;

	/* bound workers get a per-cpu name, unbound ones a "u" name */
	if (!on_unbound_cpu)
		worker->task = kthread_create_on_node(worker_thread,
						      worker,
						      cpu_to_node(gcwq->cpu),
						      "kworker/%u:%d", gcwq->cpu, id);
	else
		worker->task = kthread_create(worker_thread, worker,
					      "kworker/u:%d", id);
	if (IS_ERR(worker->task))
		goto fail;

	/*
	 * A rogue worker will become a regular one if CPU comes
	 * online later on. Make sure every worker has
	 * PF_THREAD_BOUND set.
	 */
	if (bind && !on_unbound_cpu)
		kthread_bind(worker->task, gcwq->cpu);
	else {
		worker->task->flags |= PF_THREAD_BOUND;
		if (on_unbound_cpu)
			worker->flags |= WORKER_UNBOUND;
	}

	return worker;
fail:
	if (id >= 0) {
		/* give the unused id back */
		spin_lock_irq(&gcwq->lock);
		ida_remove(&gcwq->worker_ida, id);
		spin_unlock_irq(&gcwq->lock);
	}
	kfree(worker);
	return NULL;
}
/**
* start_worker - start a newly created worker
* @worker: worker to start
*
* Make the gcwq aware of @worker and start it.
*
* CONTEXT:
* spin_lock_irq(gcwq->lock).
*/
static void start_worker(struct worker *worker)
{
	worker->flags |= WORKER_STARTED;	/* counted in nr_workers from now on */
	worker->gcwq->nr_workers++;
	worker_enter_idle(worker);	/* new workers start out idle */
	wake_up_process(worker->task);
}
/**
* destroy_worker - destroy a workqueue worker
* @worker: worker to be destroyed
*
* Destroy @worker and adjust @gcwq stats accordingly.
*
* CONTEXT:
* spin_lock_irq(gcwq->lock) which is released and regrabbed.
*/
static void destroy_worker(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;
	int id = worker->id;

	/* sanity check frenzy */
	BUG_ON(worker->current_work);
	BUG_ON(!list_empty(&worker->scheduled));

	if (worker->flags & WORKER_STARTED)
		gcwq->nr_workers--;
	if (worker->flags & WORKER_IDLE)
		gcwq->nr_idle--;

	list_del_init(&worker->entry);
	worker->flags |= WORKER_DIE;	/* tells worker_thread() to exit */

	/* kthread_stop() sleeps; must drop gcwq->lock around it */
	spin_unlock_irq(&gcwq->lock);

	kthread_stop(worker->task);
	kfree(worker);

	spin_lock_irq(&gcwq->lock);
	ida_remove(&gcwq->worker_ida, id);	/* recycle the worker id */
}
/* idle timer callback - asks the manager to reap workers idle for too long */
static void idle_worker_timeout(unsigned long __gcwq)
{
	struct global_cwq *gcwq = (void *)__gcwq;

	spin_lock_irq(&gcwq->lock);

	if (too_many_workers(gcwq)) {
		struct worker *worker;
		unsigned long expires;

		/* idle_list is kept in LIFO order, check the last one */
		worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
		expires = worker->last_active + IDLE_WORKER_TIMEOUT;

		if (time_before(jiffies, expires))
			/* not expired yet, fire again when it would be */
			mod_timer(&gcwq->idle_timer, expires);
		else {
			/* it's been idle for too long, wake up manager */
			gcwq->flags |= GCWQ_MANAGE_WORKERS;
			wake_up_worker(gcwq);
		}
	}

	spin_unlock_irq(&gcwq->lock);
}
/*
 * Summon the rescuer of @work's workqueue.  Returns %false if the
 * workqueue has no rescuer (!WQ_RESCUER), %true otherwise.
 */
static bool send_mayday(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
	struct workqueue_struct *wq = cwq->wq;
	unsigned int cpu;

	/* only workqueues with a rescuer can answer a mayday */
	if (!(wq->flags & WQ_RESCUER))
		return false;

	/* mayday mayday mayday */
	cpu = cwq->gcwq->cpu;
	/* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
	if (cpu == WORK_CPU_UNBOUND)
		cpu = 0;

	/* wake the rescuer unless it has already been summoned for @cpu */
	if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask))
		wake_up_process(wq->rescuer->task);

	return true;
}
/* mayday timer callback - armed by maybe_create_worker() while it struggles */
static void gcwq_mayday_timeout(unsigned long __gcwq)
{
	struct global_cwq *gcwq = (void *)__gcwq;
	struct work_struct *work;

	spin_lock_irq(&gcwq->lock);

	if (need_to_create_worker(gcwq)) {
		/*
		 * We've been trying to create a new worker but
		 * haven't been successful. We might be hitting an
		 * allocation deadlock. Send distress signals to
		 * rescuers.
		 */
		list_for_each_entry(work, &gcwq->worklist, entry)
			send_mayday(work);
	}

	spin_unlock_irq(&gcwq->lock);

	/* keep nagging until worker creation succeeds or need goes away */
	mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INTERVAL);
}
/**
* maybe_create_worker - create a new worker if necessary
* @gcwq: gcwq to create a new worker for
*
* Create a new worker for @gcwq if necessary. @gcwq is guaranteed to
* have at least one idle worker on return from this function. If
* creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
* sent to all rescuers with works scheduled on @gcwq to resolve
* possible allocation deadlock.
*
* On return, need_to_create_worker() is guaranteed to be false and
* may_start_working() true.
*
* LOCKING:
* spin_lock_irq(gcwq->lock) which may be released and regrabbed
* multiple times. Does GFP_KERNEL allocations. Called only from
* manager.
*
* RETURNS:
* false if no action was taken and gcwq->lock stayed locked, true
* otherwise.
*/
static bool maybe_create_worker(struct global_cwq *gcwq)
__releases(&gcwq->lock)
__acquires(&gcwq->lock)
{
	if (!need_to_create_worker(gcwq))
		return false;
restart:
	spin_unlock_irq(&gcwq->lock);

	/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
	mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);

	while (true) {
		struct worker *worker;

		worker = create_worker(gcwq, true);
		if (worker) {
			del_timer_sync(&gcwq->mayday_timer);
			spin_lock_irq(&gcwq->lock);
			start_worker(worker);
			BUG_ON(need_to_create_worker(gcwq));
			return true;
		}

		if (!need_to_create_worker(gcwq))
			break;

		/* creation failed - back off before retrying */
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(CREATE_COOLDOWN);

		if (!need_to_create_worker(gcwq))
			break;
	}

	del_timer_sync(&gcwq->mayday_timer);
	spin_lock_irq(&gcwq->lock);
	/* the need may have re-arisen while the lock was dropped - recheck */
	if (need_to_create_worker(gcwq))
		goto restart;
	return true;
}
/**
* maybe_destroy_worker - destroy workers which have been idle for a while
* @gcwq: gcwq to destroy workers for
*
* Destroy @gcwq workers which have been idle for longer than
* IDLE_WORKER_TIMEOUT.
*
* LOCKING:
* spin_lock_irq(gcwq->lock) which may be released and regrabbed
* multiple times. Called only from manager.
*
* RETURNS:
* false if no action was taken and gcwq->lock stayed locked, true
* otherwise.
*/
static bool maybe_destroy_workers(struct global_cwq *gcwq)
{
	bool destroyed = false;

	/* idle_list is LIFO, so the longest-idle worker sits at the tail */
	while (too_many_workers(gcwq)) {
		struct worker *victim = list_entry(gcwq->idle_list.prev,
						   struct worker, entry);
		unsigned long expires = victim->last_active +
					IDLE_WORKER_TIMEOUT;

		if (time_before(jiffies, expires)) {
			/* not idle long enough yet - re-arm and stop */
			mod_timer(&gcwq->idle_timer, expires);
			break;
		}

		destroy_worker(victim);
		destroyed = true;
	}

	return destroyed;
}
/**
* manage_workers - manage worker pool
* @worker: self
*
* Assume the manager role and manage gcwq worker pool @worker belongs
* to. At any given time, there can be only zero or one manager per
* gcwq. The exclusion is handled automatically by this function.
*
* The caller can safely start processing works on false return. On
* true return, it's guaranteed that need_to_create_worker() is false
* and may_start_working() is true.
*
* CONTEXT:
* spin_lock_irq(gcwq->lock) which may be released and regrabbed
* multiple times. Does GFP_KERNEL allocations.
*
* RETURNS:
* false if no action was taken and gcwq->lock stayed locked, true if
* some action was taken.
*/
static bool manage_workers(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;
	bool ret = false;

	/* only one manager per gcwq at a time; bail if the role is taken */
	if (gcwq->flags & GCWQ_MANAGING_WORKERS)
		return ret;

	gcwq->flags &= ~GCWQ_MANAGE_WORKERS;
	gcwq->flags |= GCWQ_MANAGING_WORKERS;

	/*
	 * Destroy and then create so that may_start_working() is true
	 * on return.
	 */
	ret |= maybe_destroy_workers(gcwq);
	ret |= maybe_create_worker(gcwq);

	gcwq->flags &= ~GCWQ_MANAGING_WORKERS;

	/*
	 * The trustee might be waiting to take over the manager
	 * position, tell it we're done.
	 */
	if (unlikely(gcwq->trustee))
		wake_up_all(&gcwq->trustee_wait);

	return ret;
}
/**
* move_linked_works - move linked works to a list
* @work: start of series of works to be scheduled
* @head: target list to append @work to
 * @nextp: out parameter for nested worklist walking
*
* Schedule linked works starting from @work to @head. Work series to
* be scheduled starts at @work and includes any consecutive work with
* WORK_STRUCT_LINKED set in its predecessor.
*
* If @nextp is not NULL, it's updated to point to the next work of
* the last scheduled work. This allows move_linked_works() to be
* nested inside outer list_for_each_entry_safe().
*
* CONTEXT:
* spin_lock_irq(gcwq->lock).
*/
static void move_linked_works(struct work_struct *work, struct list_head *head,
			      struct work_struct **nextp)
{
	struct work_struct *n;

	/*
	 * Linked worklist will always end before the end of the list,
	 * use NULL for list head.
	 */
	list_for_each_entry_safe_from(work, n, NULL, entry) {
		/* @work itself always moves; LINKED drags its successors along */
		list_move_tail(&work->entry, head);
		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
			break;
	}

	/*
	 * If we're already inside safe list traversal and have moved
	 * multiple works to the scheduled queue, the next position
	 * needs to be updated.
	 */
	if (nextp)
		*nextp = n;
}
/*
 * Promote the first work on @cwq's delayed list onto the gcwq worklist
 * and account it as active.  Called under gcwq->lock.
 */
static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
{
	struct work_struct *work = list_first_entry(&cwq->delayed_works,
						    struct work_struct, entry);
	struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);

	trace_workqueue_activate_work(work);
	/* moves @work plus any LINKED followers */
	move_linked_works(work, pos, NULL);
	__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
	cwq->nr_active++;
}
/**
* cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
* @cwq: cwq of interest
* @color: color of work which left the queue
* @delayed: for a delayed work
*
* A work either has completed or is removed from pending queue,
* decrement nr_in_flight of its cwq and handle workqueue flushing.
*
* CONTEXT:
* spin_lock_irq(gcwq->lock).
*/
static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
				 bool delayed)
{
	/* ignore uncolored works */
	if (color == WORK_NO_COLOR)
		return;

	cwq->nr_in_flight[color]--;

	if (!delayed) {
		/* an active slot opened up - maybe promote a delayed work */
		cwq->nr_active--;
		if (!list_empty(&cwq->delayed_works)) {
			/* one down, submit a delayed one */
			if (cwq->nr_active < cwq->max_active)
				cwq_activate_first_delayed(cwq);
		}
	}

	/* is flush in progress and are we at the flushing tip? */
	if (likely(cwq->flush_color != color))
		return;

	/* are there still in-flight works? */
	if (cwq->nr_in_flight[color])
		return;

	/* this cwq is done, clear flush_color */
	cwq->flush_color = -1;

	/*
	 * If this was the last cwq, wake up the first flusher. It
	 * will handle the rest.
	 */
	if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
		complete(&cwq->wq->first_flusher->done);
}
/**
* process_one_work - process single work
* @worker: self
* @work: work to process
*
* Process @work. This function contains all the logics necessary to
* process a single work including synchronization against and
* interaction with other workers on the same cpu, queueing and
* flushing. As long as context requirement is met, any worker can
* call this function to process a work.
*
* CONTEXT:
* spin_lock_irq(gcwq->lock) which is released and regrabbed.
*/
static void process_one_work(struct worker *worker, struct work_struct *work)
__releases(&gcwq->lock)
__acquires(&gcwq->lock)
{
	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
	struct global_cwq *gcwq = cwq->gcwq;
	struct hlist_head *bwh = busy_worker_head(gcwq, work);
	bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
	work_func_t f = work->func;
	int work_color;
	struct worker *collision;
#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the struct work_struct from
	 * inside the function that is called from it, this we need to
	 * take into account for lockdep too. To avoid bogus "held
	 * lock freed" warnings as well as problems when looking into
	 * work->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map = work->lockdep_map;
#endif
	/*
	 * A single work shouldn't be executed concurrently by
	 * multiple workers on a single cpu. Check whether anyone is
	 * already processing the work. If so, defer the work to the
	 * currently executing one.
	 */
	collision = __find_worker_executing_work(gcwq, bwh, work);
	if (unlikely(collision)) {
		move_linked_works(work, &collision->scheduled, NULL);
		return;
	}

	/* claim and process */
	debug_work_deactivate(work);
	hlist_add_head(&worker->hentry, bwh);	/* publish ourselves as busy on @work */
	worker->current_work = work;
	worker->current_cwq = cwq;
	work_color = get_work_color(work);

	/* record the current cpu number in the work data and dequeue */
	set_work_cpu(work, gcwq->cpu);
	list_del_init(&work->entry);

	/*
	 * If HIGHPRI_PENDING, check the next work, and, if HIGHPRI,
	 * wake up another worker; otherwise, clear HIGHPRI_PENDING.
	 */
	if (unlikely(gcwq->flags & GCWQ_HIGHPRI_PENDING)) {
		/* nwork is only dereferenced after the list_empty() check */
		struct work_struct *nwork = list_first_entry(&gcwq->worklist,
						struct work_struct, entry);

		if (!list_empty(&gcwq->worklist) &&
		    get_work_cwq(nwork)->wq->flags & WQ_HIGHPRI)
			wake_up_worker(gcwq);
		else
			gcwq->flags &= ~GCWQ_HIGHPRI_PENDING;
	}

	/*
	 * CPU intensive works don't participate in concurrency
	 * management. They're the scheduler's responsibility.
	 */
	if (unlikely(cpu_intensive))
		worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);

	spin_unlock_irq(&gcwq->lock);

	smp_wmb();	/* paired with test_and_set_bit(PENDING) */
	work_clear_pending(work);

	lock_map_acquire_read(&cwq->wq->lockdep_map);
	lock_map_acquire(&lockdep_map);
	trace_workqueue_execute_start(work);
	sec_debug_work_log(worker, work, f, 1);
	f(work);	/* the work function itself, run without gcwq->lock */
	sec_debug_work_log(worker, work, f, 2);
	/*
	 * While we must be careful to not use "work" after this, the trace
	 * point will only record its address.
	 */
	trace_workqueue_execute_end(work);
	lock_map_release(&lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
		printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
		       "%s/0x%08x/%d\n",
		       current->comm, preempt_count(), task_pid_nr(current));
		printk(KERN_ERR " last function: ");
		print_symbol("%s\n", (unsigned long)f);
		debug_show_held_locks(current);
		dump_stack();
	}

	spin_lock_irq(&gcwq->lock);

	/* clear cpu intensive status */
	if (unlikely(cpu_intensive))
		worker_clr_flags(worker, WORKER_CPU_INTENSIVE);

	/* we're done with it, release */
	hlist_del_init(&worker->hentry);
	worker->current_work = NULL;
	worker->current_cwq = NULL;
	cwq_dec_nr_in_flight(cwq, work_color, false);
}
/**
* process_scheduled_works - process scheduled works
* @worker: self
*
* Process all scheduled works. Please note that the scheduled list
* may change while processing a work, so this function repeatedly
* fetches a work from the top and executes it.
*
* CONTEXT:
* spin_lock_irq(gcwq->lock) which may be released and regrabbed
* multiple times.
*/
static void process_scheduled_works(struct worker *worker)
{
	struct list_head *scheduled = &worker->scheduled;

	/* re-fetch from the head each round - processing may queue more works */
	while (!list_empty(scheduled)) {
		struct work_struct *first =
			list_first_entry(scheduled, struct work_struct, entry);

		process_one_work(worker, first);
	}
}
/**
* worker_thread - the worker thread function
* @__worker: self
*
* The gcwq worker thread function. There's a single dynamic pool of
* these per each cpu. These workers process all works regardless of
* their specific target workqueue. The only exception is works which
* belong to workqueues with a rescuer which will be explained in
* rescuer_thread().
*/
static int worker_thread(void *__worker)
{
	struct worker *worker = __worker;
	struct global_cwq *gcwq = worker->gcwq;

	/* tell the scheduler that this is a workqueue worker */
	worker->task->flags |= PF_WQ_WORKER;
woke_up:
	spin_lock_irq(&gcwq->lock);

	/* DIE can be set only while we're idle, checking here is enough */
	if (worker->flags & WORKER_DIE) {
		spin_unlock_irq(&gcwq->lock);
		worker->task->flags &= ~PF_WQ_WORKER;
		return 0;
	}

	worker_leave_idle(worker);
recheck:
	/* no more worker necessary? */
	if (!need_more_worker(gcwq))
		goto sleep;

	/* do we need to manage? */
	if (unlikely(!may_start_working(gcwq)) && manage_workers(worker))
		goto recheck;

	/*
	 * ->scheduled list can only be filled while a worker is
	 * preparing to process a work or actually processing it.
	 * Make sure nobody diddled with it while I was sleeping.
	 */
	BUG_ON(!list_empty(&worker->scheduled));

	/*
	 * When control reaches this point, we're guaranteed to have
	 * at least one idle worker or that someone else has already
	 * assumed the manager role.
	 */
	worker_clr_flags(worker, WORKER_PREP);

	do {
		struct work_struct *work =
			list_first_entry(&gcwq->worklist,
					 struct work_struct, entry);

		if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
			/* optimization path, not strictly necessary */
			process_one_work(worker, work);
			if (unlikely(!list_empty(&worker->scheduled)))
				process_scheduled_works(worker);
		} else {
			/* linked series - pull onto ->scheduled and drain it */
			move_linked_works(work, &worker->scheduled, NULL);
			process_scheduled_works(worker);
		}
	} while (keep_working(gcwq));

	worker_set_flags(worker, WORKER_PREP, false);
sleep:
	if (unlikely(need_to_manage_workers(gcwq)) && manage_workers(worker))
		goto recheck;

	/*
	 * gcwq->lock is held and there's no work to process and no
	 * need to manage, sleep. Workers are woken up only while
	 * holding gcwq->lock or from local cpu, so setting the
	 * current state before releasing gcwq->lock is enough to
	 * prevent losing any event.
	 */
	worker_enter_idle(worker);
	__set_current_state(TASK_INTERRUPTIBLE);
	spin_unlock_irq(&gcwq->lock);
	schedule();
	goto woke_up;
}
/**
* rescuer_thread - the rescuer thread function
* @__wq: the associated workqueue
*
* Workqueue rescuer thread function. There's one rescuer for each
* workqueue which has WQ_RESCUER set.
*
* Regular work processing on a gcwq may block trying to create a new
* worker which uses GFP_KERNEL allocation which has slight chance of
* developing into deadlock if some works currently on the same queue
* need to be processed to satisfy the GFP_KERNEL allocation. This is
* the problem rescuer solves.
*
* When such condition is possible, the gcwq summons rescuers of all
* workqueues which have works queued on the gcwq and let them process
* those works so that forward progress can be guaranteed.
*
* This should happen rarely.
*/
static int rescuer_thread(void *__wq)
{
	struct workqueue_struct *wq = __wq;
	struct worker *rescuer = wq->rescuer;
	struct list_head *scheduled = &rescuer->scheduled;
	bool is_unbound = wq->flags & WQ_UNBOUND;
	unsigned int cpu;

	set_user_nice(current, RESCUER_NICE_LEVEL);
repeat:
	/* set state before the stop check so no wakeup is lost */
	set_current_state(TASK_INTERRUPTIBLE);

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		return 0;
	}

	/*
	 * See whether any cpu is asking for help. Unbounded
	 * workqueues use cpu 0 in mayday_mask for CPU_UNBOUND.
	 */
	for_each_mayday_cpu(cpu, wq->mayday_mask) {
		unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
		struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
		struct global_cwq *gcwq = cwq->gcwq;
		struct work_struct *work, *n;

		__set_current_state(TASK_RUNNING);
		mayday_clear_cpu(cpu, wq->mayday_mask);

		/* migrate to the target cpu if possible */
		rescuer->gcwq = gcwq;
		/* returns with gcwq->lock held regardless of outcome */
		worker_maybe_bind_and_lock(rescuer);

		/*
		 * Slurp in all works issued via this workqueue and
		 * process'em.
		 */
		BUG_ON(!list_empty(&rescuer->scheduled));
		list_for_each_entry_safe(work, n, &gcwq->worklist, entry)
			if (get_work_cwq(work) == cwq)
				move_linked_works(work, scheduled, &n);

		process_scheduled_works(rescuer);

		/*
		 * Leave this gcwq. If keep_working() is %true, notify a
		 * regular worker; otherwise, we end up with 0 concurrency
		 * and stalling the execution.
		 */
		if (keep_working(gcwq))
			wake_up_worker(gcwq);

		spin_unlock_irq(&gcwq->lock);
	}

	schedule();
	goto repeat;
}
/*
 * Barrier work item used by the flush machinery.  It is queued behind a
 * target work item and, when executed, completes ->done to release the
 * flusher waiting in flush_work() and friends.
 */
struct wq_barrier {
	struct work_struct	work;	/* the barrier work item itself */
	struct completion	done;	/* flusher sleeps on this */
};
/* work function of a wq_barrier: wake whoever is waiting on the barrier */
static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *b = container_of(work, struct wq_barrier, work);

	complete(&b->done);
}
/**
 * insert_wq_barrier - insert a barrier work
 * @cwq: cwq to insert barrier into
 * @barr: wq_barrier to insert
 * @target: target work to attach @barr to
 * @worker: worker currently executing @target, NULL if @target is not executing
 *
 * @barr is linked to @target such that @barr is completed only after
 * @target finishes execution.  Please note that the ordering
 * guarantee is observed only with respect to @target and on the local
 * cpu.
 *
 * Currently, a queued barrier can't be canceled.  This is because
 * try_to_grab_pending() can't determine whether the work to be
 * grabbed is at the head of the queue and thus can't clear LINKED
 * flag of the previous work while there must be a valid next work
 * after a work with LINKED flag set.
 *
 * Note that when @worker is non-NULL, @target may be modified
 * underneath us, so we can't reliably determine cwq from @target.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			      struct wq_barrier *barr,
			      struct work_struct *target, struct worker *worker)
{
	struct list_head *head;
	unsigned int linked = 0;

	/*
	 * debugobject calls are safe here even with gcwq->lock locked
	 * as we know for sure that this will not trigger any of the
	 * checks and call back into the fixup functions where we
	 * might deadlock.
	 */
	INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
	/* PENDING is set by hand as the barrier never goes through queue_work() */
	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
	init_completion(&barr->done);

	/*
	 * If @target is currently being executed, schedule the
	 * barrier to the worker; otherwise, put it after @target.
	 */
	if (worker)
		head = worker->scheduled.next;
	else {
		unsigned long *bits = work_data_bits(target);

		head = target->entry.next;
		/* there can already be other linked works, inherit and set */
		linked = *bits & WORK_STRUCT_LINKED;
		__set_bit(WORK_STRUCT_LINKED_BIT, bits);
	}

	debug_work_activate(&barr->work);
	/* WORK_NO_COLOR keeps the barrier out of flush color accounting */
	insert_work(cwq, &barr->work, head,
		    work_color_to_flags(WORK_NO_COLOR) | linked);
}
/**
 * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
 * @wq: workqueue being flushed
 * @flush_color: new flush color, < 0 for no-op
 * @work_color: new work color, < 0 for no-op
 *
 * Prepare cwqs for workqueue flushing.
 *
 * If @flush_color is non-negative, flush_color on all cwqs should be
 * -1.  If no cwq has in-flight commands at the specified color, all
 * cwq->flush_color's stay at -1 and %false is returned.  If any cwq
 * has in flight commands, its cwq->flush_color is set to
 * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
 * wakeup logic is armed and %true is returned.
 *
 * The caller should have initialized @wq->first_flusher prior to
 * calling this function with non-negative @flush_color.  If
 * @flush_color is negative, no flush color update is done and %false
 * is returned.
 *
 * If @work_color is non-negative, all cwqs should have the same
 * work_color which is previous to @work_color and all will be
 * advanced to @work_color.
 *
 * CONTEXT:
 * mutex_lock(wq->flush_mutex).
 *
 * RETURNS:
 * %true if @flush_color >= 0 and there's something to flush.  %false
 * otherwise.
 */
static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
				      int flush_color, int work_color)
{
	bool wait = false;
	unsigned int cpu;

	if (flush_color >= 0) {
		BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
		/*
		 * Bias the count by 1 so the completion can't fire while
		 * cwqs are still being armed; dropped again below.
		 */
		atomic_set(&wq->nr_cwqs_to_flush, 1);
	}

	for_each_cwq_cpu(cpu, wq) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		struct global_cwq *gcwq = cwq->gcwq;

		spin_lock_irq(&gcwq->lock);

		if (flush_color >= 0) {
			BUG_ON(cwq->flush_color != -1);

			if (cwq->nr_in_flight[flush_color]) {
				cwq->flush_color = flush_color;
				atomic_inc(&wq->nr_cwqs_to_flush);
				wait = true;
			}
		}

		if (work_color >= 0) {
			BUG_ON(work_color != work_next_color(cwq->work_color));
			cwq->work_color = work_color;
		}

		spin_unlock_irq(&gcwq->lock);
	}

	/* drop the bias; completes immediately if nothing was in flight */
	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
		complete(&wq->first_flusher->done);

	return wait;
}
/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	struct wq_flusher this_flusher = {
		.list = LIST_HEAD_INIT(this_flusher.list),
		.flush_color = -1,
		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
	};
	int next_color;

	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);

	mutex_lock(&wq->flush_mutex);

	/*
	 * Start-to-wait phase
	 */
	next_color = work_next_color(wq->work_color);

	if (next_color != wq->flush_color) {
		/*
		 * Color space is not full.  The current work_color
		 * becomes our flush_color and work_color is advanced
		 * by one.
		 */
		BUG_ON(!list_empty(&wq->flusher_overflow));
		this_flusher.flush_color = wq->work_color;
		wq->work_color = next_color;

		if (!wq->first_flusher) {
			/* no flush in progress, become the first flusher */
			BUG_ON(wq->flush_color != this_flusher.flush_color);

			wq->first_flusher = &this_flusher;

			if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
						       wq->work_color)) {
				/* nothing to flush, done */
				wq->flush_color = next_color;
				wq->first_flusher = NULL;
				goto out_unlock;
			}
		} else {
			/* wait in queue */
			BUG_ON(wq->flush_color == this_flusher.flush_color);
			list_add_tail(&this_flusher.list, &wq->flusher_queue);
			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
		}
	} else {
		/*
		 * Oops, color space is full, wait on overflow queue.
		 * The next flush completion will assign us
		 * flush_color and transfer to flusher_queue.
		 */
		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
	}

	mutex_unlock(&wq->flush_mutex);

	wait_for_completion(&this_flusher.done);

	/*
	 * Wake-up-and-cascade phase
	 *
	 * First flushers are responsible for cascading flushes and
	 * handling overflow.  Non-first flushers can simply return.
	 */
	if (wq->first_flusher != &this_flusher)
		return;

	mutex_lock(&wq->flush_mutex);

	/* we might have raced, check again with mutex held */
	if (wq->first_flusher != &this_flusher)
		goto out_unlock;

	wq->first_flusher = NULL;

	BUG_ON(!list_empty(&this_flusher.list));
	BUG_ON(wq->flush_color != this_flusher.flush_color);

	while (true) {
		struct wq_flusher *next, *tmp;

		/* complete all the flushers sharing the current flush color */
		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
			if (next->flush_color != wq->flush_color)
				break;
			list_del_init(&next->list);
			complete(&next->done);
		}

		BUG_ON(!list_empty(&wq->flusher_overflow) &&
		       wq->flush_color != work_next_color(wq->work_color));

		/* this flush_color is finished, advance by one */
		wq->flush_color = work_next_color(wq->flush_color);

		/* one color has been freed, handle overflow queue */
		if (!list_empty(&wq->flusher_overflow)) {
			/*
			 * Assign the same color to all overflowed
			 * flushers, advance work_color and append to
			 * flusher_queue.  This is the start-to-wait
			 * phase for these overflowed flushers.
			 */
			list_for_each_entry(tmp, &wq->flusher_overflow, list)
				tmp->flush_color = wq->work_color;

			wq->work_color = work_next_color(wq->work_color);

			list_splice_tail_init(&wq->flusher_overflow,
					      &wq->flusher_queue);
			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
		}

		if (list_empty(&wq->flusher_queue)) {
			BUG_ON(wq->flush_color != wq->work_color);
			break;
		}

		/*
		 * Need to flush more colors.  Make the next flusher
		 * the new first flusher and arm cwqs.
		 * NOTE: @next deliberately retains the value it had when
		 * the list_for_each_entry_safe() above stopped.
		 */
		BUG_ON(wq->flush_color == wq->work_color);
		BUG_ON(wq->flush_color != next->flush_color);

		list_del_init(&next->list);
		wq->first_flusher = next;

		if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
			break;

		/*
		 * Meh... this color is already done, clear first
		 * flusher and repeat cascading.
		 */
		wq->first_flusher = NULL;
	}

out_unlock:
	mutex_unlock(&wq->flush_mutex);
}
EXPORT_SYMBOL_GPL(flush_workqueue);
/**
 * drain_workqueue - drain a workqueue
 * @wq: workqueue to drain
 *
 * Wait until the workqueue becomes empty.  While draining is in progress,
 * only chain queueing is allowed.  IOW, only currently pending or running
 * work items on @wq can queue further work items on it.  @wq is flushed
 * repeatedly until it becomes empty.  The number of flushing is determined
 * by the depth of chaining and should be relatively short.  Whine if it
 * takes too long.
 */
void drain_workqueue(struct workqueue_struct *wq)
{
	unsigned int flush_cnt = 0;
	unsigned int cpu;

	/*
	 * __queue_work() needs to test whether there are drainers, is much
	 * hotter than drain_workqueue() and already looks at @wq->flags.
	 * Use WQ_DRAINING so that queue doesn't have to check nr_drainers.
	 */
	spin_lock(&workqueue_lock);
	if (!wq->nr_drainers++)
		wq->flags |= WQ_DRAINING;
	spin_unlock(&workqueue_lock);
reflush:
	flush_workqueue(wq);

	for_each_cwq_cpu(cpu, wq) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		bool drained;

		spin_lock_irq(&cwq->gcwq->lock);
		drained = !cwq->nr_active && list_empty(&cwq->delayed_works);
		spin_unlock_irq(&cwq->gcwq->lock);

		if (drained)
			continue;

		/* whine on the 10th try and then every 100th up to 1000 */
		if (++flush_cnt == 10 ||
		    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
			pr_warning("workqueue %s: flush on destruction isn't complete after %u tries\n",
				   wq->name, flush_cnt);
		goto reflush;
	}

	/* drop the draining reference; clear WQ_DRAINING on the last drainer */
	spin_lock(&workqueue_lock);
	if (!--wq->nr_drainers)
		wq->flags &= ~WQ_DRAINING;
	spin_unlock(&workqueue_lock);
}
EXPORT_SYMBOL_GPL(drain_workqueue);
/*
 * start_flush_work - arm a wq_barrier behind @work if it needs flushing
 * @work: the work item to flush
 * @barr: on-stack barrier to queue behind @work
 * @wait_executing: also wait for @work if it is currently executing
 *
 * Inserts @barr after @work if @work is still queued, or (when
 * @wait_executing) after the instance currently being executed.
 *
 * RETURNS:
 * %true if @barr was queued and the caller must wait on barr->done,
 * %false if there's nothing to wait for.
 */
static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
			     bool wait_executing)
{
	struct worker *worker = NULL;
	struct global_cwq *gcwq;
	struct cpu_workqueue_struct *cwq;

	might_sleep();
	gcwq = get_work_gcwq(work);
	if (!gcwq)
		return false;

	spin_lock_irq(&gcwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued to a different gcwq under us, we
		 * are not going to wait.
		 */
		smp_rmb();
		cwq = get_work_cwq(work);
		if (unlikely(!cwq || gcwq != cwq->gcwq))
			goto already_gone;
	} else if (wait_executing) {
		worker = find_worker_executing_work(gcwq, work);
		if (!worker)
			goto already_gone;
		cwq = worker->current_cwq;
	} else
		goto already_gone;

	insert_wq_barrier(cwq, barr, work, worker);
	spin_unlock_irq(&gcwq->lock);

	/*
	 * If @max_active is 1 or rescuer is in use, flushing another work
	 * item on the same workqueue may lead to deadlock.  Make sure the
	 * flusher is not running on the same workqueue by verifying write
	 * access.
	 */
	if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
		lock_map_acquire(&cwq->wq->lockdep_map);
	else
		lock_map_acquire_read(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	return true;
already_gone:
	spin_unlock_irq(&gcwq->lock);
	return false;
}
/**
 * flush_work - wait for a work to finish executing the last queueing instance
 * @work: the work to flush
 *
 * Wait until @work has finished execution.  This function considers
 * only the last queueing instance of @work.  If @work has been
 * enqueued across different CPUs on a non-reentrant workqueue or on
 * multiple workqueues, @work might still be executing on return on
 * some of the CPUs from earlier queueing.
 *
 * If @work was queued only on a non-reentrant, ordered or unbound
 * workqueue, @work is guaranteed to be idle on return if it hasn't
 * been requeued since flush started.
 *
 * RETURNS:
 * %true if flush_work() waited for the work to finish execution,
 * %false if it was already idle.
 */
bool flush_work(struct work_struct *work)
{
	struct wq_barrier barr;

	/* nothing queued or executing?  done already */
	if (!start_flush_work(work, &barr, true))
		return false;

	wait_for_completion(&barr.done);
	destroy_work_on_stack(&barr.work);
	return true;
}
EXPORT_SYMBOL_GPL(flush_work);
/* wait for any instance of @work currently executing on @gcwq to finish */
static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
{
	struct wq_barrier barr;
	struct worker *worker;

	spin_lock_irq(&gcwq->lock);

	worker = find_worker_executing_work(gcwq, work);
	if (likely(!worker)) {
		spin_unlock_irq(&gcwq->lock);
		return false;
	}

	/* @work is running here; queue a barrier behind it and wait */
	insert_wq_barrier(worker->current_cwq, &barr, work, worker);
	spin_unlock_irq(&gcwq->lock);

	wait_for_completion(&barr.done);
	destroy_work_on_stack(&barr.work);
	return true;
}
static bool wait_on_work(struct work_struct *work)
{
bool ret = false;
int cpu;
might_sleep();
lock_map_acquire(&work->lockdep_map);
lock_map_release(&work->lockdep_map);
for_each_gcwq_cpu(cpu)
ret |= wait_on_cpu_work(get_gcwq(cpu), work);
return ret;
}
/**
 * flush_work_sync - wait until a work has finished execution
 * @work: the work to flush
 *
 * Wait until @work has finished execution.  On return, it's
 * guaranteed that all queueing instances of @work which happened
 * before this function is called are finished.  In other words, if
 * @work hasn't been requeued since this function was called, @work is
 * guaranteed to be idle on return.
 *
 * RETURNS:
 * %true if flush_work_sync() waited for the work to finish execution,
 * %false if it was already idle.
 */
bool flush_work_sync(struct work_struct *work)
{
	struct wq_barrier barr;
	bool queued, executed;

	/* executions are waited for separately; queue barr only if pending */
	queued = start_flush_work(work, &barr, false);

	/* wait for any in-flight executions to finish */
	executed = wait_on_work(work);

	/* finally reap the barrier queued behind the pending instance */
	if (queued) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
	}

	return queued || executed;
}
EXPORT_SYMBOL_GPL(flush_work_sync);
/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 *
 * RETURNS:
 *  0 - @work was idle and PENDING has been newly set by us
 *  1 - @work was stolen off its worklist before execution
 * -1 - queueing is in progress; the caller must retry
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct global_cwq *gcwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */
	gcwq = get_work_gcwq(work);
	if (!gcwq)
		return ret;

	spin_lock_irq(&gcwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong gcwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (gcwq == get_work_gcwq(work)) {
			debug_work_deactivate(work);
			list_del_init(&work->entry);
			cwq_dec_nr_in_flight(get_work_cwq(work),
				get_work_color(work),
				*work_data_bits(work) & WORK_STRUCT_DELAYED);
			ret = 1;
		}
	}
	spin_unlock_irq(&gcwq->lock);

	return ret;
}
/*
 * Common implementation of cancel_work_sync() and
 * cancel_delayed_work_sync().  Kills @timer (if any), grabs PENDING off
 * @work and waits for any in-flight execution to finish.
 *
 * RETURNS:
 * %true if @work (or its timer) was pending, %false otherwise.
 */
static bool __cancel_work_timer(struct work_struct *work,
				struct timer_list* timer)
{
	int ret;

	do {
		/* deleting a live timer also counts as "was pending" */
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		/* always wait out a running instance before re-checking */
		wait_on_work(work);
	} while (unlikely(ret < 0));	/* -1: queueing in progress, retry */

	clear_work_data(work);
	return ret;
}
/**
 * cancel_work_sync - cancel a work and wait for it to finish
 * @work: the work to cancel
 *
 * Cancel @work and wait for its execution to finish.  This function
 * can be used even if the work re-queues itself or migrates to
 * another workqueue.  On return from this function, @work is
 * guaranteed to be not pending or executing on any CPU.
 *
 * cancel_work_sync(&delayed_work->work) must not be used for
 * delayed_work's.  Use cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the workqueue on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * RETURNS:
 * %true if @work was pending, %false otherwise.
 */
bool cancel_work_sync(struct work_struct *work)
{
	/* no timer to worry about for a plain work item */
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);
/**
 * flush_delayed_work - wait for a dwork to finish executing the last queueing
 * @dwork: the delayed work to flush
 *
 * Delayed timer is cancelled and the pending work is queued for
 * immediate execution.  Like flush_work(), this function only
 * considers the last queueing instance of @dwork.
 *
 * RETURNS:
 * %true if flush_work() waited for the work to finish execution,
 * %false if it was already idle.
 */
bool flush_delayed_work(struct delayed_work *dwork)
{
	/* if the timer was still ticking, fire the work right now */
	if (del_timer_sync(&dwork->timer)) {
		struct workqueue_struct *wq = get_work_cwq(&dwork->work)->wq;

		__queue_work(raw_smp_processor_id(), wq, &dwork->work);
	}

	return flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);
/**
 * flush_delayed_work_sync - wait for a dwork to finish
 * @dwork: the delayed work to flush
 *
 * Delayed timer is cancelled and the pending work is queued for
 * execution immediately.  Other than timer handling, its behavior
 * is identical to flush_work_sync().
 *
 * RETURNS:
 * %true if flush_work_sync() waited for the work to finish execution,
 * %false if it was already idle.
 */
bool flush_delayed_work_sync(struct delayed_work *dwork)
{
	/* if the timer was still ticking, fire the work right now */
	if (del_timer_sync(&dwork->timer)) {
		struct workqueue_struct *wq = get_work_cwq(&dwork->work)->wq;

		__queue_work(raw_smp_processor_id(), wq, &dwork->work);
	}

	return flush_work_sync(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work_sync);
/**
 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
 * @dwork: the delayed work to cancel
 *
 * This is cancel_work_sync() for delayed works.
 *
 * RETURNS:
 * %true if @dwork was pending, %false otherwise.
 */
bool cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns zero if @work was already on the kernel-global workqueue and
 * non-zero otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}
EXPORT_SYMBOL(schedule_work);
/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);
/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
			  unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);
/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			     struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
/**
 * schedule_on_each_cpu - execute a function synchronously on each online CPU
 * @func: the function to call
 *
 * schedule_on_each_cpu() executes @func on each online CPU using the
 * system workqueue and blocks until all CPUs have completed.
 * schedule_on_each_cpu() is very slow.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int schedule_on_each_cpu(work_func_t func)
{
	struct work_struct __percpu *works;
	int cpu;

	works = alloc_percpu(struct work_struct);
	if (works == NULL)
		return -ENOMEM;

	get_online_cpus();

	/* first fan out one work item per online cpu ... */
	for_each_online_cpu(cpu) {
		struct work_struct *w = per_cpu_ptr(works, cpu);

		INIT_WORK(w, func);
		schedule_work_on(cpu, w);
	}

	/* ... then wait for every one of them to finish */
	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));

	put_online_cpus();
	free_percpu(works);

	return 0;
}
/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue (system_wq) and blocks
 * until its completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
void flush_scheduled_work(void)
{
	flush_workqueue(system_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn: the function to execute
 * @ew: guaranteed storage for the execute work structure (must
 * be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (in_interrupt()) {
		/* no process context here; defer to the global workqueue */
		INIT_WORK(&ew->work, fn);
		schedule_work(&ew->work);
		return 1;
	}

	fn(&ew->work);
	return 0;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
/* keventd_up - has the kernel-global workqueue been initialized yet? */
int keventd_up(void)
{
	return system_wq != NULL;
}
/*
 * Allocate the per-cpu (or single, for WQ_UNBOUND) cwq storage for @wq.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int alloc_cwqs(struct workqueue_struct *wq)
{
	/*
	 * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
	 * Make sure that the alignment isn't lower than that of
	 * unsigned long long.
	 */
	const size_t size = sizeof(struct cpu_workqueue_struct);
	const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
				   __alignof__(unsigned long long));

	if (!(wq->flags & WQ_UNBOUND))
		wq->cpu_wq.pcpu = __alloc_percpu(size, align);
	else {
		void *ptr;

		/*
		 * Allocate enough room to align cwq and put an extra
		 * pointer at the end pointing back to the originally
		 * allocated pointer which will be used for free.
		 */
		ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
		if (ptr) {
			wq->cpu_wq.single = PTR_ALIGN(ptr, align);
			/* stash the original pointer right after the cwq */
			*(void **)(wq->cpu_wq.single + 1) = ptr;
		}
	}

	/* just in case, make sure it's actually aligned */
	BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
	return wq->cpu_wq.v ? 0 : -ENOMEM;
}
/* release the cwq storage allocated by alloc_cwqs() */
static void free_cwqs(struct workqueue_struct *wq)
{
	if (wq->flags & WQ_UNBOUND) {
		/* the pointer to free is stored right after the cwq */
		if (wq->cpu_wq.single)
			kfree(*(void **)(wq->cpu_wq.single + 1));
	} else {
		free_percpu(wq->cpu_wq.pcpu);
	}
}
/* clamp @max_active into the valid range for @flags, whining if needed */
static int wq_clamp_max_active(int max_active, unsigned int flags,
			       const char *name)
{
	const int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE :
					     WQ_MAX_ACTIVE;

	if (max_active >= 1 && max_active <= lim)
		return max_active;

	printk(KERN_WARNING "workqueue: max_active %d requested for %s "
	       "is out of range, clamping between %d and %d\n",
	       max_active, name, 1, lim);

	return clamp_val(max_active, 1, lim);
}
/*
 * Allocate and initialize a workqueue.  @fmt and the trailing varargs
 * format the workqueue name; @key/@lock_name seed its lockdep map.
 * Returns the new workqueue or NULL on allocation/thread-creation failure.
 */
struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
					       unsigned int flags,
					       int max_active,
					       struct lock_class_key *key,
					       const char *lock_name, ...)
{
	va_list args, args1;
	struct workqueue_struct *wq;
	unsigned int cpu;
	size_t namelen;

	/* determine namelen, allocate wq and format name */
	va_start(args, lock_name);
	va_copy(args1, args);
	namelen = vsnprintf(NULL, 0, fmt, args) + 1;

	wq = kzalloc(sizeof(*wq) + namelen, GFP_KERNEL);
	if (!wq)
		goto err;

	vsnprintf(wq->name, namelen, fmt, args1);
	va_end(args);
	va_end(args1);

	/*
	 * Workqueues which may be used during memory reclaim should
	 * have a rescuer to guarantee forward progress.
	 */
	if (flags & WQ_MEM_RECLAIM)
		flags |= WQ_RESCUER;

	/*
	 * Unbound workqueues aren't concurrency managed and should be
	 * dispatched to workers immediately.
	 */
	if (flags & WQ_UNBOUND)
		flags |= WQ_HIGHPRI;

	max_active = max_active ?: WQ_DFL_ACTIVE;
	max_active = wq_clamp_max_active(max_active, flags, wq->name);

	/* init wq */
	wq->flags = flags;
	wq->saved_max_active = max_active;
	mutex_init(&wq->flush_mutex);
	atomic_set(&wq->nr_cwqs_to_flush, 0);
	INIT_LIST_HEAD(&wq->flusher_queue);
	INIT_LIST_HEAD(&wq->flusher_overflow);

	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	INIT_LIST_HEAD(&wq->list);

	if (alloc_cwqs(wq) < 0)
		goto err;

	for_each_cwq_cpu(cpu, wq) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		struct global_cwq *gcwq = get_gcwq(cpu);

		/* cwq address must leave WORK_STRUCT_FLAG_MASK bits free */
		BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
		cwq->gcwq = gcwq;
		cwq->wq = wq;
		cwq->flush_color = -1;
		cwq->max_active = max_active;
		INIT_LIST_HEAD(&cwq->delayed_works);
	}

	if (flags & WQ_RESCUER) {
		struct worker *rescuer;

		if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL))
			goto err;

		wq->rescuer = rescuer = alloc_worker();
		if (!rescuer)
			goto err;

		rescuer->task = kthread_create(rescuer_thread, wq, "%s",
					       wq->name);
		if (IS_ERR(rescuer->task))
			goto err;

		rescuer->task->flags |= PF_THREAD_BOUND;
		wake_up_process(rescuer->task);
	}

	/*
	 * workqueue_lock protects global freeze state and workqueues
	 * list.  Grab it, set max_active accordingly and add the new
	 * workqueue to workqueues list.
	 */
	spin_lock(&workqueue_lock);

	if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
		for_each_cwq_cpu(cpu, wq)
			get_cwq(cpu, wq)->max_active = 0;

	list_add(&wq->list, &workqueues);

	spin_unlock(&workqueue_lock);

	return wq;
err:
	/* the free helpers tolerate partially-initialized state */
	if (wq) {
		free_cwqs(wq);
		free_mayday_mask(wq->mayday_mask);
		kfree(wq->rescuer);
		kfree(wq);
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue.  All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	unsigned int cpu;

	/* drain it before proceeding with destruction */
	drain_workqueue(wq);

	/*
	 * wq list is used to freeze wq, remove from list after
	 * flushing is complete in case freeze races us.
	 */
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);

	/* sanity check: nothing may be left in flight after draining */
	for_each_cwq_cpu(cpu, wq) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		int i;

		for (i = 0; i < WORK_NR_COLORS; i++)
			BUG_ON(cwq->nr_in_flight[i]);
		BUG_ON(cwq->nr_active);
		BUG_ON(!list_empty(&cwq->delayed_works));
	}

	if (wq->flags & WQ_RESCUER) {
		kthread_stop(wq->rescuer->task);
		free_mayday_mask(wq->mayday_mask);
		kfree(wq->rescuer);
	}

	free_cwqs(wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
/**
 * workqueue_set_max_active - adjust max_active of a workqueue
 * @wq: target workqueue
 * @max_active: new max_active value.
 *
 * Set max_active of @wq to @max_active.
 *
 * CONTEXT:
 * Don't call from IRQ context.
 */
void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
{
	unsigned int cpu;

	max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);

	spin_lock(&workqueue_lock);

	wq->saved_max_active = max_active;

	for_each_cwq_cpu(cpu, wq) {
		struct global_cwq *gcwq = get_gcwq(cpu);

		spin_lock_irq(&gcwq->lock);

		/* frozen freezable wqs keep max_active at 0 until thawed */
		if (!(wq->flags & WQ_FREEZABLE) ||
		    !(gcwq->flags & GCWQ_FREEZING))
			get_cwq(gcwq->cpu, wq)->max_active = max_active;

		spin_unlock_irq(&gcwq->lock);
	}

	spin_unlock(&workqueue_lock);
}
EXPORT_SYMBOL_GPL(workqueue_set_max_active);
/**
* workqueue_congested - test whether a workqueue is congested
* @cpu: CPU in question
* @wq: target workqueue
*
* Test whether @wq's cpu workqueue for @cpu is congested. There is
* no synchronization around this function and the test result is
* unreliable and only useful as advisory hints or for debugging.
*
* RETURNS:
* %true if congested, %false otherwise.
*/
bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
{
struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
return !list_empty(&cwq->delayed_works);
}
EXPORT_SYMBOL_GPL(workqueue_congested);
/**
 * work_cpu - return the last known associated cpu for @work
 * @work: the work of interest
 *
 * RETURNS:
 * CPU number if @work was ever queued.  WORK_CPU_NONE otherwise.
 */
unsigned int work_cpu(struct work_struct *work)
{
	struct global_cwq *gcwq = get_work_gcwq(work);

	if (!gcwq)
		return WORK_CPU_NONE;
	return gcwq->cpu;
}
EXPORT_SYMBOL_GPL(work_cpu);
/**
 * work_busy - test whether a work is currently pending or running
 * @work: the work to be tested
 *
 * Test whether @work is currently pending or running.  There is no
 * synchronization around this function and the test result is
 * unreliable and only useful as advisory hints or for debugging.
 * Especially for reentrant wqs, the pending state might hide the
 * running state.
 *
 * RETURNS:
 * OR'd bitmask of WORK_BUSY_* bits.
 */
unsigned int work_busy(struct work_struct *work)
{
	struct global_cwq *gcwq = get_work_gcwq(work);
	unsigned long flags;
	unsigned int ret = 0;

	/*
	 * Never queued: neither pending nor running.  Return the empty
	 * bitmask (not `false` — this function returns unsigned int).
	 */
	if (!gcwq)
		return 0;

	spin_lock_irqsave(&gcwq->lock, flags);

	if (work_pending(work))
		ret |= WORK_BUSY_PENDING;
	if (find_worker_executing_work(gcwq, work))
		ret |= WORK_BUSY_RUNNING;

	spin_unlock_irqrestore(&gcwq->lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(work_busy);
/*
* CPU hotplug.
*
* There are two challenges in supporting CPU hotplug. Firstly, there
* are a lot of assumptions on strong associations among work, cwq and
* gcwq which make migrating pending and scheduled works very
* difficult to implement without impacting hot paths. Secondly,
* gcwqs serve mix of short, long and very long running works making
* blocked draining impractical.
*
* This is solved by allowing a gcwq to be detached from CPU, running
* it with unbound (rogue) workers and allowing it to be reattached
* later if the cpu comes back online. A separate thread is created
* to govern a gcwq in such state and is called the trustee of the
* gcwq.
*
* Trustee states and their descriptions.
*
* START Command state used on startup. On CPU_DOWN_PREPARE, a
* new trustee is started with this state.
*
* IN_CHARGE Once started, trustee will enter this state after
* assuming the manager role and making all existing
* workers rogue. DOWN_PREPARE waits for trustee to
* enter this state. After reaching IN_CHARGE, trustee
* tries to execute the pending worklist until it's empty
* and the state is set to BUTCHER, or the state is set
* to RELEASE.
*
* BUTCHER Command state which is set by the cpu callback after
* the cpu has gone down. Once this state is set trustee
* knows that there will be no new works on the worklist
* and once the worklist is empty it can proceed to
* killing idle workers.
*
* RELEASE Command state which is set by the cpu callback if the
* cpu down has been canceled or it has come online
* again. After recognizing this state, trustee stops
* trying to drain or butcher and clears ROGUE, rebinds
* all remaining workers back to the cpu and releases
* manager role.
*
* DONE Trustee will enter this state after BUTCHER or RELEASE
* is complete.
*
* trustee CPU draining
* took over down complete
* START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE
* | | ^
* | CPU is back online v return workers |
* ----------------> RELEASE --------------
*/
/**
 * trustee_wait_event_timeout - timed event wait for trustee
 * @cond: condition to wait for
 * @timeout: timeout in jiffies
 *
 * wait_event_timeout() for trustee to use.  Handles locking and
 * checks for RELEASE request.  Expects a local variable @gcwq to be
 * in scope at the expansion site.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.  To be used by trustee.
 *
 * RETURNS:
 * Positive indicating left time if @cond is satisfied, 0 if timed
 * out, -1 if canceled.
 */
#define trustee_wait_event_timeout(cond, timeout) ({			\
	long __ret = (timeout);						\
	while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) &&	\
	       __ret) {							\
		spin_unlock_irq(&gcwq->lock);				\
		__wait_event_timeout(gcwq->trustee_wait, (cond) ||	\
			(gcwq->trustee_state == TRUSTEE_RELEASE),	\
			__ret);						\
		spin_lock_irq(&gcwq->lock);				\
	}								\
	gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret);		\
})
/**
 * trustee_wait_event - event wait for trustee
 * @cond: condition to wait for
 *
 * wait_event() for trustee to use.  Automatically handles locking and
 * checks for RELEASE request (implemented on top of
 * trustee_wait_event_timeout() with an infinite timeout, so the same
 * side-effect-free requirement on @cond applies).
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.  To be used by trustee.
 *
 * RETURNS:
 * 0 if @cond is satisfied, -1 if canceled.
 */
#define trustee_wait_event(cond) ({					\
	long __ret1;							\
	__ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
	__ret1 < 0 ? -1 : 0;						\
})
/**
 * trustee_thread - trustee kthread body for a gcwq whose CPU goes down
 * @__gcwq: the global_cwq this trustee is in charge of
 *
 * Implements the trustee state machine documented above: assume the
 * manager role, mark every worker rogue, drain the worklist while the
 * CPU goes away, butcher idle workers once BUTCHER is set and, on
 * RELEASE, schedule rebind work for the remaining busy workers.
 *
 * RETURNS:
 * Always 0.
 */
static int __cpuinit trustee_thread(void *__gcwq)
{
	struct global_cwq *gcwq = __gcwq;
	struct worker *worker;
	struct work_struct *work;
	struct hlist_node *pos;
	long rc;
	int i;

	/* the trustee was kthread_bind()'d to the target CPU before waking */
	BUG_ON(gcwq->cpu != smp_processor_id());

	spin_lock_irq(&gcwq->lock);
	/*
	 * Claim the manager position and make all workers rogue.
	 * Trustee must be bound to the target cpu and can't be
	 * cancelled.
	 */
	rc = trustee_wait_event(!(gcwq->flags & GCWQ_MANAGING_WORKERS));
	BUG_ON(rc < 0);

	gcwq->flags |= GCWQ_MANAGING_WORKERS;

	list_for_each_entry(worker, &gcwq->idle_list, entry)
		worker->flags |= WORKER_ROGUE;

	for_each_busy_worker(worker, i, pos, gcwq)
		worker->flags |= WORKER_ROGUE;

	/*
	 * Call schedule() so that we cross rq->lock and thus can
	 * guarantee sched callbacks see the rogue flag.  This is
	 * necessary as scheduler callbacks may be invoked from other
	 * cpus.
	 */
	spin_unlock_irq(&gcwq->lock);
	schedule();
	spin_lock_irq(&gcwq->lock);

	/*
	 * Sched callbacks are disabled now.  Zap nr_running.  After
	 * this, nr_running stays zero and need_more_worker() and
	 * keep_working() are always true as long as the worklist is
	 * not empty.
	 */
	atomic_set(get_gcwq_nr_running(gcwq->cpu), 0);

	spin_unlock_irq(&gcwq->lock);
	del_timer_sync(&gcwq->idle_timer);
	spin_lock_irq(&gcwq->lock);

	/*
	 * We're now in charge.  Notify and proceed to drain.  We need
	 * to keep the gcwq running during the whole CPU down
	 * procedure as other cpu hotunplug callbacks may need to
	 * flush currently running tasks.
	 */
	gcwq->trustee_state = TRUSTEE_IN_CHARGE;
	wake_up_all(&gcwq->trustee_wait);

	/*
	 * The original cpu is in the process of dying and may go away
	 * anytime now.  When that happens, we and all workers would
	 * be migrated to other cpus.  Try draining any left work.  We
	 * want to get it over with ASAP - spam rescuers, wake up as
	 * many idlers as necessary and create new ones till the
	 * worklist is empty.  Note that if the gcwq is frozen, there
	 * may be frozen works in freezable cwqs.  Don't declare
	 * completion while frozen.
	 */
	while (gcwq->nr_workers != gcwq->nr_idle ||
	       gcwq->flags & GCWQ_FREEZING ||
	       gcwq->trustee_state == TRUSTEE_IN_CHARGE) {
		int nr_works = 0;

		list_for_each_entry(work, &gcwq->worklist, entry) {
			send_mayday(work);
			nr_works++;
		}

		/* wake at most one idler per pending work item */
		list_for_each_entry(worker, &gcwq->idle_list, entry) {
			if (!nr_works--)
				break;
			wake_up_process(worker->task);
		}

		if (need_to_create_worker(gcwq)) {
			spin_unlock_irq(&gcwq->lock);
			worker = create_worker(gcwq, false);
			spin_lock_irq(&gcwq->lock);
			if (worker) {
				worker->flags |= WORKER_ROGUE;
				start_worker(worker);
			}
		}

		/* give a breather */
		if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
			break;
	}

	/*
	 * Either all works have been scheduled and cpu is down, or
	 * cpu down has already been canceled.  Wait for and butcher
	 * all workers till we're canceled.
	 */
	do {
		rc = trustee_wait_event(!list_empty(&gcwq->idle_list));
		while (!list_empty(&gcwq->idle_list))
			destroy_worker(list_first_entry(&gcwq->idle_list,
							struct worker, entry));
	} while (gcwq->nr_workers && rc >= 0);

	/*
	 * At this point, either draining has completed and no worker
	 * is left, or cpu down has been canceled or the cpu is being
	 * brought back up.  There shouldn't be any idle one left.
	 * Tell the remaining busy ones to rebind once it finishes the
	 * currently scheduled works by scheduling the rebind_work.
	 */
	WARN_ON(!list_empty(&gcwq->idle_list));

	for_each_busy_worker(worker, i, pos, gcwq) {
		struct work_struct *rebind_work = &worker->rebind_work;
		unsigned long worker_flags = worker->flags;

		/*
		 * Rebind_work may race with future cpu hotplug
		 * operations.  Use a separate flag to mark that
		 * rebinding is scheduled.  The morphing should
		 * be atomic.
		 */
		worker_flags |= WORKER_REBIND;
		worker_flags &= ~WORKER_ROGUE;
		ACCESS_ONCE(worker->flags) = worker_flags;

		/* queue rebind_work, wq doesn't matter, use the default one */
		if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
				     work_data_bits(rebind_work)))
			continue;

		debug_work_activate(rebind_work);
		insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
			    worker->scheduled.next,
			    work_color_to_flags(WORK_NO_COLOR));
	}

	/* relinquish manager role */
	gcwq->flags &= ~GCWQ_MANAGING_WORKERS;

	/* notify completion */
	gcwq->trustee = NULL;
	gcwq->trustee_state = TRUSTEE_DONE;
	wake_up_all(&gcwq->trustee_wait);
	spin_unlock_irq(&gcwq->lock);
	return 0;
}
/**
 * wait_trustee_state - wait for trustee to enter the specified state
 * @gcwq: gcwq the trustee of interest belongs to
 * @state: target state to wait for
 *
 * Sleep until the trustee reaches @state.  TRUSTEE_DONE always
 * satisfies the wait as the trustee may already have finished.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.  To be used by cpu_callback.
 */
static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
__releases(&gcwq->lock)
__acquires(&gcwq->lock)
{
	/* already in the target (or terminal) state?  nothing to wait for */
	if (gcwq->trustee_state == state ||
	    gcwq->trustee_state == TRUSTEE_DONE)
		return;

	spin_unlock_irq(&gcwq->lock);
	__wait_event(gcwq->trustee_wait,
		     gcwq->trustee_state == state ||
		     gcwq->trustee_state == TRUSTEE_DONE);
	spin_lock_irq(&gcwq->lock);
}
/*
 * Central CPU hotplug callback for workqueues.  The UP path readies a
 * first idle worker for the CPU's gcwq; the DOWN path hands the gcwq
 * over to a trustee thread (see trustee_thread() above) and takes it
 * back if/when the CPU returns.
 */
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct global_cwq *gcwq = get_gcwq(cpu);
	struct task_struct *new_trustee = NULL;
	struct worker *uninitialized_var(new_worker);
	unsigned long flags;

	/* suspend-time (frozen) transitions are handled like normal ones */
	action &= ~CPU_TASKS_FROZEN;

	/* creation phase - may sleep, so runs before taking gcwq->lock */
	switch (action) {
	case CPU_DOWN_PREPARE:
		new_trustee = kthread_create(trustee_thread, gcwq,
					     "workqueue_trustee/%d\n", cpu);
		if (IS_ERR(new_trustee))
			return notifier_from_errno(PTR_ERR(new_trustee));
		kthread_bind(new_trustee, cpu);
		/* fall through */
	case CPU_UP_PREPARE:
		BUG_ON(gcwq->first_idle);
		new_worker = create_worker(gcwq, false);
		if (!new_worker) {
			if (new_trustee)
				kthread_stop(new_trustee);
			return NOTIFY_BAD;
		}
	}

	/* some are called w/ irq disabled, don't disturb irq status */
	spin_lock_irqsave(&gcwq->lock, flags);

	switch (action) {
	case CPU_DOWN_PREPARE:
		/* initialize trustee and tell it to acquire the gcwq */
		BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
		gcwq->trustee = new_trustee;
		gcwq->trustee_state = TRUSTEE_START;
		wake_up_process(gcwq->trustee);
		wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
		/* fall through */
	case CPU_UP_PREPARE:
		BUG_ON(gcwq->first_idle);
		gcwq->first_idle = new_worker;
		break;

	case CPU_DYING:
		/*
		 * Before this, the trustee and all workers except for
		 * the ones which are still executing works from
		 * before the last CPU down must be on the cpu.  After
		 * this, they'll all be diasporas.
		 */
		gcwq->flags |= GCWQ_DISASSOCIATED;
		break;

	case CPU_POST_DEAD:
		/* CPU is gone for good - let the trustee kill idle workers */
		gcwq->trustee_state = TRUSTEE_BUTCHER;
		/* fall through */
	case CPU_UP_CANCELED:
		destroy_worker(gcwq->first_idle);
		gcwq->first_idle = NULL;
		break;

	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		gcwq->flags &= ~GCWQ_DISASSOCIATED;
		if (gcwq->trustee_state != TRUSTEE_DONE) {
			gcwq->trustee_state = TRUSTEE_RELEASE;
			wake_up_process(gcwq->trustee);
			wait_trustee_state(gcwq, TRUSTEE_DONE);
		}

		/*
		 * Trustee is done and there might be no worker left.
		 * Put the first_idle in and request a real manager to
		 * take a look.
		 */
		spin_unlock_irq(&gcwq->lock);
		kthread_bind(gcwq->first_idle->task, cpu);
		spin_lock_irq(&gcwq->lock);
		gcwq->flags |= GCWQ_MANAGE_WORKERS;
		start_worker(gcwq->first_idle);
		gcwq->first_idle = NULL;
		break;
	}

	spin_unlock_irqrestore(&gcwq->lock, flags);
	return notifier_from_errno(0);
}
/*
 * Workqueues should be brought up before normal priority CPU notifiers.
 * This will be registered high priority CPU notifier.
 */
static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
					       unsigned long action,
					       void *hcpu)
{
	unsigned long act = action & ~CPU_TASKS_FROZEN;

	/* forward only the bring-up side of the hotplug sequence */
	if (act == CPU_UP_PREPARE || act == CPU_UP_CANCELED ||
	    act == CPU_DOWN_FAILED || act == CPU_ONLINE)
		return workqueue_cpu_callback(nfb, action, hcpu);

	return NOTIFY_OK;
}
/*
 * Workqueues should be brought down after normal priority CPU notifiers.
 * This will be registered as low priority CPU notifier.
 */
static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
						 unsigned long action,
						 void *hcpu)
{
	unsigned long act = action & ~CPU_TASKS_FROZEN;

	/* forward only the tear-down side of the hotplug sequence */
	if (act == CPU_DOWN_PREPARE || act == CPU_DYING ||
	    act == CPU_POST_DEAD)
		return workqueue_cpu_callback(nfb, action, hcpu);

	return NOTIFY_OK;
}
#ifdef CONFIG_SMP
/*
 * Bundle handed to work_for_cpu_fn(): the user callback, its argument
 * and the slot where the callback's return value is stored.
 */
struct work_for_cpu {
	struct work_struct work;	/* the embedded work item */
	long (*fn)(void *);		/* callback to run */
	void *arg;			/* argument passed to @fn */
	long ret;			/* @fn's return value */
};
/* Trampoline: run the wrapped callback and stash its return value. */
static void work_for_cpu_fn(struct work_struct *work)
{
	struct work_for_cpu *wfc;

	wfc = container_of(work, struct work_for_cpu, work);
	wfc->ret = wfc->fn(wfc->arg);
}
/**
 * work_on_cpu - run a function in user context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 *
 * Queues an on-stack work item on @cpu, waits for it to finish and
 * returns whatever @fn returned.
 * It is up to the caller to ensure that the cpu doesn't go offline.
 * The caller must not hold any locks which would prevent @fn from
 * completing.
 */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	struct work_for_cpu wfc = { .fn = fn, .arg = arg };

	INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
	schedule_work_on(cpu, &wfc.work);
	/* flush guarantees work_for_cpu_fn() has filled in wfc.ret */
	flush_work(&wfc.work);
	return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
EXPORT_SYMBOL_GPL(work_on_cpu);
#endif /* CONFIG_SMP */
#ifdef CONFIG_FREEZER
/**
 * freeze_workqueues_begin - begin freezing workqueues
 *
 * Start freezing workqueues.  After this function returns, all freezable
 * workqueues will queue new works to their frozen_works list instead of
 * gcwq->worklist.
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock and gcwq->lock's.
 */
void freeze_workqueues_begin(void)
{
	unsigned int cpu;

	spin_lock(&workqueue_lock);

	/* nested freezing is a bug */
	BUG_ON(workqueue_freezing);
	workqueue_freezing = true;

	for_each_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);
		struct workqueue_struct *wq;

		spin_lock_irq(&gcwq->lock);

		BUG_ON(gcwq->flags & GCWQ_FREEZING);
		gcwq->flags |= GCWQ_FREEZING;

		/* max_active == 0 stops new works from becoming active */
		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			if (cwq && wq->flags & WQ_FREEZABLE)
				cwq->max_active = 0;
		}

		spin_unlock_irq(&gcwq->lock);
	}

	spin_unlock(&workqueue_lock);
}
/**
 * freeze_workqueues_busy - are freezable workqueues still busy?
 *
 * Check whether freezing is complete.  This function must be called
 * between freeze_workqueues_begin() and thaw_workqueues().
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock.  gcwq->lock is deliberately not
 * taken (see the nr_active comment below).
 *
 * RETURNS:
 * %true if some freezable workqueues are still busy.  %false if freezing
 * is complete.
 */
bool freeze_workqueues_busy(void)
{
	unsigned int cpu;
	bool busy = false;

	spin_lock(&workqueue_lock);

	BUG_ON(!workqueue_freezing);

	for_each_gcwq_cpu(cpu) {
		struct workqueue_struct *wq;
		/*
		 * nr_active is monotonically decreasing.  It's safe
		 * to peek without lock.
		 */
		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			if (!cwq || !(wq->flags & WQ_FREEZABLE))
				continue;

			BUG_ON(cwq->nr_active < 0);
			if (cwq->nr_active) {
				busy = true;
				goto out_unlock;
			}
		}
	}
out_unlock:
	spin_unlock(&workqueue_lock);
	return busy;
}
/**
 * thaw_workqueues - thaw workqueues
 *
 * Thaw workqueues.  Normal queueing is restored and all collected
 * frozen works are transferred to their respective gcwq worklists.
 * Safe to call even if freezing was never started - it's a no-op then.
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock and gcwq->lock's.
 */
void thaw_workqueues(void)
{
	unsigned int cpu;

	spin_lock(&workqueue_lock);

	if (!workqueue_freezing)
		goto out_unlock;

	for_each_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);
		struct workqueue_struct *wq;

		spin_lock_irq(&gcwq->lock);

		BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
		gcwq->flags &= ~GCWQ_FREEZING;

		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			if (!cwq || !(wq->flags & WQ_FREEZABLE))
				continue;

			/* restore max_active and repopulate worklist */
			cwq->max_active = wq->saved_max_active;

			while (!list_empty(&cwq->delayed_works) &&
			       cwq->nr_active < cwq->max_active)
				cwq_activate_first_delayed(cwq);
		}

		/* kick workers so the transferred works get processed */
		wake_up_worker(gcwq);

		spin_unlock_irq(&gcwq->lock);
	}

	workqueue_freezing = false;
out_unlock:
	spin_unlock(&workqueue_lock);
}
#endif /* CONFIG_FREEZER */
/*
 * Bring up the workqueue subsystem: register the hotplug notifiers,
 * initialize every possible CPU's gcwq, start an initial worker on the
 * online ones and create the standard system workqueues.
 */
static int __init init_workqueues(void)
{
	unsigned int cpu;
	int i;

	/* up callback runs at high, down callback at low priority */
	cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
	cpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);

	/* initialize gcwqs */
	for_each_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);

		spin_lock_init(&gcwq->lock);
		INIT_LIST_HEAD(&gcwq->worklist);
		gcwq->cpu = cpu;
		/* every gcwq starts out disassociated; cleared below */
		gcwq->flags |= GCWQ_DISASSOCIATED;

		INIT_LIST_HEAD(&gcwq->idle_list);
		for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
			INIT_HLIST_HEAD(&gcwq->busy_hash[i]);

		init_timer_deferrable(&gcwq->idle_timer);
		gcwq->idle_timer.function = idle_worker_timeout;
		gcwq->idle_timer.data = (unsigned long)gcwq;

		setup_timer(&gcwq->mayday_timer, gcwq_mayday_timeout,
			    (unsigned long)gcwq);

		ida_init(&gcwq->worker_ida);

		gcwq->trustee_state = TRUSTEE_DONE;
		init_waitqueue_head(&gcwq->trustee_wait);
	}

	/* create the initial worker */
	for_each_online_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);
		struct worker *worker;

		/* only real CPUs are associated; WORK_CPU_UNBOUND is not */
		if (cpu != WORK_CPU_UNBOUND)
			gcwq->flags &= ~GCWQ_DISASSOCIATED;
		worker = create_worker(gcwq, true);
		BUG_ON(!worker);
		spin_lock_irq(&gcwq->lock);
		start_worker(worker);
		spin_unlock_irq(&gcwq->lock);
	}

	system_wq = alloc_workqueue("events", 0, 0);
	system_long_wq = alloc_workqueue("events_long", 0, 0);
	system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
	system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
					    WQ_UNBOUND_MAX_ACTIVE);
	system_freezable_wq = alloc_workqueue("events_freezable",
					      WQ_FREEZABLE, 0);
	system_nrt_freezable_wq = alloc_workqueue("events_nrt_freezable",
			WQ_NON_REENTRANT | WQ_FREEZABLE, 0);
	BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq ||
	       !system_unbound_wq || !system_freezable_wq ||
	       !system_nrt_freezable_wq);
	return 0;
}
early_initcall(init_workqueues);
|
<?xml version="1.0" encoding="iso-8859-1"?>
<!DOCTYPE html
PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<title>Qt 4.4: main.cpp Example File (painting/fontsampler/main.cpp)</title>
<link href="classic.css" rel="stylesheet" type="text/css" />
</head>
<body>
<table border="0" cellpadding="0" cellspacing="0" width="100%">
<tr>
<td align="left" valign="top" width="32"><a href="http://www.trolltech.com/products/qt"><img src="images/qt-logo.png" align="left" border="0" /></a></td>
<td width="1"> </td><td class="postheader" valign="center"><a href="index.html"><font color="#004faf">Home</font></a> · <a href="namespaces.html"><font color="#004faf">All Namespaces</font></a> · <a href="classes.html"><font color="#004faf">All Classes</font></a> · <a href="mainclasses.html"><font color="#004faf">Main Classes</font></a> · <a href="groups.html"><font color="#004faf">Grouped Classes</font></a> · <a href="modules.html"><font color="#004faf">Modules</font></a> · <a href="functions.html"><font color="#004faf">Functions</font></a></td>
<td align="right" valign="top" width="230"></td></tr></table><h1 class="title">main.cpp Example File<br /><span class="small-subtitle">painting/fontsampler/main.cpp</span>
</h1>
<pre><span class="comment"> /****************************************************************************
**
** Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies).
** Contact: Qt Software Information (qt-info@nokia.com)
**
** This file is part of the example classes of the Qt Toolkit.
**
** Commercial Usage
** Licensees holding valid Qt Commercial licenses may use this file in
** accordance with the Qt Commercial License Agreement provided with the
** Software or, alternatively, in accordance with the terms contained in
** a written agreement between you and Nokia.
**
**
** GNU General Public License Usage
** Alternatively, this file may be used under the terms of the GNU
** General Public License versions 2.0 or 3.0 as published by the Free
** Software Foundation and appearing in the file LICENSE.GPL included in
** the packaging of this file. Please review the following information
** to ensure GNU General Public Licensing requirements will be met:
** http://www.fsf.org/licensing/licenses/info/GPLv2.html and
** http://www.gnu.org/copyleft/gpl.html. In addition, as a special
** exception, Nokia gives you certain additional rights. These rights
** are described in the Nokia Qt GPL Exception version 1.3, included in
** the file GPL_EXCEPTION.txt in this package.
**
** Qt for Windows(R) Licensees
** As a special exception, Nokia, as the sole copyright holder for Qt
** Designer, grants users of the Qt/Eclipse Integration plug-in the
** right for the Qt/Eclipse Integration to link to functionality
** provided by Qt Designer and its related libraries.
**
** If you are unsure which license is appropriate for your use, please
** contact the sales department at qt-sales@nokia.com.
**
****************************************************************************/</span>
#include <QApplication>
#include "mainwindow.h"
int main(int argc, char *argv[])
{
QApplication app(argc, argv);
MainWindow window;
window.show();
return app.exec();
}</pre>
<p /><address><hr /><div align="center">
<table width="100%" cellspacing="0" border="0"><tr class="address">
<td width="30%" align="left">Copyright © 2008 Nokia</td>
<td width="40%" align="center"><a href="trademarks.html">Trademarks</a></td>
<td width="30%" align="right"><div align="right">Qt 4.4.3</div></td>
</tr></table></div></address></body>
</html>
|
/*
* Copyright (C) 2008-2017 TrinityCore <http://www.trinitycore.org/>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "World.h"
// This is where scripts' loading functions should be declared:
// Each AddSC_* function registers the scripts implemented in the
// correspondingly named source file (see the "location:" notes in
// AddWorldScripts() below).
// world
void AddSC_areatrigger_scripts();
void AddSC_emerald_dragons();
void AddSC_generic_creature();
void AddSC_go_scripts();
void AddSC_guards();
void AddSC_item_scripts();
void AddSC_npc_professions();
void AddSC_npc_innkeeper();
void AddSC_npcs_special();
void AddSC_achievement_scripts();
void AddSC_action_ip_logger();
void AddSC_scene_scripts();
// player
void AddSC_chat_log();
void AddSC_duel_reset();
// The name of this function should match:
// void Add${NameOfDirectory}Scripts()
// Registers all "World" scripts declared above.  Every registration is
// unconditional except AddSC_action_ip_logger(), which is gated on the
// IP-based action logging config option.
void AddWorldScripts()
{
    AddSC_areatrigger_scripts();
    AddSC_emerald_dragons();
    AddSC_generic_creature();
    AddSC_go_scripts();
    AddSC_guards();
    AddSC_item_scripts();
    AddSC_npc_professions();
    AddSC_npc_innkeeper();
    AddSC_npcs_special();
    AddSC_achievement_scripts();
    AddSC_chat_log(); // location: scripts\World\chat_log.cpp
    AddSC_scene_scripts();
    // FIXME: This should be moved in a script validation hook.
    // To avoid duplicate code, we check once /*ONLY*/ if logging is permitted or not.
    if (sWorld->getBoolConfig(CONFIG_IP_BASED_ACTION_LOGGING))
        AddSC_action_ip_logger(); // location: scripts\World\action_ip_logger.cpp
    AddSC_duel_reset();
}
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2009 Oracle. All rights reserved.
*/
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "btrfs_inode.h"
#include "async-thread.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "qgroup.h"
#include "print-tree.h"
/*
 * backref_node, mapping_node and tree_block start with this
 * (rb_node + bytenr prefix), which lets tree_insert()/tree_search()
 * below operate on all three via rb_entry().
 */
struct tree_entry {
	struct rb_node rb_node;
	u64 bytenr;
};

/*
 * present a tree block in the backref cache
 */
struct backref_node {
	struct rb_node rb_node;
	u64 bytenr;

	u64 new_bytenr;
	/* objectid of tree block owner, can be not uptodate */
	u64 owner;
	/* link to pending, changed or detached list */
	struct list_head list;
	/* list of upper level blocks reference this block */
	struct list_head upper;
	/* list of child blocks in the cache */
	struct list_head lower;
	/* NULL if this node is not tree root */
	struct btrfs_root *root;
	/* extent buffer got by COW the block */
	struct extent_buffer *eb;
	/* level of tree block */
	unsigned int level:8;
	/* is the block in non-reference counted tree */
	unsigned int cowonly:1;
	/* 1 if no child node in the cache */
	unsigned int lowest:1;
	/* is the extent buffer locked */
	unsigned int locked:1;
	/* has the block been processed */
	unsigned int processed:1;
	/* have backrefs of this block been checked */
	unsigned int checked:1;
	/*
	 * 1 if corresponding block has been cowed but some upper
	 * level block pointers may not point to the new location
	 */
	unsigned int pending:1;
	/*
	 * 1 if the backref node isn't connected to any other
	 * backref node.
	 */
	unsigned int detached:1;
};

/*
 * present a block pointer in the backref cache
 */
struct backref_edge {
	struct list_head list[2];
	struct backref_node *node[2];
};

/* indices into backref_edge->list[]/node[]: LOWER is the child end of
 * the edge, UPPER the parent end (see walk_up_backref()). */
#define LOWER	0
#define UPPER	1
/* NOTE(review): presumably sizes metadata reservations per backref
 * node - confirm against the reservation callers. */
#define RELOCATION_RESERVED_NODES	256

struct backref_cache {
	/* red black tree of all backref nodes in the cache */
	struct rb_root rb_root;
	/* for passing backref nodes to btrfs_reloc_cow_block */
	struct backref_node *path[BTRFS_MAX_LEVEL];
	/*
	 * list of blocks that have been cowed but some block
	 * pointers in upper level blocks may not reflect the
	 * new location
	 */
	struct list_head pending[BTRFS_MAX_LEVEL];
	/* list of backref nodes with no child node */
	struct list_head leaves;
	/* list of blocks that have been cowed in current transaction */
	struct list_head changed;
	/* list of detached backref node. */
	struct list_head detached;

	u64 last_trans;

	/* bookkeeping counters, checked in backref_cache_cleanup() */
	int nr_nodes;
	int nr_edges;
};
/*
 * map address of tree root to tree
 */
struct mapping_node {
	struct rb_node rb_node;
	u64 bytenr;
	void *data;	/* opaque payload, e.g. a btrfs_root pointer */
};

/* rb tree of mapping_nodes plus the lock protecting it */
struct mapping_tree {
	struct rb_root rb_root;
	spinlock_t lock;
};

/*
 * present a tree block to process
 */
struct tree_block {
	struct rb_node rb_node;
	u64 bytenr;
	struct btrfs_key key;
	unsigned int level:8;
	/* 1 once 'key' has been read from the block */
	unsigned int key_ready:1;
};

#define MAX_EXTENTS 128

/* contiguous run of file data extents being relocated together */
struct file_extent_cluster {
	u64 start;
	u64 end;
	u64 boundary[MAX_EXTENTS];
	unsigned int nr;
};

struct reloc_control {
	/* block group to relocate */
	struct btrfs_block_group_cache *block_group;
	/* extent tree */
	struct btrfs_root *extent_root;
	/* inode for moving data */
	struct inode *data_inode;

	struct btrfs_block_rsv *block_rsv;

	struct backref_cache backref_cache;

	struct file_extent_cluster cluster;
	/* tree blocks have been processed */
	struct extent_io_tree processed_blocks;
	/* map start of tree root to corresponding reloc tree */
	struct mapping_tree reloc_root_tree;
	/* list of reloc trees */
	struct list_head reloc_roots;
	/* list of subvolume trees that get relocated */
	struct list_head dirty_subvol_roots;
	/* size of metadata reservation for merging reloc trees */
	u64 merging_rsv_size;
	/* size of relocated tree nodes */
	u64 nodes_relocated;
	/* reserved size for block group relocation */
	u64 reserved_bytes;

	u64 search_start;
	u64 extents_found;

	/* one of MOVE_DATA_EXTENTS / UPDATE_DATA_PTRS below */
	unsigned int stage:8;
	unsigned int create_reloc_tree:1;
	unsigned int merge_reloc_tree:1;
	unsigned int found_file_extent:1;
};

/* stages of data relocation */
#define MOVE_DATA_EXTENTS	0
#define UPDATE_DATA_PTRS	1
static void remove_backref_node(struct backref_cache *cache,
struct backref_node *node);
static void __mark_block_processed(struct reloc_control *rc,
struct backref_node *node);
/* Initialize an empty mapping tree and its protecting spinlock. */
static void mapping_tree_init(struct mapping_tree *tree)
{
	spin_lock_init(&tree->lock);
	tree->rb_root = RB_ROOT;
}
/* Reset a backref cache to the empty state: empty rb tree, empty lists. */
static void backref_cache_init(struct backref_cache *cache)
{
	int level;

	cache->rb_root = RB_ROOT;
	INIT_LIST_HEAD(&cache->changed);
	INIT_LIST_HEAD(&cache->detached);
	INIT_LIST_HEAD(&cache->leaves);
	for (level = 0; level < BTRFS_MAX_LEVEL; level++)
		INIT_LIST_HEAD(&cache->pending[level]);
}
/*
 * Release every node still held by @cache and ASSERT that the cache is
 * completely empty afterwards.
 */
static void backref_cache_cleanup(struct backref_cache *cache)
{
	struct backref_node *node;
	int i;

	/* detached nodes sit on their own list; drop them first */
	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct backref_node, list);
		remove_backref_node(cache, node);
	}

	/* removing each leaf tears down its whole upper chain */
	while (!list_empty(&cache->leaves)) {
		node = list_entry(cache->leaves.next,
				  struct backref_node, lower);
		remove_backref_node(cache, node);
	}

	cache->last_trans = 0;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		ASSERT(list_empty(&cache->pending[i]));
	ASSERT(list_empty(&cache->changed));
	ASSERT(list_empty(&cache->detached));
	ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
	ASSERT(!cache->nr_nodes);
	ASSERT(!cache->nr_edges);
}
static struct backref_node *alloc_backref_node(struct backref_cache *cache)
{
struct backref_node *node;
node = kzalloc(sizeof(*node), GFP_NOFS);
if (node) {
INIT_LIST_HEAD(&node->list);
INIT_LIST_HEAD(&node->upper);
INIT_LIST_HEAD(&node->lower);
RB_CLEAR_NODE(&node->rb_node);
cache->nr_nodes++;
}
return node;
}
/* Free a backref node (NULL tolerated) and update the node count. */
static void free_backref_node(struct backref_cache *cache,
			      struct backref_node *node)
{
	if (!node)
		return;

	cache->nr_nodes--;
	kfree(node);
}
static struct backref_edge *alloc_backref_edge(struct backref_cache *cache)
{
struct backref_edge *edge;
edge = kzalloc(sizeof(*edge), GFP_NOFS);
if (edge)
cache->nr_edges++;
return edge;
}
/* Free a backref edge (NULL tolerated) and update the edge count. */
static void free_backref_edge(struct backref_cache *cache,
			      struct backref_edge *edge)
{
	if (!edge)
		return;

	cache->nr_edges--;
	kfree(edge);
}
/*
 * Insert @node keyed by @bytenr into an rb tree of tree_entry-prefixed
 * structures.  Returns NULL on success, or the existing conflicting
 * rb_node if @bytenr is already present (nothing is inserted then).
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
				   struct rb_node *node)
{
	struct rb_node **link = &root->rb_node;
	struct rb_node *parent = NULL;

	while (*link) {
		struct tree_entry *entry;

		parent = *link;
		entry = rb_entry(parent, struct tree_entry, rb_node);
		if (bytenr == entry->bytenr)
			return parent;
		if (bytenr < entry->bytenr)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}

	rb_link_node(node, parent, link);
	rb_insert_color(node, root);
	return NULL;
}
/*
 * Look up @bytenr in an rb tree of tree_entry-prefixed structures.
 * Returns the matching rb_node or NULL if not found.
 */
static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
{
	struct rb_node *cur = root->rb_node;

	while (cur) {
		struct tree_entry *entry;

		entry = rb_entry(cur, struct tree_entry, rb_node);
		if (bytenr == entry->bytenr)
			return cur;
		cur = bytenr < entry->bytenr ? cur->rb_left : cur->rb_right;
	}
	return NULL;
}
/*
 * Panic on an inconsistent backref cache entry, using the node's
 * fs_info when one can be derived from its root.
 */
static void backref_tree_panic(struct rb_node *rb_node, int errno, u64 bytenr)
{
	struct backref_node *bnode;
	struct btrfs_fs_info *fs_info = NULL;

	bnode = rb_entry(rb_node, struct backref_node, rb_node);
	if (bnode->root)
		fs_info = bnode->root->fs_info;
	btrfs_panic(fs_info, errno,
		    "Inconsistency in backref cache found at offset %llu",
		    bytenr);
}
/*
 * walk up backref nodes until reach node presents tree root
 *
 * Records each traversed edge in @edges (advancing *index) so that
 * walk_down_backref() can later resume from the same path.
 */
static struct backref_node *walk_up_backref(struct backref_node *node,
					    struct backref_edge *edges[],
					    int *index)
{
	int idx = *index;

	while (!list_empty(&node->upper)) {
		struct backref_edge *edge;

		edge = list_entry(node->upper.next,
				  struct backref_edge, list[LOWER]);
		edges[idx++] = edge;
		node = edge->node[UPPER];
	}
	BUG_ON(node->detached);
	*index = idx;
	return node;
}
/*
 * walk down backref nodes to find start of next reference path
 *
 * @edges/@index hold the path recorded by walk_up_backref().  Starting
 * from the deepest recorded edge, switch to the next sibling edge of
 * the same lower node if one exists and return its upper node;
 * otherwise pop one level and retry.  Returns NULL (and resets *index
 * to 0) when every path has been exhausted.
 */
static struct backref_node *walk_down_backref(struct backref_edge *edges[],
					      int *index)
{
	struct backref_edge *edge;
	struct backref_node *lower;
	int idx = *index;

	while (idx > 0) {
		edge = edges[idx - 1];
		lower = edge->node[LOWER];
		/* last edge of this lower node - pop one level */
		if (list_is_last(&edge->list[LOWER], &lower->upper)) {
			idx--;
			continue;
		}
		/* advance to the next edge referencing the same lower node */
		edge = list_entry(edge->list[LOWER].next,
				  struct backref_edge, list[LOWER]);
		edges[idx - 1] = edge;
		*index = idx;
		return edge->node[UPPER];
	}
	*index = 0;
	return NULL;
}
/* Drop the tree lock on the node's extent buffer if we hold it. */
static void unlock_node_buffer(struct backref_node *node)
{
	if (!node->locked)
		return;

	btrfs_tree_unlock(node->eb);
	node->locked = 0;
}
/* Unlock and release the node's extent buffer reference, if any. */
static void drop_node_buffer(struct backref_node *node)
{
	if (!node->eb)
		return;

	unlock_node_buffer(node);
	free_extent_buffer(node->eb);
	node->eb = NULL;
}
/*
 * Tear down a single backref node that has no upper-level references
 * left: release its buffer, unlink it from all lists and the rb tree
 * (if inserted) and free it.
 */
static void drop_backref_node(struct backref_cache *tree,
			      struct backref_node *node)
{
	/* caller must have detached all upper edges already */
	BUG_ON(!list_empty(&node->upper));

	drop_node_buffer(node);
	list_del(&node->list);
	list_del(&node->lower);
	if (!RB_EMPTY_NODE(&node->rb_node))
		rb_erase(&node->rb_node, &tree->rb_root);
	free_backref_node(tree, node);
}
/*
 * remove a backref node from the backref cache
 *
 * Frees every edge above @node.  When an upper node was never linked
 * into the rb tree it is dropped recursively; otherwise, if it loses
 * its last cached child, it becomes a leaf (added to cache->leaves).
 */
static void remove_backref_node(struct backref_cache *cache,
				struct backref_node *node)
{
	struct backref_node *upper;
	struct backref_edge *edge;

	if (!node)
		return;

	/* only lowest-level or detached nodes may be removed directly */
	BUG_ON(!node->lowest && !node->detached);
	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next, struct backref_edge,
				  list[LOWER]);
		upper = edge->node[UPPER];
		list_del(&edge->list[LOWER]);
		list_del(&edge->list[UPPER]);
		free_backref_edge(cache, edge);

		/* upper was never inserted into the rb tree - drop it too */
		if (RB_EMPTY_NODE(&upper->rb_node)) {
			BUG_ON(!list_empty(&node->upper));
			drop_backref_node(cache, node);
			node = upper;
			node->lowest = 1;
			continue;
		}
		/*
		 * add the node to leaf node list if no other
		 * child block cached.
		 */
		if (list_empty(&upper->lower)) {
			list_add_tail(&upper->lower, &cache->leaves);
			upper->lowest = 1;
		}
	}

	drop_backref_node(cache, node);
}
/*
 * Re-key a cached backref node to @bytenr: remove it from the rb tree,
 * update its key and reinsert.  Panics if the new key already exists.
 */
static void update_backref_node(struct backref_cache *cache,
				struct backref_node *node, u64 bytenr)
{
	struct rb_node *clash;

	rb_erase(&node->rb_node, &cache->rb_root);
	node->bytenr = bytenr;
	clash = tree_insert(&cache->rb_root, bytenr, &node->rb_node);
	if (clash)
		backref_tree_panic(clash, -EEXIST, bytenr);
}
/*
 * update backref cache after a transaction commit
 *
 * RETURNS:
 * 0 if the cache is still usable as-is for this transaction, 1 if it
 * had to be (partially) invalidated and re-keyed.
 */
static int update_backref_cache(struct btrfs_trans_handle *trans,
				struct backref_cache *cache)
{
	struct backref_node *node;
	int level = 0;

	/* first use of the cache - just record the transid */
	if (cache->last_trans == 0) {
		cache->last_trans = trans->transid;
		return 0;
	}

	/* same transaction - nothing changed under us */
	if (cache->last_trans == trans->transid)
		return 0;

	/*
	 * detached nodes are used to avoid unnecessary backref
	 * lookup. transaction commit changes the extent tree.
	 * so the detached nodes are no longer useful.
	 */
	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct backref_node, list);
		remove_backref_node(cache, node);
	}

	/* re-key COWed nodes to their post-COW block addresses */
	while (!list_empty(&cache->changed)) {
		node = list_entry(cache->changed.next,
				  struct backref_node, list);
		list_del_init(&node->list);
		BUG_ON(node->pending);
		update_backref_node(cache, node, node->new_bytenr);
	}

	/*
	 * some nodes can be left in the pending list if there were
	 * errors during processing the pending nodes.
	 */
	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
		list_for_each_entry(node, &cache->pending[level], list) {
			BUG_ON(!node->pending);
			if (node->bytenr == node->new_bytenr)
				continue;
			update_backref_node(cache, node, node->new_bytenr);
		}
	}

	cache->last_trans = 0;
	return 1;
}
/*
 * Decide whether backref lookup may skip @root.  Returns 1 when the
 * root already has a reloc tree created in an earlier transaction (see
 * comment below); 0 otherwise.
 */
static int should_ignore_root(struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;

	/* COW-only (non-reference-counted) roots are never ignored */
	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
		return 0;

	reloc_root = root->reloc_root;
	if (!reloc_root)
		return 0;

	/* reloc tree was created in the current transaction - keep root */
	if (btrfs_root_last_snapshot(&reloc_root->root_item) ==
	    root->fs_info->running_transaction->transid - 1)
		return 0;
	/*
	 * if there is reloc tree and it was created in previous
	 * transaction backref lookup can find the reloc tree,
	 * so backref node for the fs tree root is useless for
	 * relocation.
	 */
	return 1;
}
/*
 * find reloc tree by address of tree root
 *
 * Returns the mapped btrfs_root or NULL if @bytenr is not in the
 * reloc_root_tree.  The mapping tree's spinlock is taken internally.
 */
static struct btrfs_root *find_reloc_root(struct reloc_control *rc,
					  u64 bytenr)
{
	struct btrfs_root *root = NULL;
	struct rb_node *rb_node;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_search(&rc->reloc_root_tree.rb_root, bytenr);
	if (rb_node) {
		struct mapping_node *node;

		node = rb_entry(rb_node, struct mapping_node, rb_node);
		root = (struct btrfs_root *)node->data;
	}
	spin_unlock(&rc->reloc_root_tree.lock);
	return root;
}
/*
 * Return 1 if @root_objectid names a COW-only (metadata, never
 * snapshotted) tree, 0 otherwise.
 */
static int is_cowonly_root(u64 root_objectid)
{
	switch (root_objectid) {
	case BTRFS_ROOT_TREE_OBJECTID:
	case BTRFS_EXTENT_TREE_OBJECTID:
	case BTRFS_CHUNK_TREE_OBJECTID:
	case BTRFS_DEV_TREE_OBJECTID:
	case BTRFS_TREE_LOG_OBJECTID:
	case BTRFS_CSUM_TREE_OBJECTID:
	case BTRFS_UUID_TREE_OBJECTID:
	case BTRFS_QUOTA_TREE_OBJECTID:
	case BTRFS_FREE_SPACE_TREE_OBJECTID:
		return 1;
	default:
		return 0;
	}
}
/*
 * Fetch the fs root with @root_objectid via btrfs_get_fs_root().
 *
 * COW-only trees are keyed with offset 0; snapshottable trees use
 * (u64)-1 to pick the newest root item.
 */
static struct btrfs_root *read_fs_root(struct btrfs_fs_info *fs_info,
				       u64 root_objectid)
{
	struct btrfs_key key = {
		.objectid = root_objectid,
		.type = BTRFS_ROOT_ITEM_KEY,
		.offset = is_cowonly_root(root_objectid) ? 0 : (u64)-1,
	};

	return btrfs_get_fs_root(fs_info, &key, false);
}
/*
 * Locate the inline backref area of the tree block item at @slot in @leaf.
 *
 * On success returns 0 and sets *ptr/*end to the [start, end) extent
 * buffer offsets of the inline references.  Returns 1 when the item is
 * too small (pre-historic v0 format, reported as a fs error) or when it
 * contains no inline references at all.
 */
static noinline_for_stack
int find_inline_backref(struct extent_buffer *leaf, int slot,
			unsigned long *ptr, unsigned long *end)
{
	struct btrfs_key key;
	struct btrfs_extent_item *ei;
	struct btrfs_tree_block_info *bi;
	u32 item_size;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	item_size = btrfs_item_size_nr(leaf, slot);
	/* Smaller than a modern extent item: v0 leftovers, not supported. */
	if (item_size < sizeof(*ei)) {
		btrfs_print_v0_err(leaf->fs_info);
		btrfs_handle_fs_error(leaf->fs_info, -EINVAL, NULL);
		return 1;
	}

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	WARN_ON(!(btrfs_extent_flags(leaf, ei) &
		  BTRFS_EXTENT_FLAG_TREE_BLOCK));

	/* EXTENT_ITEM carries a tree_block_info; nothing after it here. */
	if (key.type == BTRFS_EXTENT_ITEM_KEY &&
	    item_size <= sizeof(*ei) + sizeof(*bi)) {
		WARN_ON(item_size < sizeof(*ei) + sizeof(*bi));
		return 1;
	}
	/* METADATA_ITEM has no tree_block_info; item ends at the ei. */
	if (key.type == BTRFS_METADATA_ITEM_KEY &&
	    item_size <= sizeof(*ei)) {
		WARN_ON(item_size < sizeof(*ei));
		return 1;
	}

	if (key.type == BTRFS_EXTENT_ITEM_KEY) {
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		*ptr = (unsigned long)(bi + 1);
	} else {
		*ptr = (unsigned long)(ei + 1);
	}
	*end = (unsigned long)ei + item_size;
	return 0;
}
/*
 * build backref tree for a given tree block. root of the backref tree
 * corresponds the tree block, leaves of the backref tree correspond
 * roots of b-trees that reference the tree block.
 *
 * the basic idea of this function is check backrefs of a given block
 * to find upper level blocks that reference the block, and then check
 * backrefs of these upper level blocks recursively. the recursion stop
 * when tree root is reached or backrefs for the block is cached.
 *
 * NOTE: if we find backrefs for a block are cached, we know backrefs
 * for all upper level blocks that directly/indirectly reference the
 * block are also cached.
 *
 * Returns the backref node for the requested block on success, or an
 * ERR_PTR on failure (after tearing down everything built so far).
 */
static noinline_for_stack
struct backref_node *build_backref_tree(struct reloc_control *rc,
					struct btrfs_key *node_key,
					int level, u64 bytenr)
{
	struct backref_cache *cache = &rc->backref_cache;
	struct btrfs_path *path1; /* For searching extent root */
	struct btrfs_path *path2; /* For searching parent of TREE_BLOCK_REF */
	struct extent_buffer *eb;
	struct btrfs_root *root;
	struct backref_node *cur;
	struct backref_node *upper;
	struct backref_node *lower;
	struct backref_node *node = NULL;
	struct backref_node *exist = NULL;
	struct backref_edge *edge;
	struct rb_node *rb_node;
	struct btrfs_key key;
	unsigned long end;
	unsigned long ptr;
	LIST_HEAD(list); /* Pending edge list, upper node needs to be checked */
	LIST_HEAD(useless);
	int cowonly;
	int ret;
	int err = 0;
	bool need_check = true;

	path1 = btrfs_alloc_path();
	path2 = btrfs_alloc_path();
	if (!path1 || !path2) {
		err = -ENOMEM;
		goto out;
	}
	path1->reada = READA_FORWARD;
	path2->reada = READA_FORWARD;

	/* The node for the requested block itself; root of the backref tree. */
	node = alloc_backref_node(cache);
	if (!node) {
		err = -ENOMEM;
		goto out;
	}

	node->bytenr = bytenr;
	node->level = level;
	node->lowest = 1;
	cur = node;
again:
	end = 0;
	ptr = 0;
	/* Find the extent/metadata item of cur's block in the extent tree. */
	key.objectid = cur->bytenr;
	key.type = BTRFS_METADATA_ITEM_KEY;
	key.offset = (u64)-1;

	path1->search_commit_root = 1;
	path1->skip_locking = 1;
	ret = btrfs_search_slot(NULL, rc->extent_root, &key, path1,
				0, 0);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	ASSERT(ret);
	ASSERT(path1->slots[0]);

	path1->slots[0]--;

	WARN_ON(cur->checked);
	if (!list_empty(&cur->upper)) {
		/*
		 * the backref was added previously when processing
		 * backref of type BTRFS_TREE_BLOCK_REF_KEY
		 */
		ASSERT(list_is_singular(&cur->upper));
		edge = list_entry(cur->upper.next, struct backref_edge,
				  list[LOWER]);
		ASSERT(list_empty(&edge->list[UPPER]));
		exist = edge->node[UPPER];
		/*
		 * add the upper level block to pending list if we need
		 * check its backrefs
		 */
		if (!exist->checked)
			list_add_tail(&edge->list[UPPER], &list);
	} else {
		exist = NULL;
	}

	/* Iterate over every backref (inline and keyed) of cur's block. */
	while (1) {
		cond_resched();
		eb = path1->nodes[0];

		if (ptr >= end) {
			/* Done with inline refs; move to the next item. */
			if (path1->slots[0] >= btrfs_header_nritems(eb)) {
				ret = btrfs_next_leaf(rc->extent_root, path1);
				if (ret < 0) {
					err = ret;
					goto out;
				}
				if (ret > 0)
					break;
				eb = path1->nodes[0];
			}

			btrfs_item_key_to_cpu(eb, &key, path1->slots[0]);
			if (key.objectid != cur->bytenr) {
				WARN_ON(exist);
				break;
			}

			if (key.type == BTRFS_EXTENT_ITEM_KEY ||
			    key.type == BTRFS_METADATA_ITEM_KEY) {
				ret = find_inline_backref(eb, path1->slots[0],
							  &ptr, &end);
				if (ret)
					goto next;
			}
		}

		if (ptr < end) {
			/* update key for inline back ref */
			struct btrfs_extent_inline_ref *iref;
			int type;
			iref = (struct btrfs_extent_inline_ref *)ptr;
			type = btrfs_get_extent_inline_ref_type(eb, iref,
							BTRFS_REF_TYPE_BLOCK);
			if (type == BTRFS_REF_TYPE_INVALID) {
				err = -EUCLEAN;
				goto out;
			}
			key.type = type;
			key.offset = btrfs_extent_inline_ref_offset(eb, iref);

			WARN_ON(key.type != BTRFS_TREE_BLOCK_REF_KEY &&
				key.type != BTRFS_SHARED_BLOCK_REF_KEY);
		}

		/*
		 * Parent node found and matches current inline ref, no need to
		 * rebuild this node for this inline ref.
		 */
		if (exist &&
		    ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
		      exist->owner == key.offset) ||
		     (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
		      exist->bytenr == key.offset))) {
			exist = NULL;
			goto next;
		}

		/* SHARED_BLOCK_REF means key.offset is the parent bytenr */
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
			if (key.objectid == key.offset) {
				/*
				 * Only root blocks of reloc trees use backref
				 * pointing to itself.
				 */
				root = find_reloc_root(rc, cur->bytenr);
				ASSERT(root);
				cur->root = root;
				break;
			}

			edge = alloc_backref_edge(cache);
			if (!edge) {
				err = -ENOMEM;
				goto out;
			}
			rb_node = tree_search(&cache->rb_root, key.offset);
			if (!rb_node) {
				/* Parent not cached yet: make a new node. */
				upper = alloc_backref_node(cache);
				if (!upper) {
					free_backref_edge(cache, edge);
					err = -ENOMEM;
					goto out;
				}
				upper->bytenr = key.offset;
				upper->level = cur->level + 1;
				/*
				 * backrefs for the upper level block isn't
				 * cached, add the block to pending list
				 */
				list_add_tail(&edge->list[UPPER], &list);
			} else {
				/* Parent already cached; just link to it. */
				upper = rb_entry(rb_node, struct backref_node,
						 rb_node);
				ASSERT(upper->checked);
				INIT_LIST_HEAD(&edge->list[UPPER]);
			}
			list_add_tail(&edge->list[LOWER], &cur->upper);
			edge->node[LOWER] = cur;
			edge->node[UPPER] = upper;

			goto next;
		} else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
			err = -EINVAL;
			btrfs_print_v0_err(rc->extent_root->fs_info);
			btrfs_handle_fs_error(rc->extent_root->fs_info, err,
					      NULL);
			goto out;
		} else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
			goto next;
		}

		/*
		 * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset
		 * means the root objectid. We need to search the tree to get
		 * its parent bytenr.
		 */
		root = read_fs_root(rc->extent_root->fs_info, key.offset);
		if (IS_ERR(root)) {
			err = PTR_ERR(root);
			goto out;
		}

		if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
			cur->cowonly = 1;

		if (btrfs_root_level(&root->root_item) == cur->level) {
			/* tree root */
			ASSERT(btrfs_root_bytenr(&root->root_item) ==
			       cur->bytenr);
			if (should_ignore_root(root))
				list_add(&cur->list, &useless);
			else
				cur->root = root;
			break;
		}

		level = cur->level + 1;

		/* Search the tree to find parent blocks referring the block. */
		path2->search_commit_root = 1;
		path2->skip_locking = 1;
		path2->lowest_level = level;
		ret = btrfs_search_slot(NULL, root, node_key, path2, 0, 0);
		path2->lowest_level = 0;
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret > 0 && path2->slots[level] > 0)
			path2->slots[level]--;

		eb = path2->nodes[level];
		/* Sanity: the found parent slot must point back at cur. */
		if (btrfs_node_blockptr(eb, path2->slots[level]) !=
		    cur->bytenr) {
			btrfs_err(root->fs_info,
	"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
				  cur->bytenr, level - 1,
				  root->root_key.objectid,
				  node_key->objectid, node_key->type,
				  node_key->offset);
			err = -ENOENT;
			goto out;
		}
		lower = cur;
		need_check = true;

		/* Add all nodes and edges in the path */
		for (; level < BTRFS_MAX_LEVEL; level++) {
			if (!path2->nodes[level]) {
				/* Walked past the top: lower is the root block. */
				ASSERT(btrfs_root_bytenr(&root->root_item) ==
				       lower->bytenr);
				if (should_ignore_root(root))
					list_add(&lower->list, &useless);
				else
					lower->root = root;
				break;
			}

			edge = alloc_backref_edge(cache);
			if (!edge) {
				err = -ENOMEM;
				goto out;
			}

			eb = path2->nodes[level];
			rb_node = tree_search(&cache->rb_root, eb->start);
			if (!rb_node) {
				upper = alloc_backref_node(cache);
				if (!upper) {
					free_backref_edge(cache, edge);
					err = -ENOMEM;
					goto out;
				}
				upper->bytenr = eb->start;
				upper->owner = btrfs_header_owner(eb);
				upper->level = lower->level + 1;
				if (!test_bit(BTRFS_ROOT_REF_COWS,
					      &root->state))
					upper->cowonly = 1;

				/*
				 * if we know the block isn't shared
				 * we can void checking its backrefs.
				 */
				if (btrfs_block_can_be_shared(root, eb))
					upper->checked = 0;
				else
					upper->checked = 1;

				/*
				 * add the block to pending list if we
				 * need check its backrefs, we only do this once
				 * while walking up a tree as we will catch
				 * anything else later on.
				 */
				if (!upper->checked && need_check) {
					need_check = false;
					list_add_tail(&edge->list[UPPER],
						      &list);
				} else {
					if (upper->checked)
						need_check = true;
					INIT_LIST_HEAD(&edge->list[UPPER]);
				}
			} else {
				upper = rb_entry(rb_node, struct backref_node,
						 rb_node);
				ASSERT(upper->checked);
				INIT_LIST_HEAD(&edge->list[UPPER]);
				if (!upper->owner)
					upper->owner = btrfs_header_owner(eb);
			}
			list_add_tail(&edge->list[LOWER], &lower->upper);
			edge->node[LOWER] = lower;
			edge->node[UPPER] = upper;

			/* Hit a cached node: everything above is cached too. */
			if (rb_node)
				break;
			lower = upper;
			upper = NULL;
		}
		btrfs_release_path(path2);
next:
		/* Advance to the next inline ref, or the next item. */
		if (ptr < end) {
			ptr += btrfs_extent_inline_ref_size(key.type);
			if (ptr >= end) {
				WARN_ON(ptr > end);
				ptr = 0;
				end = 0;
			}
		}
		if (ptr >= end)
			path1->slots[0]++;
	}
	btrfs_release_path(path1);

	cur->checked = 1;
	WARN_ON(exist);

	/* the pending list isn't empty, take the first block to process */
	if (!list_empty(&list)) {
		edge = list_entry(list.next, struct backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		cur = edge->node[UPPER];
		goto again;
	}

	/*
	 * everything goes well, connect backref nodes and insert backref nodes
	 * into the cache.
	 */
	ASSERT(node->checked);
	cowonly = node->cowonly;
	if (!cowonly) {
		rb_node = tree_insert(&cache->rb_root, node->bytenr,
				      &node->rb_node);
		if (rb_node)
			backref_tree_panic(rb_node, -EEXIST, node->bytenr);
		list_add_tail(&node->lower, &cache->leaves);
	}

	/* Breadth-first walk upward, caching every finished node. */
	list_for_each_entry(edge, &node->upper, list[LOWER])
		list_add_tail(&edge->list[UPPER], &list);

	while (!list_empty(&list)) {
		edge = list_entry(list.next, struct backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		upper = edge->node[UPPER];
		if (upper->detached) {
			/* Detached upper node: drop the edge to it. */
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			free_backref_edge(cache, edge);
			if (list_empty(&lower->upper))
				list_add(&lower->list, &useless);
			continue;
		}

		if (!RB_EMPTY_NODE(&upper->rb_node)) {
			/* Already inserted into the cache rbtree. */
			if (upper->lowest) {
				list_del_init(&upper->lower);
				upper->lowest = 0;
			}

			list_add_tail(&edge->list[UPPER], &upper->lower);
			continue;
		}

		if (!upper->checked) {
			/*
			 * Still want to blow up for developers since this is a
			 * logic bug.
			 */
			ASSERT(0);
			err = -EINVAL;
			goto out;
		}
		/* cowonly must be uniform along any upward path. */
		if (cowonly != upper->cowonly) {
			ASSERT(0);
			err = -EINVAL;
			goto out;
		}

		if (!cowonly) {
			rb_node = tree_insert(&cache->rb_root, upper->bytenr,
					      &upper->rb_node);
			if (rb_node)
				backref_tree_panic(rb_node, -EEXIST,
						   upper->bytenr);
		}

		list_add_tail(&edge->list[UPPER], &upper->lower);

		list_for_each_entry(edge, &upper->upper, list[LOWER])
			list_add_tail(&edge->list[UPPER], &list);
	}
	/*
	 * process useless backref nodes. backref nodes for tree leaves
	 * are deleted from the cache. backref nodes for upper level
	 * tree blocks are left in the cache to avoid unnecessary backref
	 * lookup.
	 */
	while (!list_empty(&useless)) {
		upper = list_entry(useless.next, struct backref_node, list);
		list_del_init(&upper->list);
		ASSERT(list_empty(&upper->upper));
		if (upper == node)
			node = NULL;
		if (upper->lowest) {
			list_del_init(&upper->lower);
			upper->lowest = 0;
		}
		while (!list_empty(&upper->lower)) {
			edge = list_entry(upper->lower.next,
					  struct backref_edge, list[UPPER]);
			list_del(&edge->list[UPPER]);
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			free_backref_edge(cache, edge);

			/* lower lost its last parent: it is useless too. */
			if (list_empty(&lower->upper))
				list_add(&lower->list, &useless);
		}
		__mark_block_processed(rc, upper);
		if (upper->level > 0) {
			list_add(&upper->list, &cache->detached);
			upper->detached = 1;
		} else {
			rb_erase(&upper->rb_node, &cache->rb_root);
			free_backref_node(cache, upper);
		}
	}
out:
	btrfs_free_path(path1);
	btrfs_free_path(path2);
	if (err) {
		/*
		 * Error unwinding: first empty the useless list, then free
		 * every not-yet-cached node and edge reachable from the
		 * pending list, and finally the starting node itself.
		 */
		while (!list_empty(&useless)) {
			lower = list_entry(useless.next,
					   struct backref_node, list);
			list_del_init(&lower->list);
		}
		while (!list_empty(&list)) {
			edge = list_first_entry(&list, struct backref_edge,
						list[UPPER]);
			list_del(&edge->list[UPPER]);
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			upper = edge->node[UPPER];
			free_backref_edge(cache, edge);

			/*
			 * Lower is no longer linked to any upper backref nodes
			 * and isn't in the cache, we can free it ourselves.
			 */
			if (list_empty(&lower->upper) &&
			    RB_EMPTY_NODE(&lower->rb_node))
				list_add(&lower->list, &useless);

			if (!RB_EMPTY_NODE(&upper->rb_node))
				continue;

			/* Add this guy's upper edges to the list to process */
			list_for_each_entry(edge, &upper->upper, list[LOWER])
				list_add_tail(&edge->list[UPPER], &list);
			if (list_empty(&upper->upper))
				list_add(&upper->list, &useless);
		}

		while (!list_empty(&useless)) {
			lower = list_entry(useless.next,
					   struct backref_node, list);
			list_del_init(&lower->list);
			if (lower == node)
				node = NULL;
			free_backref_node(cache, lower);
		}

		free_backref_node(cache, node);
		return ERR_PTR(err);
	}
	ASSERT(!node || !node->detached);
	return node;
}
/*
 * helper to add backref node for the newly created snapshot.
 * the backref node is created by cloning backref node that
 * corresponds to root of source tree
 *
 * Returns 0 on success (including when there is nothing to clone),
 * -ENOMEM on allocation failure.
 */
static int clone_backref_node(struct btrfs_trans_handle *trans,
			      struct reloc_control *rc,
			      struct btrfs_root *src,
			      struct btrfs_root *dest)
{
	struct btrfs_root *reloc_root = src->reloc_root;
	struct backref_cache *cache = &rc->backref_cache;
	struct backref_node *node = NULL;
	struct backref_node *new_node;
	struct backref_edge *edge;
	struct backref_edge *new_edge;
	struct rb_node *rb_node;

	if (cache->last_trans > 0)
		update_backref_cache(trans, cache);

	/* Look for the source root's node, first by its commit root bytenr. */
	rb_node = tree_search(&cache->rb_root, src->commit_root->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct backref_node, rb_node);
		if (node->detached)
			node = NULL;
		else
			BUG_ON(node->new_bytenr != reloc_root->node->start);
	}

	/* Fall back to the reloc root's commit root bytenr. */
	if (!node) {
		rb_node = tree_search(&cache->rb_root,
				      reloc_root->commit_root->start);
		if (rb_node) {
			node = rb_entry(rb_node, struct backref_node,
					rb_node);
			BUG_ON(node->detached);
		}
	}

	/* Source root not cached: nothing to clone. */
	if (!node)
		return 0;

	new_node = alloc_backref_node(cache);
	if (!new_node)
		return -ENOMEM;

	new_node->bytenr = dest->node->start;
	new_node->level = node->level;
	new_node->lowest = node->lowest;
	new_node->checked = 1;
	new_node->root = dest;

	if (!node->lowest) {
		/* Clone every downward edge of the source node. */
		list_for_each_entry(edge, &node->lower, list[UPPER]) {
			new_edge = alloc_backref_edge(cache);
			if (!new_edge)
				goto fail;

			new_edge->node[UPPER] = new_node;
			new_edge->node[LOWER] = edge->node[LOWER];
			list_add_tail(&new_edge->list[UPPER],
				      &new_node->lower);
		}
	} else {
		list_add_tail(&new_node->lower, &cache->leaves);
	}

	rb_node = tree_insert(&cache->rb_root, new_node->bytenr,
			      &new_node->rb_node);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, new_node->bytenr);

	/* Only link edges into the lower nodes once the insert succeeded. */
	if (!new_node->lowest) {
		list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
			list_add_tail(&new_edge->list[LOWER],
				      &new_edge->node[LOWER]->upper);
		}
	}
	return 0;
fail:
	while (!list_empty(&new_node->lower)) {
		new_edge = list_entry(new_node->lower.next,
				      struct backref_edge, list[UPPER]);
		list_del(&new_edge->list[UPPER]);
		free_backref_edge(cache, new_edge);
	}
	free_backref_node(cache, new_node);
	return -ENOMEM;
}
/*
 * helper to add 'address of tree root -> reloc tree' mapping
 *
 * Inserts @root into rc->reloc_root_tree keyed by its root node bytenr
 * and appends it to rc->reloc_roots.  Returns 0 on success, -ENOMEM if
 * the mapping node cannot be allocated; panics on a duplicate key.
 */
static int __must_check __add_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct reloc_control *rc = fs_info->reloc_ctl;

	node = kmalloc(sizeof(*node), GFP_NOFS);
	if (!node)
		return -ENOMEM;

	node->bytenr = root->node->start;
	node->data = root;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
			      node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node) {
		btrfs_panic(fs_info, -EEXIST,
			    "Duplicate root found for start=%llu while inserting into relocation tree",
			    node->bytenr);
	}

	list_add_tail(&root->root_list, &rc->reloc_roots);
	return 0;
}
/*
 * helper to delete the 'address of tree root -> reloc tree'
 * mapping
 *
 * Removes @root's entry from rc->reloc_root_tree (if present) and takes
 * it off the reloc_roots list.  If the mapping exists in the tree but is
 * not found, the function returns without touching the list.
 */
static void __del_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = fs_info->reloc_ctl;

	if (rc && root->node) {
		spin_lock(&rc->reloc_root_tree.lock);
		rb_node = tree_search(&rc->reloc_root_tree.rb_root,
				      root->node->start);
		if (rb_node) {
			node = rb_entry(rb_node, struct mapping_node, rb_node);
			rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
		}
		spin_unlock(&rc->reloc_root_tree.lock);
		if (!node)
			return;
		BUG_ON((struct btrfs_root *)node->data != root);
	}

	/* trans_lock protects the fs-wide reloc_roots list linkage. */
	spin_lock(&fs_info->trans_lock);
	list_del_init(&root->root_list);
	spin_unlock(&fs_info->trans_lock);
	kfree(node);
}
/*
 * helper to update the 'address of tree root -> reloc tree'
 * mapping
 *
 * Re-keys @root's mapping node from its current root node bytenr to
 * @new_bytenr.  Returns 0 whether or not a mapping was found; panics
 * if re-insertion collides with an existing key.
 */
static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = fs_info->reloc_ctl;

	/* Detach the mapping node under the lock. */
	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_search(&rc->reloc_root_tree.rb_root,
			      root->node->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
	}
	spin_unlock(&rc->reloc_root_tree.lock);

	if (!node)
		return 0;
	BUG_ON((struct btrfs_root *)node->data != root);

	/* Re-insert it with the new key. */
	spin_lock(&rc->reloc_root_tree.lock);
	node->bytenr = new_bytenr;
	rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
			      node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, node->bytenr);
	return 0;
}
/*
 * Create a reloc tree for @root: copy the root block, insert a matching
 * root item (objectid BTRFS_TREE_RELOC_OBJECTID, offset @objectid) into
 * the tree root, and read the new root back.
 *
 * Called from btrfs_init_reloc_root (objectid == root's own objectid)
 * and from btrfs_reloc_post_snapshot_hook (source is a reloc tree).
 * All failures here are treated as fatal via BUG_ON.
 */
static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root, u64 objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb;
	struct btrfs_root_item *root_item;
	struct btrfs_key root_key;
	int ret;

	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
	BUG_ON(!root_item);

	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = objectid;

	if (root->root_key.objectid == objectid) {
		u64 commit_root_gen;

		/* called by btrfs_init_reloc_root */
		ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		BUG_ON(ret);
		/*
		 * Set the last_snapshot field to the generation of the commit
		 * root - like this ctree.c:btrfs_block_can_be_shared() behaves
		 * correctly (returns true) when the relocation root is created
		 * either inside the critical section of a transaction commit
		 * (through transaction.c:qgroup_account_snapshot()) and when
		 * it's created before the transaction commit is started.
		 */
		commit_root_gen = btrfs_header_generation(root->commit_root);
		btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen);
	} else {
		/*
		 * called by btrfs_reloc_post_snapshot_hook.
		 * the source tree is a reloc tree, all tree blocks
		 * modified after it was created have RELOC flag
		 * set in their headers. so it's OK to not update
		 * the 'last_snapshot'.
		 */
		ret = btrfs_copy_root(trans, root, root->node, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		BUG_ON(ret);
	}

	/* Build the reloc root's root item from the copied block. */
	memcpy(root_item, &root->root_item, sizeof(*root_item));
	btrfs_set_root_bytenr(root_item, eb->start);
	btrfs_set_root_level(root_item, btrfs_header_level(eb));
	btrfs_set_root_generation(root_item, trans->transid);

	if (root->root_key.objectid == objectid) {
		/* Fresh reloc tree: no refs, drop progress starts at zero. */
		btrfs_set_root_refs(root_item, 0);
		memset(&root_item->drop_progress, 0,
		       sizeof(struct btrfs_disk_key));
		root_item->drop_level = 0;
	}

	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_insert_root(trans, fs_info->tree_root,
				&root_key, root_item);
	BUG_ON(ret);
	kfree(root_item);

	reloc_root = btrfs_read_fs_root(fs_info->tree_root, &root_key);
	BUG_ON(IS_ERR(reloc_root));
	reloc_root->last_trans = trans->transid;
	return reloc_root;
}
/*
 * create reloc tree for a given fs tree. reloc tree is just a
 * snapshot of the fs tree with special root objectid.
 *
 * Idempotent per transaction: if @root already has a reloc root, only
 * its last_trans is refreshed.  Does nothing unless relocation is
 * active (rc->create_reloc_tree) and @root is not itself a reloc tree.
 * Returns 0.
 */
int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct reloc_control *rc = fs_info->reloc_ctl;
	struct btrfs_block_rsv *rsv;
	int clear_rsv = 0;
	int ret;

	if (root->reloc_root) {
		reloc_root = root->reloc_root;
		reloc_root->last_trans = trans->transid;
		return 0;
	}

	if (!rc || !rc->create_reloc_tree ||
	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		return 0;

	/* Borrow the relocation block reserve unless one is already set. */
	if (!trans->reloc_reserved) {
		rsv = trans->block_rsv;
		trans->block_rsv = rc->block_rsv;
		clear_rsv = 1;
	}
	reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
	if (clear_rsv)
		trans->block_rsv = rsv;

	ret = __add_reloc_root(reloc_root);
	BUG_ON(ret < 0);
	root->reloc_root = reloc_root;
	return 0;
}
/*
 * update root item of reloc tree
 *
 * Syncs the reloc root's root item with its in-memory root node and
 * writes it back to the tree root.  If the reloc tree has no refs and
 * the merge phase has started, the mapping is dropped and the fs root
 * is flagged BTRFS_ROOT_DEAD_RELOC_TREE.  Always returns 0.
 */
int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct btrfs_root_item *root_item;
	int ret;

	if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state) ||
	    !root->reloc_root)
		goto out;

	reloc_root = root->reloc_root;
	root_item = &reloc_root->root_item;

	/* root->reloc_root will stay until current relocation finished */
	if (fs_info->reloc_ctl->merge_reloc_tree &&
	    btrfs_root_refs(root_item) == 0) {
		set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
		__del_reloc_root(reloc_root);
	}

	/* Root node changed this transaction: point the item at it. */
	if (reloc_root->commit_root != reloc_root->node) {
		btrfs_set_root_node(root_item, reloc_root->node);
		free_extent_buffer(reloc_root->commit_root);
		reloc_root->commit_root = btrfs_root_node(reloc_root);
	}

	ret = btrfs_update_root(trans, fs_info->tree_root,
				&reloc_root->root_key, root_item);
	BUG_ON(ret);

out:
	return 0;
}
/*
 * helper to find first cached inode with inode number >= objectid
 * in a subvolume
 *
 * Walks root->inode_tree under root->inode_lock.  Returns a referenced
 * inode (via igrab) or NULL if none qualifies.  May drop and retake the
 * lock (cond_resched_lock), restarting the walk at the current objectid.
 */
static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
{
	struct rb_node *node;
	struct rb_node *prev;
	struct btrfs_inode *entry;
	struct inode *inode;

	spin_lock(&root->inode_lock);
again:
	node = root->inode_tree.rb_node;
	prev = NULL;
	/* Standard rbtree descent; remember the last node visited. */
	while (node) {
		prev = node;
		entry = rb_entry(node, struct btrfs_inode, rb_node);

		if (objectid < btrfs_ino(entry))
			node = node->rb_left;
		else if (objectid > btrfs_ino(entry))
			node = node->rb_right;
		else
			break;
	}
	/* No exact match: scan forward from prev for the first ino >= objectid. */
	if (!node) {
		while (prev) {
			entry = rb_entry(prev, struct btrfs_inode, rb_node);
			if (objectid <= btrfs_ino(entry)) {
				node = prev;
				break;
			}
			prev = rb_next(prev);
		}
	}
	while (node) {
		entry = rb_entry(node, struct btrfs_inode, rb_node);
		inode = igrab(&entry->vfs_inode);
		if (inode) {
			spin_unlock(&root->inode_lock);
			return inode;
		}

		/* igrab failed (inode being evicted); try the next one. */
		objectid = btrfs_ino(entry) + 1;
		if (cond_resched_lock(&root->inode_lock))
			goto again;

		node = rb_next(node);
	}
	spin_unlock(&root->inode_lock);
	return NULL;
}
/*
 * Return 1 if @bytenr lies inside @block_group's address range,
 * 0 otherwise.
 */
static int in_block_group(u64 bytenr,
			  struct btrfs_block_group_cache *block_group)
{
	u64 start = block_group->key.objectid;
	u64 end = start + block_group->key.offset;

	return bytenr >= start && bytenr < end;
}
/*
 * get new location of data
 *
 * Translates @bytenr (old data extent start) to its new location by
 * looking up the corresponding file extent item in @reloc_inode's tree.
 * Stores the new disk bytenr in *new_bytenr.  Returns 0 on success,
 * -ENOENT if no extent is found, -EINVAL on a size mismatch, or a
 * negative error from the search.
 */
static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
			    u64 bytenr, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* index_cnt holds the block group start; convert to file offset. */
	bytenr -= BTRFS_I(reloc_inode)->index_cnt;
	ret = btrfs_lookup_file_extent(NULL, root, path,
			btrfs_ino(BTRFS_I(reloc_inode)), bytenr, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);

	/* Relocation data extents are plain: no offset/compression/etc. */
	BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
	       btrfs_file_extent_compression(leaf, fi) ||
	       btrfs_file_extent_encryption(leaf, fi) ||
	       btrfs_file_extent_other_encoding(leaf, fi));

	if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
		ret = -EINVAL;
		goto out;
	}

	*new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * update file extent items in the tree leaf to point to
 * the new locations.
 *
 * Only acts during the UPDATE_DATA_PTRS stage.  For every regular file
 * extent in @leaf whose disk bytenr falls inside rc->block_group, the
 * bytenr is rewritten to its relocated address, a new extent ref is
 * added and the old one dropped.  Returns 0 or a negative error (the
 * transaction is aborted when refs were already modified).
 */
static noinline_for_stack
int replace_file_extents(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct btrfs_root *root,
			 struct extent_buffer *leaf)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	struct inode *inode = NULL;
	u64 parent;
	u64 bytenr;
	u64 new_bytenr = 0;
	u64 num_bytes;
	u64 end;
	u32 nritems;
	u32 i;
	int ret = 0;
	int first = 1;
	int dirty = 0;

	if (rc->stage != UPDATE_DATA_PTRS)
		return 0;

	/* reloc trees always use full backref */
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		parent = leaf->start;
	else
		parent = 0;

	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		cond_resched();
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			continue;
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
		if (bytenr == 0)
			continue;
		if (!in_block_group(bytenr, rc->block_group))
			continue;

		/*
		 * if we are modifying block in fs tree, wait for readpage
		 * to complete and drop the extent cache
		 */
		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
			if (first) {
				inode = find_next_inode(root, key.objectid);
				first = 0;
			} else if (inode && btrfs_ino(BTRFS_I(inode)) < key.objectid) {
				btrfs_add_delayed_iput(inode);
				inode = find_next_inode(root, key.objectid);
			}
			if (inode && btrfs_ino(BTRFS_I(inode)) == key.objectid) {
				end = key.offset +
				      btrfs_file_extent_num_bytes(leaf, fi);
				WARN_ON(!IS_ALIGNED(key.offset,
						    fs_info->sectorsize));
				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
				end--;
				/* Skip this extent if the range is in use. */
				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
						      key.offset, end);
				if (!ret)
					continue;

				btrfs_drop_extent_cache(BTRFS_I(inode),
						key.offset,	end, 1);
				unlock_extent(&BTRFS_I(inode)->io_tree,
					      key.offset, end);
			}
		}

		ret = get_new_location(rc->data_inode, &new_bytenr,
				       bytenr, num_bytes);
		if (ret) {
			/*
			 * Don't have to abort since we've not changed anything
			 * in the file extent yet.
			 */
			break;
		}

		btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
		dirty = 1;

		/* key.offset becomes the extent's logical file offset. */
		key.offset -= btrfs_file_extent_offset(leaf, fi);
		ret = btrfs_inc_extent_ref(trans, root, new_bytenr,
					   num_bytes, parent,
					   btrfs_header_owner(leaf),
					   key.objectid, key.offset);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					parent, btrfs_header_owner(leaf),
					key.objectid, key.offset);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}
	}
	if (dirty)
		btrfs_mark_buffer_dirty(leaf);
	if (inode)
		btrfs_add_delayed_iput(inode);
	return ret;
}
static noinline_for_stack
int memcmp_node_keys(struct extent_buffer *eb, int slot,
struct btrfs_path *path, int level)
{
struct btrfs_disk_key key1;
struct btrfs_disk_key key2;
btrfs_node_key(eb, &key1, slot);
btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
return memcmp(&key1, &key2, sizeof(key1));
}
/*
 * try to replace tree blocks in fs tree with the new blocks
 * in reloc tree. tree blocks haven't been modified since the
 * reloc tree was create can be replaced.
 *
 * if a block was replaced, level of the block + 1 is returned.
 * if no block got replaced, 0 is returned. if there are other
 * errors, a negative error number is returned.
 */
static noinline_for_stack
int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
		 struct btrfs_root *dest, struct btrfs_root *src,
		 struct btrfs_path *path, struct btrfs_key *next_key,
		 int lowest_level, int max_level)
{
	struct btrfs_fs_info *fs_info = dest->fs_info;
	struct extent_buffer *eb;
	struct extent_buffer *parent;
	struct btrfs_key key;
	u64 old_bytenr;
	u64 new_bytenr;
	u64 old_ptr_gen;
	u64 new_ptr_gen;
	u64 last_snapshot;
	u32 blocksize;
	int cow = 0;
	int level;
	int ret;
	int slot;

	/* src must be the reloc tree, dest the fs tree. */
	BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
	BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);

	last_snapshot = btrfs_root_last_snapshot(&src->root_item);
again:
	slot = path->slots[lowest_level];
	btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);

	eb = btrfs_lock_root_node(dest);
	btrfs_set_lock_blocking_write(eb);
	level = btrfs_header_level(eb);

	if (level < lowest_level) {
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
		return 0;
	}

	/* Second pass (cow == 1): COW the path down to the swap point. */
	if (cow) {
		ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb);
		BUG_ON(ret);
	}
	btrfs_set_lock_blocking_write(eb);

	if (next_key) {
		next_key->objectid = (u64)-1;
		next_key->type = (u8)-1;
		next_key->offset = (u64)-1;
	}

	parent = eb;
	while (1) {
		struct btrfs_key first_key;

		level = btrfs_header_level(parent);
		BUG_ON(level < lowest_level);

		ret = btrfs_bin_search(parent, &key, level, &slot);
		if (ret < 0)
			break;
		if (ret && slot > 0)
			slot--;

		if (next_key && slot + 1 < btrfs_header_nritems(parent))
			btrfs_node_key_to_cpu(parent, next_key, slot + 1);

		old_bytenr = btrfs_node_blockptr(parent, slot);
		blocksize = fs_info->nodesize;
		old_ptr_gen = btrfs_node_ptr_generation(parent, slot);
		btrfs_node_key_to_cpu(parent, &first_key, slot);

		if (level <= max_level) {
			eb = path->nodes[level];
			new_bytenr = btrfs_node_blockptr(eb,
							path->slots[level]);
			new_ptr_gen = btrfs_node_ptr_generation(eb,
							path->slots[level]);
		} else {
			new_bytenr = 0;
			new_ptr_gen = 0;
		}

		/* Both trees pointing at the same block must not happen. */
		if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) {
			ret = level;
			break;
		}

		if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
		    memcmp_node_keys(parent, slot, path, level)) {
			/*
			 * The fs-tree child was modified after the reloc
			 * snapshot (or keys diverge): descend instead of
			 * swapping at this level.
			 */
			if (level <= lowest_level) {
				ret = 0;
				break;
			}

			eb = read_tree_block(fs_info, old_bytenr, old_ptr_gen,
					     level - 1, &first_key);
			if (IS_ERR(eb)) {
				ret = PTR_ERR(eb);
				break;
			} else if (!extent_buffer_uptodate(eb)) {
				ret = -EIO;
				free_extent_buffer(eb);
				break;
			}
			btrfs_tree_lock(eb);
			if (cow) {
				ret = btrfs_cow_block(trans, dest, eb, parent,
						      slot, &eb);
				BUG_ON(ret);
			}
			btrfs_set_lock_blocking_write(eb);

			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);

			parent = eb;
			continue;
		}

		/* Found a swappable block but haven't COWed yet: restart. */
		if (!cow) {
			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);
			cow = 1;
			goto again;
		}

		btrfs_node_key_to_cpu(path->nodes[level], &key,
				      path->slots[level]);
		btrfs_release_path(path);

		path->lowest_level = level;
		ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
		path->lowest_level = 0;
		BUG_ON(ret);

		/*
		 * Info qgroup to trace both subtrees.
		 *
		 * We must trace both trees.
		 * 1) Tree reloc subtree
		 *    If not traced, we will leak data numbers
		 * 2) Fs subtree
		 *    If not traced, we will double count old data
		 *
		 * We don't scan the subtree right now, but only record
		 * the swapped tree blocks.
		 * The real subtree rescan is delayed until we have new
		 * CoW on the subtree root node before transaction commit.
		 */
		ret = btrfs_qgroup_add_swapped_blocks(trans, dest,
				rc->block_group, parent, slot,
				path->nodes[level], path->slots[level],
				last_snapshot);
		if (ret < 0)
			break;
		/*
		 * swap blocks in fs tree and reloc tree.
		 */
		btrfs_set_node_blockptr(parent, slot, new_bytenr);
		btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
		btrfs_mark_buffer_dirty(parent);

		btrfs_set_node_blockptr(path->nodes[level],
					path->slots[level], old_bytenr);
		btrfs_set_node_ptr_generation(path->nodes[level],
					      path->slots[level], old_ptr_gen);
		btrfs_mark_buffer_dirty(path->nodes[level]);

		/* Fix up extent refs: each tree now references the other's block. */
		ret = btrfs_inc_extent_ref(trans, src, old_bytenr,
					blocksize, path->nodes[level]->start,
					src->root_key.objectid, level - 1, 0);
		BUG_ON(ret);
		ret = btrfs_inc_extent_ref(trans, dest, new_bytenr,
					blocksize, 0, dest->root_key.objectid,
					level - 1, 0);
		BUG_ON(ret);

		ret = btrfs_free_extent(trans, src, new_bytenr, blocksize,
					path->nodes[level]->start,
					src->root_key.objectid, level - 1, 0);
		BUG_ON(ret);

		ret = btrfs_free_extent(trans, dest, old_bytenr, blocksize,
					0, dest->root_key.objectid, level - 1,
					0);
		BUG_ON(ret);

		btrfs_unlock_up_safe(path, 0);

		ret = level;
		break;
	}
	btrfs_tree_unlock(parent);
	free_extent_buffer(parent);
	return ret;
}
/*
* helper to find next relocated block in reloc tree
*/
static noinline_for_stack
int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
		       int *level)
{
	struct extent_buffer *node;
	u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
	u32 nritems;
	int lvl;

	/* Release all cached tree blocks below the starting level. */
	for (lvl = 0; lvl < *level; lvl++) {
		free_extent_buffer(path->nodes[lvl]);
		path->nodes[lvl] = NULL;
	}

	/*
	 * Walk upwards from *level looking for the next slot whose pointer
	 * generation is newer than the last snapshot, i.e. the next
	 * relocated block.  Exhausted levels are dropped from the path.
	 */
	for (lvl = *level; lvl < BTRFS_MAX_LEVEL; lvl++) {
		node = path->nodes[lvl];
		if (!node)
			break;
		nritems = btrfs_header_nritems(node);
		while (path->slots[lvl] + 1 < nritems) {
			path->slots[lvl]++;
			if (btrfs_node_ptr_generation(node, path->slots[lvl]) >
			    last_snapshot) {
				*level = lvl;
				return 0;
			}
		}
		free_extent_buffer(path->nodes[lvl]);
		path->nodes[lvl] = NULL;
	}
	/* No more relocated blocks in this reloc tree. */
	return 1;
}
/*
* walk down reloc tree to find relocated block of lowest level
*/
/*
 * Returns 0 when a block to process was found (with *level updated),
 * 1 when the current subtree is exhausted, or a negative errno.
 */
static noinline_for_stack
int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
			 int *level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *eb = NULL;
	int i;
	u64 bytenr;
	u64 ptr_gen = 0;
	u64 last_snapshot;
	u32 nritems;
	last_snapshot = btrfs_root_last_snapshot(&root->root_item);
	for (i = *level; i > 0; i--) {
		struct btrfs_key first_key;
		eb = path->nodes[i];
		nritems = btrfs_header_nritems(eb);
		/*
		 * Skip pointers whose generation predates the last
		 * snapshot: those blocks were not relocated.
		 */
		while (path->slots[i] < nritems) {
			ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
			if (ptr_gen > last_snapshot)
				break;
			path->slots[i]++;
		}
		if (path->slots[i] >= nritems) {
			if (i == *level)
				break;
			/* This node is exhausted; resume at its parent. */
			*level = i + 1;
			return 0;
		}
		if (i == 1) {
			/* Level 1 pointers reference leaves; stop here. */
			*level = i;
			return 0;
		}
		bytenr = btrfs_node_blockptr(eb, path->slots[i]);
		btrfs_node_key_to_cpu(eb, &first_key, path->slots[i]);
		/* Descend into the child block. */
		eb = read_tree_block(fs_info, bytenr, ptr_gen, i - 1,
				     &first_key);
		if (IS_ERR(eb)) {
			return PTR_ERR(eb);
		} else if (!extent_buffer_uptodate(eb)) {
			free_extent_buffer(eb);
			return -EIO;
		}
		BUG_ON(btrfs_header_level(eb) != i - 1);
		path->nodes[i - 1] = eb;
		path->slots[i - 1] = 0;
	}
	return 1;
}
/*
* invalidate extent cache for file extents whose key in range of
* [min_key, max_key)
*/
static int invalidate_extent_cache(struct btrfs_root *root,
				   struct btrfs_key *min_key,
				   struct btrfs_key *max_key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode *inode = NULL;
	u64 objectid;
	u64 start, end;
	u64 ino;
	objectid = min_key->objectid;
	while (1) {
		cond_resched();
		/* Drop the reference taken in the previous iteration. */
		iput(inode);
		if (objectid > max_key->objectid)
			break;
		inode = find_next_inode(root, objectid);
		if (!inode)
			break;
		ino = btrfs_ino(BTRFS_I(inode));
		if (ino > max_key->objectid) {
			iput(inode);
			break;
		}
		objectid = ino + 1;
		if (!S_ISREG(inode->i_mode))
			continue;
		/*
		 * Compute the byte range of this inode covered by the key
		 * range.  Only the first and last inodes can be partially
		 * covered; all inodes in between are covered entirely.
		 */
		if (unlikely(min_key->objectid == ino)) {
			if (min_key->type > BTRFS_EXTENT_DATA_KEY)
				continue;
			if (min_key->type < BTRFS_EXTENT_DATA_KEY)
				start = 0;
			else {
				start = min_key->offset;
				WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize));
			}
		} else {
			start = 0;
		}
		if (unlikely(max_key->objectid == ino)) {
			if (max_key->type < BTRFS_EXTENT_DATA_KEY)
				continue;
			if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
				end = (u64)-1;
			} else {
				if (max_key->offset == 0)
					continue;
				end = max_key->offset;
				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
				/* max_key is exclusive, so back off one byte. */
				end--;
			}
		} else {
			end = (u64)-1;
		}
		/* the lock_extent waits for readpage to complete */
		lock_extent(&BTRFS_I(inode)->io_tree, start, end);
		btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 1);
		unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
	}
	return 0;
}
/*
 * Find the key following the current path position, searching upwards
 * through the path.  Returns 0 and fills @key on success, 1 when the
 * path has no next key.
 */
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	int lvl;

	for (lvl = level; lvl < BTRFS_MAX_LEVEL; lvl++) {
		struct extent_buffer *eb = path->nodes[lvl];
		int next_slot = path->slots[lvl] + 1;

		if (!eb)
			break;
		if (next_slot < btrfs_header_nritems(eb)) {
			btrfs_node_key_to_cpu(eb, key, next_slot);
			return 0;
		}
	}
	return 1;
}
/*
* Insert current subvolume into reloc_control::dirty_subvol_roots
*/
static void insert_dirty_subvol(struct btrfs_trans_handle *trans,
				struct reloc_control *rc,
				struct btrfs_root *root)
{
	struct btrfs_root *reloc_root = root->reloc_root;
	struct btrfs_root_item *reloc_root_item;
	/* @root must be a subvolume tree root with a valid reloc tree */
	ASSERT(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
	ASSERT(reloc_root);
	/*
	 * Merging is done: reset drop progress and set refs to 0 so the
	 * reloc tree will be dropped later (see clean_dirty_subvols()).
	 */
	reloc_root_item = &reloc_root->root_item;
	memset(&reloc_root_item->drop_progress, 0,
	       sizeof(reloc_root_item->drop_progress));
	reloc_root_item->drop_level = 0;
	btrfs_set_root_refs(reloc_root_item, 0);
	btrfs_update_reloc_root(trans, root);
	if (list_empty(&root->reloc_dirty_list)) {
		/* Pin the subvolume root while it sits on the dirty list. */
		btrfs_grab_fs_root(root);
		list_add_tail(&root->reloc_dirty_list, &rc->dirty_subvol_roots);
	}
}
/*
 * Drop the reloc trees queued on dirty_subvol_roots and release the
 * subvolume references taken when they were inserted.  Returns the
 * first error seen from btrfs_drop_snapshot(), or 0.
 */
static int clean_dirty_subvols(struct reloc_control *rc)
{
	struct btrfs_root *root;
	struct btrfs_root *next;
	int ret = 0;
	list_for_each_entry_safe(root, next, &rc->dirty_subvol_roots,
				 reloc_dirty_list) {
		struct btrfs_root *reloc_root = root->reloc_root;
		clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
		list_del_init(&root->reloc_dirty_list);
		root->reloc_root = NULL;
		if (reloc_root) {
			int ret2;
			ret2 = btrfs_drop_snapshot(reloc_root, NULL, 0, 1);
			/* Remember the first error but finish the list. */
			if (ret2 < 0 && !ret)
				ret = ret2;
		}
		/* Pairs with btrfs_grab_fs_root() in insert_dirty_subvol(). */
		btrfs_put_fs_root(root);
	}
	return ret;
}
/*
* merge the relocated tree blocks in reloc tree with corresponding
* fs tree.
*/
static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
					       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_key key;
	struct btrfs_key next_key;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_root *reloc_root;
	struct btrfs_root_item *root_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int level;
	int max_level;
	int replaced = 0;
	int ret;
	int err = 0;
	u32 min_reserved;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;
	reloc_root = root->reloc_root;
	root_item = &reloc_root->root_item;
	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		/* Fresh merge: start from the reloc tree root. */
		level = btrfs_root_level(root_item);
		extent_buffer_get(reloc_root->node);
		path->nodes[level] = reloc_root->node;
		path->slots[level] = 0;
	} else {
		/* Resume a previously interrupted merge at drop_progress. */
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
		level = root_item->drop_level;
		BUG_ON(level == 0);
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
		path->lowest_level = 0;
		if (ret < 0) {
			btrfs_free_path(path);
			return ret;
		}
		btrfs_node_key_to_cpu(path->nodes[level], &next_key,
				      path->slots[level]);
		WARN_ON(memcmp(&key, &next_key, sizeof(key)));
		btrfs_unlock_up_safe(path, 0);
	}
	/*
	 * Worst-case metadata reservation for one iteration: CoW of a
	 * full-height path in both trees.
	 */
	min_reserved = fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
	memset(&next_key, 0, sizeof(next_key));
	while (1) {
		ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved,
					     BTRFS_RESERVE_FLUSH_ALL);
		if (ret) {
			err = ret;
			goto out;
		}
		/* One transaction per replaced subtree. */
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			trans = NULL;
			goto out;
		}
		trans->block_rsv = rc->block_rsv;
		replaced = 0;
		max_level = level;
		ret = walk_down_reloc_tree(reloc_root, path, &level);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret > 0)
			break;
		/*
		 * Skip the swap if everything up to the next key was
		 * already merged in a previous iteration.
		 */
		if (!find_next_key(path, level, &key) &&
		    btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
			ret = 0;
		} else {
			ret = replace_path(trans, rc, root, reloc_root, path,
					   &next_key, level, max_level);
		}
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret > 0) {
			level = ret;
			btrfs_node_key_to_cpu(path->nodes[level], &key,
					      path->slots[level]);
			replaced = 1;
		}
		ret = walk_up_reloc_tree(reloc_root, path, &level);
		if (ret > 0)
			break;
		BUG_ON(level == 0);
		/*
		 * save the merging progress in the drop_progress.
		 * this is OK since root refs == 1 in this case.
		 */
		btrfs_node_key(path->nodes[level], &root_item->drop_progress,
			       path->slots[level]);
		root_item->drop_level = level;
		btrfs_end_transaction_throttle(trans);
		trans = NULL;
		btrfs_btree_balance_dirty(fs_info);
		if (replaced && rc->stage == UPDATE_DATA_PTRS)
			invalidate_extent_cache(root, &key, &next_key);
	}
	/*
	 * handle the case only one block in the fs tree need to be
	 * relocated and the block is tree root.
	 */
	leaf = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf);
	btrfs_tree_unlock(leaf);
	free_extent_buffer(leaf);
	if (ret < 0)
		err = ret;
out:
	btrfs_free_path(path);
	if (err == 0)
		insert_dirty_subvol(trans, rc, root);
	if (trans)
		btrfs_end_transaction_throttle(trans);
	btrfs_btree_balance_dirty(fs_info);
	if (replaced && rc->stage == UPDATE_DATA_PTRS)
		invalidate_extent_cache(root, &key, &next_key);
	return err;
}
/*
 * Reserve space for the merge and flip every reloc root into the state
 * that lets btrfs_recover_relocation() resume merging after a crash.
 * An incoming @err is preserved and returned (no new reservation is
 * attempted in that case).
 */
static noinline_for_stack
int prepare_to_merge(struct reloc_control *rc, int err)
{
	struct btrfs_root *root = rc->extent_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct btrfs_trans_handle *trans;
	LIST_HEAD(reloc_roots);
	u64 num_bytes = 0;
	int ret;
	mutex_lock(&fs_info->reloc_mutex);
	rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
	rc->merging_rsv_size += rc->nodes_relocated * 2;
	mutex_unlock(&fs_info->reloc_mutex);
again:
	if (!err) {
		num_bytes = rc->merging_rsv_size;
		ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes,
					  BTRFS_RESERVE_FLUSH_ALL);
		if (ret)
			err = ret;
	}
	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		if (!err)
			btrfs_block_rsv_release(fs_info, rc->block_rsv,
						num_bytes);
		return PTR_ERR(trans);
	}
	if (!err) {
		/*
		 * merging_rsv_size may have grown since we reserved;
		 * retry with the larger amount.
		 */
		if (num_bytes != rc->merging_rsv_size) {
			btrfs_end_transaction(trans);
			btrfs_block_rsv_release(fs_info, rc->block_rsv,
						num_bytes);
			goto again;
		}
	}
	rc->merge_reloc_tree = 1;
	while (!list_empty(&rc->reloc_roots)) {
		reloc_root = list_entry(rc->reloc_roots.next,
					struct btrfs_root, root_list);
		list_del_init(&reloc_root->root_list);
		root = read_fs_root(fs_info, reloc_root->root_key.offset);
		BUG_ON(IS_ERR(root));
		BUG_ON(root->reloc_root != reloc_root);
		/*
		 * set reference count to 1, so btrfs_recover_relocation
		 * knows it should resumes merging
		 */
		if (!err)
			btrfs_set_root_refs(&reloc_root->root_item, 1);
		btrfs_update_reloc_root(trans, root);
		list_add(&reloc_root->root_list, &reloc_roots);
	}
	list_splice(&reloc_roots, &rc->reloc_roots);
	/* Commit on success so the refs==1 state is durable on disk. */
	if (!err)
		btrfs_commit_transaction(trans);
	else
		btrfs_end_transaction(trans);
	return err;
}
/*
 * Error-path teardown: unhook each reloc root from the reloc control
 * and drop its cached node/commit_root buffers.
 *
 * NOTE(review): reloc_root fields are accessed after __del_reloc_root();
 * this assumes __del_reloc_root() does not free the root structure
 * itself — confirm against its definition.
 */
static noinline_for_stack
void free_reloc_roots(struct list_head *list)
{
	struct btrfs_root *reloc_root;
	while (!list_empty(list)) {
		reloc_root = list_entry(list->next, struct btrfs_root,
					root_list);
		__del_reloc_root(reloc_root);
		free_extent_buffer(reloc_root->node);
		free_extent_buffer(reloc_root->commit_root);
		reloc_root->node = NULL;
		reloc_root->commit_root = NULL;
	}
}
/*
 * Merge every reloc tree into its corresponding fs tree.  Repeats the
 * splice-and-merge cycle until no new reloc roots appear, since roots
 * may be added concurrently (see the locking comment below).
 */
static noinline_for_stack
void merge_reloc_roots(struct reloc_control *rc)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_root *root;
	struct btrfs_root *reloc_root;
	LIST_HEAD(reloc_roots);
	int found = 0;
	int ret = 0;
again:
	root = rc->extent_root;
	/*
	 * this serializes us with btrfs_record_root_in_transaction,
	 * we have to make sure nobody is in the middle of
	 * adding their roots to the list while we are
	 * doing this splice
	 */
	mutex_lock(&fs_info->reloc_mutex);
	list_splice_init(&rc->reloc_roots, &reloc_roots);
	mutex_unlock(&fs_info->reloc_mutex);
	while (!list_empty(&reloc_roots)) {
		found = 1;
		reloc_root = list_entry(reloc_roots.next,
					struct btrfs_root, root_list);
		/*
		 * refs > 0 means prepare_to_merge() marked this root for
		 * merging; refs == 0 roots are simply dropped from the list.
		 */
		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
			root = read_fs_root(fs_info,
					    reloc_root->root_key.offset);
			BUG_ON(IS_ERR(root));
			BUG_ON(root->reloc_root != reloc_root);
			ret = merge_reloc_root(rc, root);
			if (ret) {
				/* Re-queue so the error path can clean it up. */
				if (list_empty(&reloc_root->root_list))
					list_add_tail(&reloc_root->root_list,
						      &reloc_roots);
				goto out;
			}
		} else {
			list_del_init(&reloc_root->root_list);
		}
	}
	if (found) {
		found = 0;
		goto again;
	}
out:
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret, NULL);
		if (!list_empty(&reloc_roots))
			free_reloc_roots(&reloc_roots);
		/* new reloc root may be added */
		mutex_lock(&fs_info->reloc_mutex);
		list_splice_init(&rc->reloc_roots, &reloc_roots);
		mutex_unlock(&fs_info->reloc_mutex);
		if (!list_empty(&reloc_roots))
			free_reloc_roots(&reloc_roots);
	}
	BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
}
static void free_block_list(struct rb_root *blocks)
{
struct tree_block *block;
struct rb_node *rb_node;
while ((rb_node = rb_first(blocks))) {
block = rb_entry(rb_node, struct tree_block, rb_node);
rb_erase(rb_node, blocks);
kfree(block);
}
}
/*
 * Record the fs tree that owns @reloc_root in the current transaction.
 * No-op if the reloc root was already recorded for this transid.
 */
static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
				      struct btrfs_root *reloc_root)
{
	struct btrfs_root *fs_root;

	if (trans->transid == reloc_root->last_trans)
		return 0;

	fs_root = read_fs_root(reloc_root->fs_info,
			       reloc_root->root_key.offset);
	BUG_ON(IS_ERR(fs_root));
	BUG_ON(fs_root->reloc_root != reloc_root);
	return btrfs_record_root_in_trans(trans, fs_root);
}
/*
 * Walk up the backref cache from @node, record the owning tree in the
 * transaction and return its reloc root.  Also fills
 * rc->backref_cache.path for btrfs_reloc_cow_block().  Returns NULL
 * when no usable root is found on the walk.
 */
static noinline_for_stack
struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
				     struct reloc_control *rc,
				     struct backref_node *node,
				     struct backref_edge *edges[])
{
	struct backref_node *next;
	struct btrfs_root *root;
	int index = 0;
	next = node;
	while (1) {
		cond_resched();
		next = walk_up_backref(next, edges, &index);
		root = next->root;
		BUG_ON(!root);
		BUG_ON(!test_bit(BTRFS_ROOT_REF_COWS, &root->state));
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
			/* Already a reloc tree root; just record it. */
			record_reloc_root_in_trans(trans, root);
			break;
		}
		btrfs_record_root_in_trans(trans, root);
		root = root->reloc_root;
		/*
		 * First time we see this node in the reloc tree: point it
		 * at the reloc root and queue it on the changed list.
		 */
		if (next->new_bytenr != root->node->start) {
			BUG_ON(next->new_bytenr);
			BUG_ON(!list_empty(&next->list));
			next->new_bytenr = root->node->start;
			next->root = root;
			list_add_tail(&next->list,
				      &rc->backref_cache.changed);
			__mark_block_processed(rc, next);
			break;
		}
		WARN_ON(1);
		root = NULL;
		next = walk_down_backref(edges, &index);
		if (!next || next->level <= node->level)
			break;
	}
	if (!root)
		return NULL;
	next = node;
	/* setup backref node path for btrfs_reloc_cow_block */
	while (1) {
		rc->backref_cache.path[next->level] = next;
		if (--index < 0)
			break;
		next = edges[index]->node[UPPER];
	}
	return root;
}
/*
* select a tree root for relocation. return NULL if the block
* is reference counted. we should use do_relocation() in this
* case. return a tree root pointer if the block isn't reference
* counted. return -ENOENT if the block is root of reloc tree.
*/
static noinline_for_stack
struct btrfs_root *select_one_root(struct backref_node *node)
{
	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	struct btrfs_root *fs_root = NULL;
	struct backref_node *cur = node;
	int index = 0;

	for (;;) {
		struct btrfs_root *root;

		cond_resched();
		cur = walk_up_backref(cur, edges, &index);
		root = cur->root;
		BUG_ON(!root);

		/* no other choice for non-references counted tree */
		if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
			return root;

		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID)
			fs_root = root;

		/*
		 * The walk moved above @node, so the block is reference
		 * counted: the caller must use do_relocation().
		 */
		if (cur != node)
			return NULL;

		cur = walk_down_backref(edges, &index);
		if (!cur || cur->level <= node->level)
			break;
	}
	/* Only reloc tree roots referenced the block. */
	return fs_root ? fs_root : ERR_PTR(-ENOENT);
}
/*
 * Estimate metadata bytes needed for @node: one nodesize per block
 * reachable upwards from @node through the backref cache that still
 * needs processing.
 *
 * NOTE(review): when @reserve is set any processed node terminates the
 * upward walk; when clear, only processed nodes other than @node do —
 * presumably reserve=1 is the reservation estimate and reserve=0 the
 * release estimate; confirm against callers.
 */
static noinline_for_stack
u64 calcu_metadata_size(struct reloc_control *rc,
			struct backref_node *node, int reserve)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct backref_node *next = node;
	struct backref_edge *edge;
	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	u64 num_bytes = 0;
	int index = 0;
	BUG_ON(reserve && node->processed);
	while (next) {
		cond_resched();
		while (1) {
			if (next->processed && (reserve || next != node))
				break;
			num_bytes += fs_info->nodesize;
			if (list_empty(&next->upper))
				break;
			/* Follow the first upper edge; siblings are kept
			 * on the edges stack for the downward walk. */
			edge = list_entry(next->upper.next,
					  struct backref_edge, list[LOWER]);
			edges[index++] = edge;
			next = edge->node[UPPER];
		}
		next = walk_down_backref(edges, &index);
	}
	return num_bytes;
}
/*
 * Reserve metadata space for relocating @node.  Returns 0 on success,
 * -EAGAIN when the caller must drop its transaction and retry so a
 * full flush can refill the block reserve.
 */
static int reserve_metadata_space(struct btrfs_trans_handle *trans,
				  struct reloc_control *rc,
				  struct backref_node *node)
{
	struct btrfs_root *root = rc->extent_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 num_bytes;
	int ret;
	u64 tmp;
	num_bytes = calcu_metadata_size(rc, node, 1) * 2;
	trans->block_rsv = rc->block_rsv;
	rc->reserved_bytes += num_bytes;
	/*
	 * We are under a transaction here so we can only do limited flushing.
	 * If we get an enospc just kick back -EAGAIN so we know to drop the
	 * transaction and try to refill when we can flush all the things.
	 */
	ret = btrfs_block_rsv_refill(root, rc->block_rsv, num_bytes,
				     BTRFS_RESERVE_FLUSH_LIMIT);
	if (ret) {
		/* Grow the target size past reserved_bytes in powers of two. */
		tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES;
		while (tmp <= rc->reserved_bytes)
			tmp <<= 1;
		/*
		 * only one thread can access block_rsv at this point,
		 * so we don't need hold lock to protect block_rsv.
		 * we expand more reservation size here to allow enough
		 * space for relocation and we will return earlier in
		 * enospc case.
		 */
		rc->block_rsv->size = tmp + fs_info->nodesize *
				      RELOCATION_RESERVED_NODES;
		return -EAGAIN;
	}
	return 0;
}
/*
* relocate a block tree, and then update pointers in upper level
* blocks that reference the block to point to the new location.
*
* if called by link_to_upper, the block has already been relocated.
* in that case this function just updates pointers.
*/
static int do_relocation(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct backref_node *node,
			 struct btrfs_key *key,
			 struct btrfs_path *path, int lowest)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct backref_node *upper;
	struct backref_edge *edge;
	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	struct btrfs_root *root;
	struct extent_buffer *eb;
	u32 blocksize;
	u64 bytenr;
	u64 generation;
	int slot;
	int ret;
	int err = 0;
	BUG_ON(lowest && node->eb);
	path->lowest_level = node->level + 1;
	rc->backref_cache.path[node->level] = node;
	/* Visit every upper block that holds a pointer to @node. */
	list_for_each_entry(edge, &node->upper, list[LOWER]) {
		struct btrfs_key first_key;
		cond_resched();
		upper = edge->node[UPPER];
		root = select_reloc_root(trans, rc, upper, edges);
		BUG_ON(!root);
		if (upper->eb && !upper->locked) {
			if (!lowest) {
				/* Skip uppers that already point at node->eb. */
				ret = btrfs_bin_search(upper->eb, key,
						       upper->level, &slot);
				if (ret < 0) {
					err = ret;
					goto next;
				}
				BUG_ON(ret);
				bytenr = btrfs_node_blockptr(upper->eb, slot);
				if (node->eb->start == bytenr)
					goto next;
			}
			/* Cached buffer is unlocked; re-resolve it below. */
			drop_node_buffer(upper);
		}
		if (!upper->eb) {
			/* Look up the upper block, taking its lock via path. */
			ret = btrfs_search_slot(trans, root, key, path, 0, 1);
			if (ret) {
				if (ret < 0)
					err = ret;
				else
					err = -ENOENT;
				btrfs_release_path(path);
				break;
			}
			if (!upper->eb) {
				/* Steal the buffer reference from the path. */
				upper->eb = path->nodes[upper->level];
				path->nodes[upper->level] = NULL;
			} else {
				BUG_ON(upper->eb != path->nodes[upper->level]);
			}
			/* Keep the tree lock; drop it from the path's view. */
			upper->locked = 1;
			path->locks[upper->level] = 0;
			slot = path->slots[upper->level];
			btrfs_release_path(path);
		} else {
			ret = btrfs_bin_search(upper->eb, key, upper->level,
					       &slot);
			if (ret < 0) {
				err = ret;
				goto next;
			}
			BUG_ON(ret);
		}
		bytenr = btrfs_node_blockptr(upper->eb, slot);
		if (lowest) {
			/* The upper must still point at the old location. */
			if (bytenr != node->bytenr) {
				btrfs_err(root->fs_info,
	"lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu",
					  bytenr, node->bytenr, slot,
					  upper->eb->start);
				err = -EIO;
				goto next;
			}
		} else {
			if (node->eb->start == bytenr)
				goto next;
		}
		blocksize = root->fs_info->nodesize;
		generation = btrfs_node_ptr_generation(upper->eb, slot);
		btrfs_node_key_to_cpu(upper->eb, &first_key, slot);
		eb = read_tree_block(fs_info, bytenr, generation,
				     upper->level - 1, &first_key);
		if (IS_ERR(eb)) {
			err = PTR_ERR(eb);
			goto next;
		} else if (!extent_buffer_uptodate(eb)) {
			free_extent_buffer(eb);
			err = -EIO;
			goto next;
		}
		btrfs_tree_lock(eb);
		btrfs_set_lock_blocking_write(eb);
		if (!node->eb) {
			/* First relocation: CoW moves the block itself. */
			ret = btrfs_cow_block(trans, root, eb, upper->eb,
					      slot, &eb);
			btrfs_tree_unlock(eb);
			free_extent_buffer(eb);
			if (ret < 0) {
				err = ret;
				goto next;
			}
			BUG_ON(node->eb != eb);
		} else {
			/*
			 * Already relocated: just repoint the upper block at
			 * the new location and fix up the reference counts.
			 */
			btrfs_set_node_blockptr(upper->eb, slot,
						node->eb->start);
			btrfs_set_node_ptr_generation(upper->eb, slot,
						      trans->transid);
			btrfs_mark_buffer_dirty(upper->eb);
			ret = btrfs_inc_extent_ref(trans, root,
						   node->eb->start, blocksize,
						   upper->eb->start,
						   btrfs_header_owner(upper->eb),
						   node->level, 0);
			BUG_ON(ret);
			ret = btrfs_drop_subtree(trans, root, eb, upper->eb);
			BUG_ON(ret);
		}
next:
		if (!upper->pending)
			drop_node_buffer(upper);
		else
			unlock_node_buffer(upper);
		if (err)
			break;
	}
	if (!err && node->pending) {
		drop_node_buffer(node);
		list_move_tail(&node->list, &rc->backref_cache.changed);
		node->pending = 0;
	}
	path->lowest_level = 0;
	/* -ENOSPC must never reach here; space is reserved up front. */
	BUG_ON(err == -ENOSPC);
	return err;
}
static int link_to_upper(struct btrfs_trans_handle *trans,
struct reloc_control *rc,
struct backref_node *node,
struct btrfs_path *path)
{
struct btrfs_key key;
btrfs_node_key_to_cpu(node->eb, &key, 0);
return do_relocation(trans, rc, node, &key, path, 0);
}
static int finish_pending_nodes(struct btrfs_trans_handle *trans,
struct reloc_control *rc,
struct btrfs_path *path, int err)
{
LIST_HEAD(list);
struct backref_cache *cache = &rc->backref_cache;
struct backref_node *node;
int level;
int ret;
for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
while (!list_empty(&cache->pending[level])) {
node = list_entry(cache->pending[level].next,
struct backref_node, list);
list_move_tail(&node->list, &list);
BUG_ON(!node->pending);
if (!err) {
ret = link_to_upper(trans, rc, node, path);
if (ret < 0)
err = ret;
}
}
list_splice_init(&list, &cache->pending[level]);
}
return err;
}
/* Mark the byte range of a tree block as processed. */
static void mark_block_processed(struct reloc_control *rc,
				 u64 bytenr, u32 blocksize)
{
	u64 end = bytenr + blocksize - 1;

	set_extent_bits(&rc->processed_blocks, bytenr, end, EXTENT_DIRTY);
}
static void __mark_block_processed(struct reloc_control *rc,
				   struct backref_node *node)
{
	/*
	 * Leaves and blocks inside the block group being relocated are
	 * additionally recorded in the processed_blocks io tree.
	 */
	if (node->level == 0 ||
	    in_block_group(node->bytenr, rc->block_group)) {
		u32 blocksize = rc->extent_root->fs_info->nodesize;

		mark_block_processed(rc, node->bytenr, blocksize);
	}
	node->processed = 1;
}
/*
* mark a block and all blocks directly/indirectly reference the block
* as processed.
*/
static void update_processed_blocks(struct reloc_control *rc,
				    struct backref_node *node)
{
	struct backref_node *next = node;
	struct backref_edge *edge;
	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	int index = 0;
	/* Depth-first walk upwards through the backref cache. */
	while (next) {
		cond_resched();
		while (1) {
			/* Already-processed subtrees need no revisit. */
			if (next->processed)
				break;
			__mark_block_processed(rc, next);
			if (list_empty(&next->upper))
				break;
			/* Follow the first upper edge; remember it so the
			 * downward walk can pick up the siblings. */
			edge = list_entry(next->upper.next,
					  struct backref_edge, list[LOWER]);
			edges[index++] = edge;
			next = edge->node[UPPER];
		}
		next = walk_down_backref(edges, &index);
	}
}
/* Return 1 if the tree block at @bytenr was already processed. */
static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
{
	u32 blocksize = rc->extent_root->fs_info->nodesize;

	return test_range_bit(&rc->processed_blocks, bytenr,
			      bytenr + blocksize - 1, EXTENT_DIRTY, 1,
			      NULL) ? 1 : 0;
}
/*
 * Read the tree block described by @block and fill in its first key.
 * On entry block->key.offset holds the block's generation.
 */
static int get_tree_block_key(struct btrfs_fs_info *fs_info,
			      struct tree_block *block)
{
	struct extent_buffer *eb;

	BUG_ON(block->key_ready);
	eb = read_tree_block(fs_info, block->bytenr, block->key.offset,
			     block->level, NULL);
	if (IS_ERR(eb))
		return PTR_ERR(eb);
	if (!extent_buffer_uptodate(eb)) {
		free_extent_buffer(eb);
		return -EIO;
	}
	/* Replace the placeholder key with the block's real first key. */
	if (block->level)
		btrfs_node_key_to_cpu(eb, &block->key, 0);
	else
		btrfs_item_key_to_cpu(eb, &block->key, 0);
	free_extent_buffer(eb);
	block->key_ready = 1;
	return 0;
}
/*
* helper function to relocate a tree block
*/
static int relocate_tree_block(struct btrfs_trans_handle *trans,
			       struct reloc_control *rc,
			       struct backref_node *node,
			       struct btrfs_key *key,
			       struct btrfs_path *path)
{
	struct btrfs_root *root;
	int ret = 0;
	if (!node)
		return 0;
	BUG_ON(node->processed);
	root = select_one_root(node);
	if (root == ERR_PTR(-ENOENT)) {
		/* Root of a reloc tree: nothing to move, just mark it. */
		update_processed_blocks(rc, node);
		goto out;
	}
	if (!root || test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
		ret = reserve_metadata_space(trans, rc, node);
		if (ret)
			goto out;
	}
	if (root) {
		if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
			/*
			 * Reference counted tree: record it in the
			 * transaction so block relocation happens via the
			 * reloc tree on CoW.
			 */
			BUG_ON(node->new_bytenr);
			BUG_ON(!list_empty(&node->list));
			btrfs_record_root_in_trans(trans, root);
			root = root->reloc_root;
			node->new_bytenr = root->node->start;
			node->root = root;
			list_add_tail(&node->list, &rc->backref_cache.changed);
		} else {
			/* Non-refcounted tree: CoW the path in place. */
			path->lowest_level = node->level;
			ret = btrfs_search_slot(trans, root, key, path, 0, 1);
			btrfs_release_path(path);
			if (ret > 0)
				ret = 0;
		}
		if (!ret)
			update_processed_blocks(rc, node);
	} else {
		/* Shared block: relocate and fix up all upper pointers. */
		ret = do_relocation(trans, rc, node, key, path, 1);
	}
out:
	if (ret || node->level == 0 || node->cowonly)
		remove_backref_node(&rc->backref_cache, node);
	return ret;
}
/*
* relocate a list of blocks
*/
static noinline_for_stack
int relocate_tree_blocks(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc, struct rb_root *blocks)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct backref_node *node;
	struct btrfs_path *path;
	struct tree_block *block;
	struct tree_block *next;
	int ret;
	int err = 0;
	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out_free_blocks;
	}
	/* Kick in readahead for tree blocks with missing keys */
	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
		if (!block->key_ready)
			readahead_tree_block(fs_info, block->bytenr);
	}
	/* Get first keys */
	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
		if (!block->key_ready) {
			err = get_tree_block_key(fs_info, block);
			if (err)
				goto out_free_path;
		}
	}
	/* Do tree relocation */
	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
		node = build_backref_tree(rc, &block->key,
					  block->level, block->bytenr);
		if (IS_ERR(node)) {
			err = PTR_ERR(node);
			goto out;
		}
		ret = relocate_tree_block(trans, rc, node, &block->key,
					  path);
		if (ret < 0) {
			/*
			 * -EAGAIN from a block after the first means some
			 * work was done; only report it as an error if
			 * nothing was processed at all.
			 */
			if (ret != -EAGAIN || &block->rb_node == rb_first(blocks))
				err = ret;
			goto out;
		}
	}
out:
	err = finish_pending_nodes(trans, rc, path, err);
out_free_path:
	btrfs_free_path(path);
out_free_blocks:
	free_block_list(blocks);
	return err;
}
/*
 * Preallocate file ranges for every extent in @cluster so the data can
 * be written out as contiguous extents.  Data space for the whole
 * cluster is reserved up front; the slack between extents is released
 * as the loop advances.
 */
static noinline_for_stack
int prealloc_file_extent_cluster(struct inode *inode,
				 struct file_extent_cluster *cluster)
{
	u64 alloc_hint = 0;
	u64 start;
	u64 end;
	/* index_cnt holds the offset of the block group in the file. */
	u64 offset = BTRFS_I(inode)->index_cnt;
	u64 num_bytes;
	int nr = 0;
	int ret = 0;
	u64 prealloc_start = cluster->start - offset;
	u64 prealloc_end = cluster->end - offset;
	u64 cur_offset;
	struct extent_changeset *data_reserved = NULL;
	BUG_ON(cluster->start != cluster->boundary[0]);
	inode_lock(inode);
	ret = btrfs_check_data_free_space(inode, &data_reserved, prealloc_start,
					  prealloc_end + 1 - prealloc_start);
	if (ret)
		goto out;
	cur_offset = prealloc_start;
	while (nr < cluster->nr) {
		/* Each boundary entry starts one extent of the cluster. */
		start = cluster->boundary[nr] - offset;
		if (nr + 1 < cluster->nr)
			end = cluster->boundary[nr + 1] - 1 - offset;
		else
			end = cluster->end - offset;
		lock_extent(&BTRFS_I(inode)->io_tree, start, end);
		num_bytes = end + 1 - start;
		/* Give back the reservation covering the gap we skipped. */
		if (cur_offset < start)
			btrfs_free_reserved_data_space(inode, data_reserved,
					cur_offset, start - cur_offset);
		ret = btrfs_prealloc_file_range(inode, 0, start,
						num_bytes, num_bytes,
						end + 1, &alloc_hint);
		cur_offset = end + 1;
		unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
		if (ret)
			break;
		nr++;
	}
	/* Release whatever reservation is left past the last extent. */
	if (cur_offset < prealloc_end)
		btrfs_free_reserved_data_space(inode, data_reserved,
				cur_offset, prealloc_end + 1 - cur_offset);
out:
	inode_unlock(inode);
	extent_changeset_free(data_reserved);
	return ret;
}
/*
 * Insert a pinned extent map covering [start, end] that points at
 * @block_start, replacing any cached mappings in the way.
 */
static noinline_for_stack
int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
			 u64 block_start)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	int ret = 0;
	em = alloc_extent_map();
	if (!em)
		return -ENOMEM;
	em->start = start;
	em->len = end + 1 - start;
	em->block_len = em->len;
	em->block_start = block_start;
	em->bdev = fs_info->fs_devices->latest_bdev;
	/* Pin the mapping so it survives until the data is relocated. */
	set_bit(EXTENT_FLAG_PINNED, &em->flags);
	lock_extent(&BTRFS_I(inode)->io_tree, start, end);
	while (1) {
		write_lock(&em_tree->lock);
		ret = add_extent_mapping(em_tree, em, 0);
		write_unlock(&em_tree->lock);
		if (ret != -EEXIST) {
			free_extent_map(em);
			break;
		}
		/* An old mapping overlaps; drop it and retry the insert. */
		btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0);
	}
	unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
	return ret;
}
/*
 * Copy the data of @cluster into the relocation inode by dirtying its
 * pages as delalloc; writeback then lands the data at the new location
 * set up by setup_extent_mapping().
 */
static int relocate_file_extent_cluster(struct inode *inode,
					struct file_extent_cluster *cluster)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	u64 page_start;
	u64 page_end;
	/* index_cnt holds the offset of the block group in the file. */
	u64 offset = BTRFS_I(inode)->index_cnt;
	unsigned long index;
	unsigned long last_index;
	struct page *page;
	struct file_ra_state *ra;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int nr = 0;
	int ret = 0;
	if (!cluster->nr)
		return 0;
	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -ENOMEM;
	ret = prealloc_file_extent_cluster(inode, cluster);
	if (ret)
		goto out;
	file_ra_state_init(ra, inode->i_mapping);
	ret = setup_extent_mapping(inode, cluster->start - offset,
				   cluster->end - offset, cluster->start);
	if (ret)
		goto out;
	index = (cluster->start - offset) >> PAGE_SHIFT;
	last_index = (cluster->end - offset) >> PAGE_SHIFT;
	while (index <= last_index) {
		/* Reserve metadata for the delalloc of one page. */
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
				PAGE_SIZE);
		if (ret)
			goto out;
		page = find_lock_page(inode->i_mapping, index);
		if (!page) {
			page_cache_sync_readahead(inode->i_mapping,
						  ra, NULL, index,
						  last_index + 1 - index);
			page = find_or_create_page(inode->i_mapping, index,
						   mask);
			if (!page) {
				btrfs_delalloc_release_metadata(BTRFS_I(inode),
							PAGE_SIZE, true);
				ret = -ENOMEM;
				goto out;
			}
		}
		if (PageReadahead(page)) {
			page_cache_async_readahead(inode->i_mapping,
						   ra, NULL, page, index,
						   last_index + 1 - index);
		}
		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				put_page(page);
				btrfs_delalloc_release_metadata(BTRFS_I(inode),
							PAGE_SIZE, true);
				btrfs_delalloc_release_extents(BTRFS_I(inode),
							       PAGE_SIZE, true);
				ret = -EIO;
				goto out;
			}
		}
		page_start = page_offset(page);
		page_end = page_start + PAGE_SIZE - 1;
		lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);
		set_page_extent_mapped(page);
		/*
		 * Pages on an extent boundary get EXTENT_BOUNDARY so
		 * writeback will not merge the extents back together.
		 */
		if (nr < cluster->nr &&
		    page_start + offset == cluster->boundary[nr]) {
			set_extent_bits(&BTRFS_I(inode)->io_tree,
					page_start, page_end,
					EXTENT_BOUNDARY);
			nr++;
		}
		ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
						NULL, 0);
		if (ret) {
			unlock_page(page);
			put_page(page);
			btrfs_delalloc_release_metadata(BTRFS_I(inode),
							 PAGE_SIZE, true);
			btrfs_delalloc_release_extents(BTRFS_I(inode),
						       PAGE_SIZE, true);
			clear_extent_bits(&BTRFS_I(inode)->io_tree,
					  page_start, page_end,
					  EXTENT_LOCKED | EXTENT_BOUNDARY);
			goto out;
		}
		set_page_dirty(page);
		unlock_extent(&BTRFS_I(inode)->io_tree,
			      page_start, page_end);
		unlock_page(page);
		put_page(page);
		index++;
		btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE,
					       false);
		balance_dirty_pages_ratelimited(inode->i_mapping);
		btrfs_throttle(fs_info);
	}
	WARN_ON(nr != cluster->nr);
out:
	kfree(ra);
	return ret;
}
/*
 * Accumulate @extent_key into @cluster, flushing the cluster via
 * relocate_file_extent_cluster() whenever the new extent is not
 * adjacent to it or the cluster is full (MAX_EXTENTS).
 */
static noinline_for_stack
int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
			 struct file_extent_cluster *cluster)
{
	int ret;
	/* Non-contiguous extent: flush the current cluster first. */
	if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
		ret = relocate_file_extent_cluster(inode, cluster);
		if (ret)
			return ret;
		cluster->nr = 0;
	}
	if (!cluster->nr)
		cluster->start = extent_key->objectid;
	else
		BUG_ON(cluster->nr >= MAX_EXTENTS);
	cluster->end = extent_key->objectid + extent_key->offset - 1;
	cluster->boundary[cluster->nr] = extent_key->objectid;
	cluster->nr++;
	if (cluster->nr >= MAX_EXTENTS) {
		ret = relocate_file_extent_cluster(inode, cluster);
		if (ret)
			return ret;
		cluster->nr = 0;
	}
	return 0;
}
/*
* helper to add a tree block to the list.
* the major work is getting the generation and level of the block
*/
static int add_tree_block(struct reloc_control *rc,
			  struct btrfs_key *extent_key,
			  struct btrfs_path *path,
			  struct rb_root *blocks)
{
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_tree_block_info *bi;
	struct tree_block *block;
	struct rb_node *rb_node;
	u32 item_size;
	int level = -1;
	u64 generation;
	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	if (extent_key->type == BTRFS_METADATA_ITEM_KEY ||
	    item_size >= sizeof(*ei) + sizeof(*bi)) {
		ei = btrfs_item_ptr(eb, path->slots[0],
				struct btrfs_extent_item);
		if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) {
			/* Non-skinny metadata: level lives in block info. */
			bi = (struct btrfs_tree_block_info *)(ei + 1);
			level = btrfs_tree_block_level(eb, bi);
		} else {
			/* Skinny metadata: key offset is the level. */
			level = (int)extent_key->offset;
		}
		generation = btrfs_extent_generation(eb, ei);
	} else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
		/* v0 extent items are no longer supported. */
		btrfs_print_v0_err(eb->fs_info);
		btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
		return -EINVAL;
	} else {
		BUG();
	}
	btrfs_release_path(path);
	BUG_ON(level == -1);
	block = kmalloc(sizeof(*block), GFP_NOFS);
	if (!block)
		return -ENOMEM;
	block->bytenr = extent_key->objectid;
	/*
	 * block->key is abused as temporary storage until
	 * get_tree_block_key() fills in the real first key:
	 * objectid holds the blocksize, offset holds the generation —
	 * TODO confirm readers of key.objectid against callers.
	 */
	block->key.objectid = rc->extent_root->fs_info->nodesize;
	block->key.offset = generation;
	block->level = level;
	block->key_ready = 0;
	rb_node = tree_insert(blocks, block->bytenr, &block->rb_node);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, block->bytenr);
	return 0;
}
/*
 * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY
 *
 * Looks up the extent item for @bytenr in the extent tree and queues the
 * block in @blocks via add_tree_block().  Already-processed or already
 * queued blocks are skipped.
 */
static int __add_tree_block(struct reloc_control *rc,
			    u64 bytenr, u32 blocksize,
			    struct rb_root *blocks)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;
	bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA);

	/* nothing to do if the block was handled or is already queued */
	if (tree_block_processed(bytenr, rc))
		return 0;
	if (tree_search(blocks, bytenr))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	key.objectid = bytenr;
	if (skinny) {
		/* offset (u64)-1 positions us just past the last item */
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = (u64)-1;
	} else {
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = blocksize;
	}

	/* read-only scan of the commit root, no tree locking required */
	path->search_commit_root = 1;
	path->skip_locking = 1;
	ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (ret > 0 && skinny) {
		/*
		 * The skinny search landed one past the item; the previous
		 * slot may hold either the METADATA_ITEM or a plain
		 * EXTENT_ITEM for this bytenr.
		 */
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    (key.type == BTRFS_METADATA_ITEM_KEY ||
			     (key.type == BTRFS_EXTENT_ITEM_KEY &&
			      key.offset == blocksize)))
				ret = 0;
		}
		if (ret) {
			/* retry with the non-skinny extent item layout */
			skinny = false;
			btrfs_release_path(path);
			goto again;
		}
	}
	if (ret) {
		/* neither layout matched: the extent tree is inconsistent */
		ASSERT(ret == 1);
		btrfs_print_leaf(path->nodes[0]);
		btrfs_err(fs_info,
	"tree block extent item (%llu) is not found in extent tree",
			  bytenr);
		WARN_ON(1);
		ret = -EINVAL;
		goto out;
	}

	ret = add_tree_block(rc, &key, path, blocks);
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * helper to check if the block use full backrefs for pointers in it
 *
 * Returns 1 when the block carries (or must be treated as carrying) full
 * backrefs, 0 otherwise.
 */
static int block_use_full_backref(struct reloc_control *rc,
				  struct extent_buffer *eb)
{
	int err;
	u64 extent_flags;

	/* relocated blocks and pre-mixed-backref blocks always use full refs */
	if (btrfs_header_flag(eb, BTRFS_HEADER_FLAG_RELOC))
		return 1;
	if (btrfs_header_backref_rev(eb) < BTRFS_MIXED_BACKREF_REV)
		return 1;

	/* otherwise ask the extent tree for the block's flags */
	err = btrfs_lookup_extent_info(NULL, rc->extent_root->fs_info,
				       eb->start, btrfs_header_level(eb), 1,
				       NULL, &extent_flags);
	BUG_ON(err);

	return (extent_flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) ? 1 : 0;
}
/*
 * Truncate the free space cache inode of @block_group.
 *
 * If @inode is NULL it is looked up by @ino in the tree root; a failed
 * lookup is reported as -ENOENT, which callers treat as "no cache inode,
 * nothing to do".  The truncation runs in its own joined transaction.
 */
static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
				    struct btrfs_block_group_cache *block_group,
				    struct inode *inode,
				    u64 ino)
{
	struct btrfs_key key;
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;
	int ret = 0;

	if (inode)
		goto truncate;

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	/* NOTE(review): the actual iget error is folded into -ENOENT here */
	inode = btrfs_iget(fs_info->sb, &key, root, NULL);
	if (IS_ERR(inode))
		return -ENOENT;

truncate:
	/* make sure there is enough global reserve for the truncate */
	ret = btrfs_check_trunc_cache_free_space(fs_info,
						 &fs_info->global_block_rsv);
	if (ret)
		goto out;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	ret = btrfs_truncate_free_space_cache(trans, block_group, inode);

	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	iput(inode);
	return ret;
}
/*
 * helper to add tree blocks for backref of type BTRFS_EXTENT_DATA_REF_KEY
 * this function scans fs tree to find blocks reference the data extent
 *
 * Walks the file extent items in the referencing fs tree and queues every
 * leaf that points at @extent_key into @blocks, stopping once @ref_count
 * matching references have been accounted for.
 */
static int find_data_references(struct reloc_control *rc,
				struct btrfs_key *extent_key,
				struct extent_buffer *leaf,
				struct btrfs_extent_data_ref *ref,
				struct rb_root *blocks)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_path *path;
	struct tree_block *block;
	struct btrfs_root *root;
	struct btrfs_file_extent_item *fi;
	struct rb_node *rb_node;
	struct btrfs_key key;
	u64 ref_root;
	u64 ref_objectid;
	u64 ref_offset;
	u32 ref_count;
	u32 nritems;
	int err = 0;
	int added = 0;		/* current leaf is already queued in @blocks */
	int counted;		/* refs in the current leaf decrement ref_count */
	int ret;

	ref_root = btrfs_extent_data_ref_root(leaf, ref);
	ref_objectid = btrfs_extent_data_ref_objectid(leaf, ref);
	ref_offset = btrfs_extent_data_ref_offset(leaf, ref);
	ref_count = btrfs_extent_data_ref_count(leaf, ref);

	/*
	 * This is an extent belonging to the free space cache, lets just delete
	 * it and redo the search.
	 */
	if (ref_root == BTRFS_ROOT_TREE_OBJECTID) {
		ret = delete_block_group_cache(fs_info, rc->block_group,
					       NULL, ref_objectid);
		if (ret != -ENOENT)
			return ret;
		ret = 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	root = read_fs_root(fs_info, ref_root);
	if (IS_ERR(root)) {
		err = PTR_ERR(root);
		goto out;
	}

	key.objectid = ref_objectid;
	key.type = BTRFS_EXTENT_DATA_KEY;
	/*
	 * NOTE(review): offsets in the top 32-bit range are searched from 0 —
	 * presumably to cope with the encoded (negative) offsets; confirm.
	 */
	if (ref_offset > ((u64)-1 << 32))
		key.offset = 0;
	else
		key.offset = ref_offset;

	path->search_commit_root = 1;
	path->skip_locking = 1;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	/*
	 * the references in tree blocks that use full backrefs
	 * are not counted in
	 */
	if (block_use_full_backref(rc, leaf))
		counted = 0;
	else
		counted = 1;
	rb_node = tree_search(blocks, leaf->start);
	if (rb_node) {
		/* leaf already queued; skip it entirely if not counted */
		if (counted)
			added = 1;
		else
			path->slots[0] = nritems;
	}

	while (ref_count > 0) {
		/* advance to the next leaf when the current one is done */
		while (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				err = ret;
				goto out;
			}
			if (WARN_ON(ret > 0))
				goto out;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			added = 0;

			if (block_use_full_backref(rc, leaf))
				counted = 0;
			else
				counted = 1;
			rb_node = tree_search(blocks, leaf->start);
			if (rb_node) {
				if (counted)
					added = 1;
				else
					path->slots[0] = nritems;
			}
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (WARN_ON(key.objectid != ref_objectid ||
			    key.type != BTRFS_EXTENT_DATA_KEY))
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		/* inline extents cannot reference this data extent */
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			goto next;

		if (btrfs_file_extent_disk_bytenr(leaf, fi) !=
		    extent_key->objectid)
			goto next;

		/* match only the reference with the expected logical offset */
		key.offset -= btrfs_file_extent_offset(leaf, fi);
		if (key.offset != ref_offset)
			goto next;

		if (counted)
			ref_count--;
		if (added)
			goto next;

		if (!tree_block_processed(leaf->start, rc)) {
			/* queue this leaf so its pointers get updated */
			block = kmalloc(sizeof(*block), GFP_NOFS);
			if (!block) {
				err = -ENOMEM;
				break;
			}
			block->bytenr = leaf->start;
			btrfs_item_key_to_cpu(leaf, &block->key, 0);
			block->level = 0;
			block->key_ready = 1;
			rb_node = tree_insert(blocks, block->bytenr,
					      &block->rb_node);
			if (rb_node)
				backref_tree_panic(rb_node, -EEXIST,
						   block->bytenr);
		}
		if (counted)
			added = 1;
		else
			path->slots[0] = nritems;
next:
		path->slots[0]++;
	}
out:
	btrfs_free_path(path);
	return err;
}
/*
 * helper to find all tree blocks that reference a given data extent
 *
 * First walks the inline refs embedded in the extent item, then the keyed
 * SHARED_DATA_REF / EXTENT_DATA_REF items that follow it, queueing the
 * referencing tree blocks into @blocks.  On error the partially built
 * block list is freed.
 */
static noinline_for_stack
int add_data_references(struct reloc_control *rc,
			struct btrfs_key *extent_key,
			struct btrfs_path *path,
			struct rb_root *blocks)
{
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_extent_data_ref *dref;
	struct btrfs_extent_inline_ref *iref;
	unsigned long ptr;
	unsigned long end;
	u32 blocksize = rc->extent_root->fs_info->nodesize;
	int ret = 0;
	int err = 0;

	eb = path->nodes[0];
	ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
	end = ptr + btrfs_item_size_nr(eb, path->slots[0]);
	/* inline refs start right after the extent item header */
	ptr += sizeof(struct btrfs_extent_item);

	while (ptr < end) {
		iref = (struct btrfs_extent_inline_ref *)ptr;
		key.type = btrfs_get_extent_inline_ref_type(eb, iref,
							BTRFS_REF_TYPE_DATA);
		if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
			/* ref offset is the bytenr of the referencing leaf */
			key.offset = btrfs_extent_inline_ref_offset(eb, iref);
			ret = __add_tree_block(rc, key.offset, blocksize,
					       blocks);
		} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			ret = find_data_references(rc, extent_key,
						   eb, dref, blocks);
		} else {
			ret = -EUCLEAN;
			btrfs_err(rc->extent_root->fs_info,
		     "extent %llu slot %d has an invalid inline ref type",
			     eb->start, path->slots[0]);
		}
		if (ret) {
			err = ret;
			goto out;
		}
		ptr += btrfs_extent_inline_ref_size(key.type);
	}
	WARN_ON(ptr > end);

	/* now scan the keyed ref items that follow the extent item */
	while (1) {
		cond_resched();
		eb = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(rc->extent_root, path);
			if (ret < 0) {
				err = ret;
				break;
			}
			if (ret > 0)
				break;
			eb = path->nodes[0];
		}

		btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
		if (key.objectid != extent_key->objectid)
			break;

		if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
			ret = __add_tree_block(rc, key.offset, blocksize,
					       blocks);
		} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
			dref = btrfs_item_ptr(eb, path->slots[0],
					      struct btrfs_extent_data_ref);
			ret = find_data_references(rc, extent_key,
						   eb, dref, blocks);
		} else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
			/* v0 refs are no longer supported */
			btrfs_print_v0_err(eb->fs_info);
			btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
			ret = -EINVAL;
		} else {
			/* unrelated item types are simply skipped */
			ret = 0;
		}
		if (ret) {
			err = ret;
			break;
		}
		path->slots[0]++;
	}
out:
	btrfs_release_path(path);
	if (err)
		free_block_list(blocks);
	return err;
}
/*
 * helper to find next unprocessed extent
 *
 * Scans the extent tree from rc->search_start to the end of the block
 * group, skipping extents already marked in rc->processed_blocks.
 * Returns 0 with *extent_key filled (and the path positioned on the item),
 * 1 when the block group is exhausted, or a negative errno.
 */
static noinline_for_stack
int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
		     struct btrfs_key *extent_key)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u64 start, end, last;
	int ret;

	/* one past the end of the block group being relocated */
	last = rc->block_group->key.objectid + rc->block_group->key.offset;
	while (1) {
		cond_resched();
		if (rc->search_start >= last) {
			ret = 1;
			break;
		}

		key.objectid = rc->search_start;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = 0;

		path->search_commit_root = 1;
		path->skip_locking = 1;
		ret = btrfs_search_slot(NULL, rc->extent_root, &key, path,
					0, 0);
		if (ret < 0)
			break;
next:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(rc->extent_root, path);
			if (ret != 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid >= last) {
			ret = 1;
			break;
		}

		/* only extent items and metadata items are of interest */
		if (key.type != BTRFS_EXTENT_ITEM_KEY &&
		    key.type != BTRFS_METADATA_ITEM_KEY) {
			path->slots[0]++;
			goto next;
		}

		/* skip extents that end at or before the search cursor */
		if (key.type == BTRFS_EXTENT_ITEM_KEY &&
		    key.objectid + key.offset <= rc->search_start) {
			path->slots[0]++;
			goto next;
		}

		if (key.type == BTRFS_METADATA_ITEM_KEY &&
		    key.objectid + fs_info->nodesize <=
		    rc->search_start) {
			path->slots[0]++;
			goto next;
		}

		/* already-processed ranges are marked EXTENT_DIRTY */
		ret = find_first_extent_bit(&rc->processed_blocks,
					    key.objectid, &start, &end,
					    EXTENT_DIRTY, NULL);

		if (ret == 0 && start <= key.objectid) {
			/* processed: jump the cursor past the marked range */
			btrfs_release_path(path);
			rc->search_start = end + 1;
		} else {
			/* found: advance the cursor past this extent */
			if (key.type == BTRFS_EXTENT_ITEM_KEY)
				rc->search_start = key.objectid + key.offset;
			else
				rc->search_start = key.objectid +
					fs_info->nodesize;
			memcpy(extent_key, &key, sizeof(key));
			return 0;
		}
	}
	btrfs_release_path(path);
	return ret;
}
/* Publish @rc as the filesystem's active relocation context. */
static void set_reloc_control(struct reloc_control *rc)
{
	struct btrfs_fs_info *info = rc->extent_root->fs_info;

	mutex_lock(&info->reloc_mutex);
	info->reloc_ctl = rc;
	mutex_unlock(&info->reloc_mutex);
}
/* Clear the filesystem's active relocation context. */
static void unset_reloc_control(struct reloc_control *rc)
{
	struct btrfs_fs_info *info = rc->extent_root->fs_info;

	mutex_lock(&info->reloc_mutex);
	info->reloc_ctl = NULL;
	mutex_unlock(&info->reloc_mutex);
}
/*
 * Sanity-check an extent item's flags.
 *
 * Returns 1 for an invalid combination, 0 for a valid one.
 */
static int check_extent_flags(u64 flags)
{
	int has_data = (flags & BTRFS_EXTENT_FLAG_DATA) != 0;
	int has_tree_block = (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) != 0;

	/* exactly one of DATA / TREE_BLOCK must be set */
	if (has_data == has_tree_block)
		return 1;
	/* FULL_BACKREF is only meaningful for tree blocks */
	if (has_data && (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
		return 1;
	return 0;
}
/*
 * Set up a reloc_control for a relocation run: allocate and fill the
 * temporary block reserve, reset per-run counters, install the control
 * structure and commit a transaction so later joins see a clean state.
 *
 * On failure the caller is responsible for freeing rc->block_rsv.
 */
static noinline_for_stack
int prepare_to_relocate(struct reloc_control *rc)
{
	struct btrfs_trans_handle *trans;
	int ret;

	rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info,
					      BTRFS_BLOCK_RSV_TEMP);
	if (!rc->block_rsv)
		return -ENOMEM;

	memset(&rc->cluster, 0, sizeof(rc->cluster));
	rc->search_start = rc->block_group->key.objectid;
	rc->extents_found = 0;
	rc->nodes_relocated = 0;
	rc->merging_rsv_size = 0;
	rc->reserved_bytes = 0;
	/* reserve enough metadata space for a batch of tree nodes */
	rc->block_rsv->size = rc->extent_root->fs_info->nodesize *
			      RELOCATION_RESERVED_NODES;
	ret = btrfs_block_rsv_refill(rc->extent_root,
				     rc->block_rsv, rc->block_rsv->size,
				     BTRFS_RESERVE_FLUSH_ALL);
	if (ret)
		return ret;

	rc->create_reloc_tree = 1;
	set_reloc_control(rc);

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		unset_reloc_control(rc);
		/*
		 * extent tree is not a ref_cow tree and has no reloc_root to
		 * cleanup. And callers are responsible to free the above
		 * block rsv.
		 */
		return PTR_ERR(trans);
	}
	btrfs_commit_transaction(trans);
	return 0;
}
/*
 * Core loop of block group relocation.
 *
 * Iterates over every unprocessed extent in the block group (via
 * find_next_extent), queues tree blocks / data references and relocates
 * them one transaction batch at a time.  After the scan it flushes the
 * pending file extent cluster, merges the reloc trees back into their fs
 * trees and cleans up.  Returns 0 or a negative errno.
 */
static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct rb_root blocks = RB_ROOT;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	u64 flags;
	u32 item_size;
	int ret;
	int err = 0;
	int progress = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	ret = prepare_to_relocate(rc);
	if (ret) {
		err = ret;
		goto out_free;
	}

	while (1) {
		/* top up the metadata reserve for this batch */
		rc->reserved_bytes = 0;
		ret = btrfs_block_rsv_refill(rc->extent_root,
					rc->block_rsv, rc->block_rsv->size,
					BTRFS_RESERVE_FLUSH_ALL);
		if (ret) {
			err = ret;
			break;
		}
		progress++;
		trans = btrfs_start_transaction(rc->extent_root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			trans = NULL;
			break;
		}
restart:
		if (update_backref_cache(trans, &rc->backref_cache)) {
			btrfs_end_transaction(trans);
			trans = NULL;
			continue;
		}

		ret = find_next_extent(rc, path, &key);
		if (ret < 0)
			err = ret;
		if (ret != 0)
			break;

		rc->extents_found++;

		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_extent_item);
		item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			flags = btrfs_extent_flags(path->nodes[0], ei);
			ret = check_extent_flags(flags);
			BUG_ON(ret);
		} else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
			/* v0 extent items are no longer supported */
			err = -EINVAL;
			btrfs_print_v0_err(trans->fs_info);
			btrfs_abort_transaction(trans, err);
			break;
		} else {
			BUG();
		}

		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			ret = add_tree_block(rc, &key, path, &blocks);
		} else if (rc->stage == UPDATE_DATA_PTRS &&
			   (flags & BTRFS_EXTENT_FLAG_DATA)) {
			ret = add_data_references(rc, &key, path, &blocks);
		} else {
			btrfs_release_path(path);
			ret = 0;
		}
		if (ret < 0) {
			err = ret;
			break;
		}

		if (!RB_EMPTY_ROOT(&blocks)) {
			ret = relocate_tree_blocks(trans, rc, &blocks);
			if (ret < 0) {
				/*
				 * if we fail to relocate tree blocks, force to update
				 * backref cache when committing transaction.
				 */
				rc->backref_cache.last_trans = trans->transid - 1;

				if (ret != -EAGAIN) {
					err = ret;
					break;
				}
				/* -EAGAIN: retry this extent on the next pass */
				rc->extents_found--;
				rc->search_start = key.objectid;
			}
		}

		btrfs_end_transaction_throttle(trans);
		btrfs_btree_balance_dirty(fs_info);
		trans = NULL;

		if (rc->stage == MOVE_DATA_EXTENTS &&
		    (flags & BTRFS_EXTENT_FLAG_DATA)) {
			rc->found_file_extent = 1;
			ret = relocate_data_extent(rc->data_inode,
						   &key, &rc->cluster);
			if (ret < 0) {
				err = ret;
				break;
			}
		}
	}
	if (trans && progress && err == -ENOSPC) {
		/* try to carve out a new chunk and resume where we stopped */
		ret = btrfs_force_chunk_alloc(trans, rc->block_group->flags);
		if (ret == 1) {
			err = 0;
			progress = 0;
			goto restart;
		}
	}

	btrfs_release_path(path);
	clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY);

	if (trans) {
		btrfs_end_transaction_throttle(trans);
		btrfs_btree_balance_dirty(fs_info);
	}

	if (!err) {
		/* flush any file extents still batched in the cluster */
		ret = relocate_file_extent_cluster(rc->data_inode,
						   &rc->cluster);
		if (ret < 0)
			err = ret;
	}

	rc->create_reloc_tree = 0;
	set_reloc_control(rc);

	backref_cache_cleanup(&rc->backref_cache);
	btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1);

	err = prepare_to_merge(rc, err);

	merge_reloc_roots(rc);

	rc->merge_reloc_tree = 0;
	unset_reloc_control(rc);
	btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1);

	/* get rid of pinned extents */
	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_free;
	}
	btrfs_commit_transaction(trans);
	ret = clean_dirty_subvols(rc);
	if (ret < 0 && !err)
		err = ret;
out_free:
	btrfs_free_block_rsv(fs_info, rc->block_rsv);
	btrfs_free_path(path);
	return err;
}
/*
 * Insert a bare inode item for @objectid into @root.
 *
 * The inode is created with link count 0 (no backref/dir entry is added),
 * size 0, mode S_IFREG|0600 and the NOCOMPRESS|PREALLOC flags — the shape
 * required for the data relocation inode.
 */
static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, u64 objectid)
{
	struct btrfs_path *path;
	struct btrfs_inode_item *item;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_insert_empty_inode(trans, root, path, objectid);
	if (ret)
		goto out;

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
	/* zero the whole item, then set just the fields we need */
	memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
	btrfs_set_inode_generation(leaf, item, 1);
	btrfs_set_inode_size(leaf, item, 0);
	btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
					  BTRFS_INODE_PREALLOC);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * helper to create inode for data relocation.
 * the inode is in data relocation tree and its link count is 0
 */
static noinline_for_stack
struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
				 struct btrfs_block_group_cache *group)
{
	struct inode *inode = NULL;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root;
	struct btrfs_key key;
	u64 objectid;
	int err = 0;

	root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID);
	if (IS_ERR(root))
		return ERR_CAST(root);

	trans = btrfs_start_transaction(root, 6);
	if (IS_ERR(trans))
		return ERR_CAST(trans);

	err = btrfs_find_free_objectid(root, &objectid);
	if (err)
		goto out;

	err = __insert_orphan_inode(trans, root, objectid);
	BUG_ON(err);

	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, root, NULL);
	BUG_ON(IS_ERR(inode));
	/* index_cnt stores the start of the block group being relocated */
	BTRFS_I(inode)->index_cnt = group->key.objectid;

	/* link count is 0, so the inode dies with the orphan cleanup */
	err = btrfs_orphan_add(trans, BTRFS_I(inode));
out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
	if (err) {
		if (inode)
			iput(inode);
		inode = ERR_PTR(err);
	}
	return inode;
}
/*
 * Allocate and initialize a zeroed reloc_control.
 *
 * Returns NULL on allocation failure.
 */
static struct reloc_control *alloc_reloc_control(void)
{
	struct reloc_control *ctl = kzalloc(sizeof(*ctl), GFP_NOFS);

	if (!ctl)
		return NULL;

	INIT_LIST_HEAD(&ctl->dirty_subvol_roots);
	INIT_LIST_HEAD(&ctl->reloc_roots);
	backref_cache_init(&ctl->backref_cache);
	mapping_tree_init(&ctl->reloc_root_tree);
	extent_io_tree_init(&ctl->processed_blocks, NULL);
	return ctl;
}
/*
 * Print the block group being relocated
 */
static void describe_relocation(struct btrfs_fs_info *fs_info,
				struct btrfs_block_group_cache *block_group)
{
	char flags_str[128] = {'\0'};

	btrfs_describe_block_groups(block_group->flags, flags_str,
				    sizeof(flags_str));
	btrfs_info(fs_info, "relocating block group %llu flags %s",
		   block_group->key.objectid, flags_str);
}
/*
 * function to relocate all extents in a block group.
 *
 * Marks the block group read-only, drops its free space cache, creates the
 * data relocation inode, then repeatedly runs relocate_block_group() —
 * first moving data extents, then updating data pointers — until no
 * unprocessed extents remain.  Returns 0 or a negative errno (-ETXTBSY if
 * the group is pinned by a swapfile).
 */
int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
{
	struct btrfs_block_group_cache *bg;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct reloc_control *rc;
	struct inode *inode;
	struct btrfs_path *path;
	int ret;
	int rw = 0;		/* 1 once the group was flipped read-only */
	int err = 0;

	bg = btrfs_lookup_block_group(fs_info, group_start);
	if (!bg)
		return -ENOENT;

	/* swapfile-backed groups must not be relocated */
	if (btrfs_pinned_by_swapfile(fs_info, bg)) {
		btrfs_put_block_group(bg);
		return -ETXTBSY;
	}

	rc = alloc_reloc_control();
	if (!rc) {
		btrfs_put_block_group(bg);
		return -ENOMEM;
	}

	rc->extent_root = extent_root;
	rc->block_group = bg;

	ret = btrfs_inc_block_group_ro(rc->block_group);
	if (ret) {
		err = ret;
		goto out;
	}
	rw = 1;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	/* drop the group's free space cache before moving its extents */
	inode = lookup_free_space_inode(fs_info, rc->block_group, path);
	btrfs_free_path(path);

	if (!IS_ERR(inode))
		ret = delete_block_group_cache(fs_info, rc->block_group, inode, 0);
	else
		ret = PTR_ERR(inode);

	if (ret && ret != -ENOENT) {
		err = ret;
		goto out;
	}

	rc->data_inode = create_reloc_inode(fs_info, rc->block_group);
	if (IS_ERR(rc->data_inode)) {
		err = PTR_ERR(rc->data_inode);
		rc->data_inode = NULL;
		goto out;
	}

	describe_relocation(fs_info, rc->block_group);

	/* wait out all writers into the group before starting */
	btrfs_wait_block_group_reservations(rc->block_group);
	btrfs_wait_nocow_writers(rc->block_group);
	btrfs_wait_ordered_roots(fs_info, U64_MAX,
				 rc->block_group->key.objectid,
				 rc->block_group->key.offset);

	while (1) {
		mutex_lock(&fs_info->cleaner_mutex);
		ret = relocate_block_group(rc);
		mutex_unlock(&fs_info->cleaner_mutex);
		if (ret < 0) {
			err = ret;
			goto out;
		}

		if (rc->extents_found == 0)
			break;

		btrfs_info(fs_info, "found %llu extents", rc->extents_found);

		if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
			/* flush the moved data, then switch to pointer fixup */
			ret = btrfs_wait_ordered_range(rc->data_inode, 0,
						       (u64)-1);
			if (ret) {
				err = ret;
				goto out;
			}
			invalidate_mapping_pages(rc->data_inode->i_mapping,
						 0, -1);
			rc->stage = UPDATE_DATA_PTRS;
		}
	}

	WARN_ON(rc->block_group->pinned > 0);
	WARN_ON(rc->block_group->reserved > 0);
	WARN_ON(btrfs_block_group_used(&rc->block_group->item) > 0);
out:
	if (err && rw)
		btrfs_dec_block_group_ro(rc->block_group);
	iput(rc->data_inode);
	btrfs_put_block_group(rc->block_group);
	kfree(rc);
	return err;
}
/*
 * Mark an orphaned reloc root as garbage by zeroing its drop progress and
 * setting its refcount to 0, so the cleaner will delete it later.
 */
static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret, err;

	trans = btrfs_start_transaction(fs_info->tree_root, 0);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	memset(&root->root_item.drop_progress, 0,
		sizeof(root->root_item.drop_progress));
	root->root_item.drop_level = 0;
	btrfs_set_root_refs(&root->root_item, 0);
	ret = btrfs_update_root(trans, fs_info->tree_root,
				&root->root_key, &root->root_item);

	/* transaction-end failure takes precedence over the update result */
	err = btrfs_end_transaction(trans);
	if (err)
		return err;
	return ret;
}
/*
 * recover relocation interrupted by system crash.
 *
 * this function resumes merging reloc trees with corresponding fs trees.
 * this is important for keeping the sharing of tree blocks
 */
int btrfs_recover_relocation(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(reloc_roots);
	struct btrfs_key key;
	struct btrfs_root *fs_root;
	struct btrfs_root *reloc_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct reloc_control *rc = NULL;
	struct btrfs_trans_handle *trans;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_BACK;

	/* collect all reloc roots, walking keys from the highest down */
	key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, fs_info->tree_root, &key,
					path, 0, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		btrfs_release_path(path);

		if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
		    key.type != BTRFS_ROOT_ITEM_KEY)
			break;

		reloc_root = btrfs_read_fs_root(root, &key);
		if (IS_ERR(reloc_root)) {
			err = PTR_ERR(reloc_root);
			goto out;
		}

		list_add(&reloc_root->root_list, &reloc_roots);

		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
			/*
			 * key.offset of a reloc root is the objectid of the
			 * fs tree it belongs to; a missing fs tree means the
			 * reloc root is garbage.
			 */
			fs_root = read_fs_root(fs_info,
					       reloc_root->root_key.offset);
			if (IS_ERR(fs_root)) {
				ret = PTR_ERR(fs_root);
				if (ret != -ENOENT) {
					err = ret;
					goto out;
				}
				ret = mark_garbage_root(reloc_root);
				if (ret < 0) {
					err = ret;
					goto out;
				}
			}
		}

		if (key.offset == 0)
			break;

		key.offset--;
	}
	btrfs_release_path(path);

	if (list_empty(&reloc_roots))
		goto out;

	rc = alloc_reloc_control();
	if (!rc) {
		err = -ENOMEM;
		goto out;
	}

	rc->extent_root = fs_info->extent_root;

	set_reloc_control(rc);

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		unset_reloc_control(rc);
		err = PTR_ERR(trans);
		goto out_free;
	}

	rc->merge_reloc_tree = 1;

	/* re-attach each live reloc root to its fs root before merging */
	while (!list_empty(&reloc_roots)) {
		reloc_root = list_entry(reloc_roots.next,
					struct btrfs_root, root_list);
		list_del(&reloc_root->root_list);

		if (btrfs_root_refs(&reloc_root->root_item) == 0) {
			/* garbage root: queue for deletion during merge */
			list_add_tail(&reloc_root->root_list,
				      &rc->reloc_roots);
			continue;
		}

		fs_root = read_fs_root(fs_info, reloc_root->root_key.offset);
		if (IS_ERR(fs_root)) {
			err = PTR_ERR(fs_root);
			goto out_free;
		}

		err = __add_reloc_root(reloc_root);
		BUG_ON(err < 0); /* -ENOMEM or logic error */
		fs_root->reloc_root = reloc_root;
	}

	err = btrfs_commit_transaction(trans);
	if (err)
		goto out_free;

	merge_reloc_roots(rc);

	unset_reloc_control(rc);

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_free;
	}
	err = btrfs_commit_transaction(trans);

	ret = clean_dirty_subvols(rc);
	if (ret < 0 && !err)
		err = ret;
out_free:
	kfree(rc);
out:
	if (!list_empty(&reloc_roots))
		free_reloc_roots(&reloc_roots);

	btrfs_free_path(path);

	if (err == 0) {
		/* cleanup orphan inode in data relocation tree */
		fs_root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID);
		if (IS_ERR(fs_root))
			err = PTR_ERR(fs_root);
		else
			err = btrfs_orphan_cleanup(fs_root);
	}
	return err;
}
/*
 * helper to add ordered checksum for data relocation.
 *
 * cloning checksum properly handles the nodatasum extents.
 * it also saves CPU time to re-calculate the checksum.
 */
int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_sum *sums;
	struct btrfs_ordered_extent *ordered;
	int ret;
	u64 disk_bytenr;
	u64 new_bytenr;
	LIST_HEAD(list);

	/* NOTE(review): assumes an ordered extent always exists at file_pos */
	ordered = btrfs_lookup_ordered_extent(inode, file_pos);
	BUG_ON(ordered->file_offset != file_pos || ordered->len != len);

	/* index_cnt maps the reloc inode's file offset back to disk bytenr */
	disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
	ret = btrfs_lookup_csums_range(fs_info->csum_root, disk_bytenr,
				       disk_bytenr + len - 1, &list, 0);
	if (ret)
		goto out;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del_init(&sums->list);

		/*
		 * We need to offset the new_bytenr based on where the csum is.
		 * We need to do this because we will read in entire prealloc
		 * extents but we may have written to say the middle of the
		 * prealloc extent, so we need to make sure the csum goes with
		 * the right disk offset.
		 *
		 * We can do this because the data reloc inode refers strictly
		 * to the on disk bytes, so we don't have to worry about
		 * disk_len vs real len like with real inodes since it's all
		 * disk length.
		 */
		new_bytenr = ordered->start + (sums->bytenr - disk_bytenr);
		sums->bytenr = new_bytenr;

		btrfs_add_ordered_sum(inode, ordered, sums);
	}
out:
	btrfs_put_ordered_extent(ordered);
	return ret;
}
/*
 * Hook called when a tree block is COWed during relocation.
 *
 * Keeps the backref cache in sync with the new block location (@cow
 * replacing @buf) and, in the UPDATE_DATA_PTRS stage, rewrites file
 * extent pointers in newly-COWed leaves of the reloc tree.
 */
int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *buf,
			  struct extent_buffer *cow)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct reloc_control *rc;
	struct backref_node *node;
	int first_cow = 0;	/* first COW since the last snapshot */
	int level;
	int ret = 0;

	rc = fs_info->reloc_ctl;
	if (!rc)
		return 0;

	BUG_ON(rc->stage == UPDATE_DATA_PTRS &&
	       root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID);

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		/* COWing the reloc root node moves the root itself */
		if (buf == root->node)
			__update_reloc_root(root, cow->start);
	}

	level = btrfs_header_level(buf);
	if (btrfs_header_generation(buf) <=
	    btrfs_root_last_snapshot(&root->root_item))
		first_cow = 1;

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
	    rc->create_reloc_tree) {
		WARN_ON(!first_cow && level == 0);

		node = rc->backref_cache.path[level];
		BUG_ON(node->bytenr != buf->start &&
		       node->new_bytenr != buf->start);

		/* repoint the cached backref node at the new copy */
		drop_node_buffer(node);
		extent_buffer_get(cow);
		node->eb = cow;
		node->new_bytenr = cow->start;

		if (!node->pending) {
			list_move_tail(&node->list,
				       &rc->backref_cache.pending[level]);
			node->pending = 1;
		}

		if (first_cow)
			__mark_block_processed(rc, node);

		if (first_cow && level > 0)
			rc->nodes_relocated += buf->len;
	}

	if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
		ret = replace_file_extents(trans, rc, root, cow);
	return ret;
}
/*
 * called before creating snapshot. it calculates metadata reservation
 * required for relocating tree blocks in the snapshot
 */
void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
			      u64 *bytes_to_reserve)
{
	struct btrfs_root *root;
	struct reloc_control *rc;

	root = pending->root;
	/* nothing to do unless this root is being relocated */
	if (!root->reloc_root)
		return;

	rc = root->fs_info->reloc_ctl;
	/* extra reservation is only needed during the merge stage */
	if (!rc->merge_reloc_tree)
		return;

	root = root->reloc_root;
	BUG_ON(btrfs_root_refs(&root->root_item) == 0);
	/*
	 * relocation is in the stage of merging trees. the space
	 * used by merging a reloc tree is twice the size of
	 * relocated tree nodes in the worst case. half for cowing
	 * the reloc tree, half for cowing the fs tree. the space
	 * used by cowing the reloc tree will be freed after the
	 * tree is dropped. if we create snapshot, cowing the fs
	 * tree may use more space than it frees. so we need
	 * reserve extra space.
	 */
	*bytes_to_reserve += rc->nodes_relocated;
}
/*
 * called after snapshot is created. migrate block reservation
 * and create reloc root for the newly created snapshot
 */
int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
			       struct btrfs_pending_snapshot *pending)
{
	struct btrfs_root *root = pending->root;
	struct btrfs_root *reloc_root;
	struct btrfs_root *new_root;
	struct reloc_control *rc;
	int ret;

	/* nothing to do unless the snapshotted root is being relocated */
	if (!root->reloc_root)
		return 0;

	rc = root->fs_info->reloc_ctl;
	rc->merging_rsv_size += rc->nodes_relocated;

	if (rc->merge_reloc_tree) {
		/* move the pre-reserved space into the relocation reserve */
		ret = btrfs_block_rsv_migrate(&pending->block_rsv,
					      rc->block_rsv,
					      rc->nodes_relocated, true);
		if (ret)
			return ret;
	}

	/* give the new snapshot its own reloc root, cloned from the parent's */
	new_root = pending->snap;
	reloc_root = create_reloc_root(trans, root->reloc_root,
				       new_root->root_key.objectid);
	if (IS_ERR(reloc_root))
		return PTR_ERR(reloc_root);

	ret = __add_reloc_root(reloc_root);
	BUG_ON(ret < 0);
	new_root->reloc_root = reloc_root;

	if (rc->create_reloc_tree)
		ret = clone_backref_node(trans, rc, root, reloc_root);
	return ret;
}
|
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SCSI_SCSI_EH_H
#define _SCSI_SCSI_EH_H
#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_common.h>
struct scsi_device;
struct Scsi_Host;
extern void scsi_eh_finish_cmd(struct scsi_cmnd *scmd,
struct list_head *done_q);
extern void scsi_eh_flush_done_q(struct list_head *done_q);
extern void scsi_report_bus_reset(struct Scsi_Host *, int);
extern void scsi_report_device_reset(struct Scsi_Host *, int, int);
extern int scsi_block_when_processing_errors(struct scsi_device *);
extern bool scsi_command_normalize_sense(const struct scsi_cmnd *cmd,
struct scsi_sense_hdr *sshdr);
extern int scsi_check_sense(struct scsi_cmnd *);
static inline bool scsi_sense_is_deferred(const struct scsi_sense_hdr *sshdr)
{
return ((sshdr->response_code >= 0x70) && (sshdr->response_code & 1));
}
extern bool scsi_get_sense_info_fld(const u8 *sense_buffer, int sb_len,
u64 *info_out);
extern int scsi_ioctl_reset(struct scsi_device *, int __user *);
/*
 * State saved and restored around issuing an error-handling command; used
 * by scsi_eh_prep_cmnd()/scsi_eh_restore_cmnd() declared below.
 */
struct scsi_eh_save {
	/* saved state */
	int result;
	unsigned int resid_len;
	int eh_eflags;
	enum dma_data_direction data_direction;
	unsigned underflow;
	unsigned char cmd_len;
	unsigned char prot_op;
	unsigned char *cmnd;
	struct scsi_data_buffer sdb;
	struct request *next_rq;
	/* new command support */
	unsigned char eh_cmnd[BLK_MAX_CDB];	/* CDB for the EH command */
	struct scatterlist sense_sgl;
};
extern void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd,
struct scsi_eh_save *ses, unsigned char *cmnd,
int cmnd_size, unsigned sense_bytes);
extern void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd,
struct scsi_eh_save *ses);
#endif /* _SCSI_SCSI_EH_H */
|
package com.cloupia.feature.purestorage.tasks;
import java.util.Arrays;
import com.cisco.cuic.api.client.WorkflowInputFieldTypeDeclaration;
import com.cloupia.feature.purestorage.constants.PureConstants;
import com.cloupia.service.cIM.inframgr.TaskOutputDefinition;
import com.cloupia.service.cIM.inframgr.customactions.CustomActionLogger;
import com.cloupia.service.cIM.inframgr.customactions.CustomActionTriggerContext;
public class ScheduleVolumeSnapshotTask extends GeneralTask
{
    /**
     * Registers this task under its Pure Storage task name and binds it to
     * its config class, resolved by fully-qualified class name at runtime.
     */
    public ScheduleVolumeSnapshotTask()
    {
        super(PureConstants.TASK_NAME_SCHEDULE_VOLUME_SNAPSHOT, "com.cloupia.feature.purestorage.tasks.ScheduleVolumeSnapshotTaskConfig");
    }
@Override
public void executeCustomAction(CustomActionTriggerContext context, CustomActionLogger actionlogger) throws Exception
{
ScheduleVolumeSnapshotTaskConfig config = (ScheduleVolumeSnapshotTaskConfig) context.loadConfigObject();
super.accountName = config.getAccountName();
super.executeCustomAction(context, actionlogger);
actionlogger.addInfo("finished checking ScheduleVolumeSnapshotTask accountname");
final String volumeName = config.getVolumeName();
String protectionGroupName = volumeName + "PGroup";
Boolean deleteScheduleSnapshotFlag = config.getDeleteScheduleSnapshotFlag();
config.setScheduleSnapshotFlag(true);
if(deleteScheduleSnapshotFlag == null)
{
String cF= config.getCreateFrequency();
if (cF.equals(""))cF= "0";
int createFrequency = Integer.valueOf(cF);
String fU = config.getFrequencyUnit();
if(fU.equals("m")) fU= "60";
else if(fU.equals("h")) fU ="3600";
else if(fU.equals("d")) fU ="3600*24";
else fU ="0";
int frequencyUnit = Integer.valueOf(fU);
String sT = config.getSetTime();
if(sT.equals("")) sT="0";
int setTime = Integer.valueOf(sT) * 3600;
String rP = config.getRetainPeriod();
if(rP.equals("")) rP="0";
int retainPeriod = Integer.valueOf(rP);
String rU = config.getRetainUnit();
if(rU.equals("m")) rU= "60";
else if(rU.equals("h")) rU ="3600";
else if(rU.equals("d")) rU ="3600*24";
else rU ="0";
int retainUnit = Integer.valueOf(rU);
String rN = config.getRetainNumber();
if(rN.equals("")) rN="0";
int retainSnapshot = Integer.valueOf(rN);
String d = config.getMoreDuration();
if(d.equals("")) d="0";
int duration = Integer.valueOf(d);
int frequency = createFrequency * frequencyUnit;
int period = retainPeriod * retainUnit;
try
{
CLIENT.protectionGroups().createOnVolumes(protectionGroupName,Arrays.asList(volumeName));
}
catch(Exception e) {
actionlogger.addInfo("There is already an exsiting snap schedule for Volume " + volumeName + ". So just reschedule it.");
}
CLIENT.protectionGroups().enableSnapshot(protectionGroupName);
if(frequencyUnit == 3600 *24 && createFrequency >0)
{
CLIENT.protectionGroups().setSchedule(protectionGroupName, null, null, null, frequency, setTime);
}
else
{
CLIENT.protectionGroups().setSchedule(protectionGroupName, null, null, null, frequency, null);
}
CLIENT.protectionGroups().setRetention(protectionGroupName, period, retainSnapshot, duration, null, null, null);
actionlogger.addInfo("Scheduled snapshot for volume " + volumeName + "on Pure FlashArray [" + flashArrayAccount.getManagementAddress() + "]");
context.getChangeTracker().undoableResourceModified("AssetType", "idstring", "Scheduled Snapshot",
"Snapshots have been scheduled" + config.getAccountName(),
new DeleteScheduleSnapshotTask().getTaskName(), new DeleteScheduleSnapshotTaskConfig(config));
String volIdentity =accountName+"@"+volumeName;
//String snapIdentity =accountName+"@"+snapShotName;
context.saveOutputValue(PureConstants.TASK_OUTPUT_NAME_VOLUME_IDENTITY, volIdentity);
actionlogger.addInfo("Volume Identity as Output is saved");
}
else
{
actionlogger.addInfo("This is a rollback task for deleted scheduled snapshot for volume" + volumeName);
CLIENT.protectionGroups().enableSnapshot(protectionGroupName);
}
}
public TaskOutputDefinition[] getTaskOutputDefinitions()
{
TaskOutputDefinition[] ops = new TaskOutputDefinition[1];
ops[0] = new TaskOutputDefinition(
PureConstants.TASK_OUTPUT_NAME_VOLUME_IDENTITY,
WorkflowInputFieldTypeDeclaration.GENERIC_TEXT,
"Volume Identity");
return ops;
}
} |
/*
* _dcd.h
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
/*
* ======== _dcd.h ========
* Description:
* Includes the wrapper functions called directly by the
* DeviceIOControl interface.
*
* Public Functions:
* WCD_CallDevIOCtl
* WCD_Init
* WCD_InitComplete2
* WCD_Exit
* <MOD>WRAP_*
*
* Notes:
* Compiled with CDECL calling convention.
*
*! Revision History:
*! ================
*! 19-Apr-2004 sb Aligned DMM definitions with Symbian
*! 08-Mar-2004 sb Added the Dynamic Memory Mapping feature
*! 30-Jan-2002 ag Renamed CMMWRAP_AllocBuf to CMMWRAP_CallocBuf.
*! 22-Nov-2000 kc: Added MGRWRAP_GetPerf_Data to acquire PERF stats.
*! 27-Oct-2000 jeh Added NODEWRAP_AllocMsgBuf, NODEWRAP_FreeMsgBuf. Removed
*! NODEWRAP_GetMessageStream.
*! 10-Oct-2000 ag: Added user CMM wrappers.
*! 04-Aug-2000 rr: MEMWRAP and UTIL_Wrap added.
*! 27-Jul-2000 rr: NODEWRAP, STRMWRAP added.
*! 27-Jun-2000 rr: MGRWRAP fxns added.IFDEF to build for PM or DSP/BIOS Bridge
*! 03-Dec-1999 rr: WCD_InitComplete2 enabled for BRD_AutoStart.
*! 09-Nov-1999 kc: Added MEMRY.
*! 02-Nov-1999 ag: Added CHNL.
*! 08-Oct-1999 rr: Utilwrap_Testdll fxn added
*! 24-Sep-1999 rr: header changed from _wcd.h to _dcd.h
*! 09-Sep-1997 gp: Created.
*/
#ifndef _WCD_
#define _WCD_
#include <dspbridge/wcdioctl.h>
/*
* ======== WCD_CallDevIOCtl ========
* Purpose:
* Call the (wrapper) function for the corresponding WCD IOCTL.
* Parameters:
* cmd: IOCTL id, base 0.
* args: Argument structure.
* pResult:
* Returns:
* DSP_SOK if command called; DSP_EINVALIDARG if command not in IOCTL
* table.
* Requires:
* Ensures:
*/
extern DSP_STATUS WCD_CallDevIOCtl(unsigned int cmd,
union Trapped_Args *args,
u32 *pResult);
/*
* ======== WCD_Init ========
* Purpose:
* Initialize WCD modules, and export WCD services to WMD's.
* This procedure is called when the class driver is loaded.
* Parameters:
* Returns:
* TRUE if success; FALSE otherwise.
* Requires:
* Ensures:
*/
extern bool WCD_Init(void);
/*
* ======== WCD_InitComplete2 ========
* Purpose:
* Perform any required WCD, and WMD initialization which
 *      cannot be performed in WCD_Init(void) or DEV_StartDevice() due
* to the fact that some services are not yet
* completely initialized.
* Parameters:
* Returns:
* DSP_SOK: Allow this device to load
* DSP_EFAIL: Failure.
* Requires:
* WCD initialized.
* Ensures:
*/
extern DSP_STATUS WCD_InitComplete2(void);
/*
* ======== WCD_Exit ========
* Purpose:
* Exit all modules initialized in WCD_Init(void).
* This procedure is called when the class driver is unloaded.
* Parameters:
* Returns:
* Requires:
* WCD_Init(void) was previously called.
* Ensures:
* Resources acquired in WCD_Init(void) are freed.
*/
extern void WCD_Exit(void);
/* MGR wrapper functions */
extern u32 MGRWRAP_EnumNode_Info(union Trapped_Args *args);
extern u32 MGRWRAP_EnumProc_Info(union Trapped_Args *args);
extern u32 MGRWRAP_RegisterObject(union Trapped_Args *args);
extern u32 MGRWRAP_UnregisterObject(union Trapped_Args *args);
extern u32 MGRWRAP_WaitForBridgeEvents(union Trapped_Args *args);
#ifndef RES_CLEANUP_DISABLE
extern u32 MGRWRAP_GetProcessResourcesInfo(union Trapped_Args *args);
#endif
/* CPRC (Processor) wrapper Functions */
extern u32 PROCWRAP_Attach(union Trapped_Args *args);
extern u32 PROCWRAP_Ctrl(union Trapped_Args *args);
extern u32 PROCWRAP_Detach(union Trapped_Args *args);
extern u32 PROCWRAP_EnumNode_Info(union Trapped_Args *args);
extern u32 PROCWRAP_EnumResources(union Trapped_Args *args);
extern u32 PROCWRAP_GetState(union Trapped_Args *args);
extern u32 PROCWRAP_GetTrace(union Trapped_Args *args);
extern u32 PROCWRAP_Load(union Trapped_Args *args);
extern u32 PROCWRAP_RegisterNotify(union Trapped_Args *args);
extern u32 PROCWRAP_Start(union Trapped_Args *args);
extern u32 PROCWRAP_ReserveMemory(union Trapped_Args *args);
extern u32 PROCWRAP_UnReserveMemory(union Trapped_Args *args);
extern u32 PROCWRAP_Map(union Trapped_Args *args);
extern u32 PROCWRAP_UnMap(union Trapped_Args *args);
extern u32 PROCWRAP_FlushMemory(union Trapped_Args *args);
extern u32 PROCWRAP_Stop(union Trapped_Args *args);
extern u32 PROCWRAP_InvalidateMemory(union Trapped_Args *args);
/* NODE wrapper functions */
extern u32 NODEWRAP_Allocate(union Trapped_Args *args);
extern u32 NODEWRAP_AllocMsgBuf(union Trapped_Args *args);
extern u32 NODEWRAP_ChangePriority(union Trapped_Args *args);
extern u32 NODEWRAP_Connect(union Trapped_Args *args);
extern u32 NODEWRAP_Create(union Trapped_Args *args);
extern u32 NODEWRAP_Delete(union Trapped_Args *args);
extern u32 NODEWRAP_FreeMsgBuf(union Trapped_Args *args);
extern u32 NODEWRAP_GetAttr(union Trapped_Args *args);
extern u32 NODEWRAP_GetMessage(union Trapped_Args *args);
extern u32 NODEWRAP_Pause(union Trapped_Args *args);
extern u32 NODEWRAP_PutMessage(union Trapped_Args *args);
extern u32 NODEWRAP_RegisterNotify(union Trapped_Args *args);
extern u32 NODEWRAP_Run(union Trapped_Args *args);
extern u32 NODEWRAP_Terminate(union Trapped_Args *args);
extern u32 NODEWRAP_GetUUIDProps(union Trapped_Args *args);
/* STRM wrapper functions */
extern u32 STRMWRAP_AllocateBuffer(union Trapped_Args *args);
extern u32 STRMWRAP_Close(union Trapped_Args *args);
extern u32 STRMWRAP_FreeBuffer(union Trapped_Args *args);
extern u32 STRMWRAP_GetEventHandle(union Trapped_Args *args);
extern u32 STRMWRAP_GetInfo(union Trapped_Args *args);
extern u32 STRMWRAP_Idle(union Trapped_Args *args);
extern u32 STRMWRAP_Issue(union Trapped_Args *args);
extern u32 STRMWRAP_Open(union Trapped_Args *args);
extern u32 STRMWRAP_Reclaim(union Trapped_Args *args);
extern u32 STRMWRAP_RegisterNotify(union Trapped_Args *args);
extern u32 STRMWRAP_Select(union Trapped_Args *args);
extern u32 CMMWRAP_CallocBuf(union Trapped_Args *args);
extern u32 CMMWRAP_FreeBuf(union Trapped_Args *args);
extern u32 CMMWRAP_GetHandle(union Trapped_Args *args);
extern u32 CMMWRAP_GetInfo(union Trapped_Args *args);
#endif /* _WCD_ */
|
/************************************************************************
 *   Unreal Internet Relay Chat Daemon, src/aln.c
* (C) 2000 Carsten Munk (Techie/Stskeeps) <stskeeps@tspre.org>
* Copyright (C) 2000 Lucas Madar [bahamut team]
*
* See file AUTHORS in IRC package for additional names of
* the programmers.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 1, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef STANDALONE
#include "struct.h"
#include "common.h"
#include "sys.h"
#include "numeric.h"
#include "msg.h"
#include "channel.h"
#include "version.h"
#endif
#include <time.h>
#include <sys/stat.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef _WIN32
#include <io.h>
#endif
#include <fcntl.h>
#ifndef STANDALONE
#include "h.h"
#include "proto.h"
ID_Copyright("(C) Carsten Munk 2000");
#endif
static inline char *int_to_base64(long);
static inline long base64_to_int(char *);
Link *Servers = NULL;
/* Encode a server numeric as base64 text.
 * Negative values have no encoding and collapse to "0". */
char *base64enc(long i)
{
	return (i < 0) ? "0" : int_to_base64(i);
}
/* Decode a base64 token back to a numeric; NULL decodes to 0. */
long base64dec(char *b64)
{
	return b64 ? base64_to_int(b64) : 0;
}
/* Return 1 when 'numeric' is already used by a linked server, 0 otherwise.
 * Numeric 0 is treated as "no numeric" and never collides. */
int numeric_collides(long numeric)
{
	Link *lp = Servers;

	if (numeric == 0)
		return 0;
	while (lp)
	{
		if (lp->value.cptr->serv->numeric == numeric)
			return 1;
		lp = lp->next;
	}
	return 0;
}
/* Push a server (or our own) client onto the head of the Servers list,
 * caching its numeric in the link's flags field. Non-server clients are
 * ignored. */
void add_server_to_table(aClient *what)
{
	Link *entry;

	if (!IsServer(what) && !IsMe(what))
		return;
	entry = make_link();
	entry->value.cptr = what;
	entry->flags = what->serv->numeric;
	entry->next = Servers;
	Servers = entry;
}
/* Remove every Servers entry pointing at 'what' and free its link.
 *
 * BUG FIX: the previous implementation wrapped the removal in a for(;;)
 * loop that only broke while a separately-maintained scan pointer 'lp'
 * was non-NULL; when the matching entries sat at the head of the list
 * that pointer was NULL and the loop spun forever once nothing was left
 * to remove. A single pointer-to-pointer walk removes all matches and
 * always terminates. */
void remove_server_from_table(aClient *what)
{
	Link **curr = &Servers;
	Link *tmp;

	while ((tmp = *curr) != NULL)
	{
		if (tmp->value.cptr == what)
		{
			*curr = tmp->next; /* unlink, then free */
			free_link(tmp);
		}
		else
			curr = &tmp->next;
	}
}
/* Look up a linked server by its numeric; NULL if none matches. */
aClient *find_server_by_numeric(long value)
{
	Link *lp = Servers;

	while (lp)
	{
		if (lp->value.cptr->serv->numeric == value)
			return lp->value.cptr;
		lp = lp->next;
	}
	return NULL;
}
/* Look up a linked server by its base64-encoded numeric; NULL input or
 * no match yields NULL. */
aClient *find_server_by_base64(char *b64)
{
	return b64 ? find_server_by_numeric(base64dec(b64)) : NULL;
}
/* Base64 identity token for a server. Points into int_to_base64()'s
 * static buffer, so copy it before the next encode call. */
char *find_server_id(aClient *which)
{
	return base64enc(which->serv->numeric);
}
/* Find the first linked server whose name matches the (wildcard-capable)
 * pattern 'name'; NULL if none matches. */
aClient *find_server_quick_search(char *name)
{
	Link *lp = Servers;

	while (lp)
	{
		if (match(name, lp->value.cptr->name) == 0)
			return lp->value.cptr;
		lp = lp->next;
	}
	return NULL;
}
/* Find a linked server by exact (case-sensitive) name; NULL if absent. */
aClient *find_server_quick_straight(char *name)
{
	Link *lp = Servers;

	while (lp)
	{
		if (strcmp(name, lp->value.cptr->name) == 0)
			return lp->value.cptr;
		lp = lp->next;
	}
	return NULL;
}
/* Resolve 'name' to a server when given (possibly NULL on no match);
 * fall back to the caller-supplied default 'cptr' when name is NULL. */
aClient *find_server_quickx(char *name, aClient *cptr)
{
	if (!name)
		return cptr;
	return find_server_quick_search(name);
}
/* Resolve a server token that is either a base64 numeric (tokens shorter
 * than 3 chars) or a literal server name. Returns NULL on no match or
 * NULL input. */
aClient *find_server_b64_or_real(char *name)
{
	Link *lp;
	long numeric;

	if (!name)
		return NULL;
	if (strlen(name) >= 3)
		return find_server_quick_straight(name);
	/* Short tokens are interpreted as base64-encoded numerics. */
	numeric = base64dec(name);
	for (lp = Servers; lp; lp = lp->next)
		if (lp->value.cptr->serv->numeric == numeric)
			return lp->value.cptr;
	return NULL;
}
/* ':' and '#' and '&' and '+' and '@' must never be in this table. */
/* these tables must NEVER CHANGE! >) */
char int6_to_base64_map[] = {
	'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D',
	'E', 'F',
	'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
	'U', 'V',
	'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j',
	'k', 'l',
	'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
	'{', '}'
};
/* Inverse of int6_to_base64_map: maps a character code to its 6-bit
 * value, or -1 for characters outside the alphabet. */
char base64_to_int6_map[] = {
	-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
	-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
	-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -1, -1, -1,
	-1, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
	25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, -1, -1, -1, -1, -1,
	-1, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
	51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, -1, 63, -1, -1,
	-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
	-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
	-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
	-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
	-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
	-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
	-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
	-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
};
/* Render 'val' in the 6-bit alphabet above. Returns a pointer into a
 * static buffer, so the result must be consumed before the next call. */
static inline char *int_to_base64(long val)
{
	/* 32/6 == max 6 bytes for representation,
	 * +1 for the null, +1 for byte boundaries
	 */
	static char base64buf[8];
	long i = 7;
	base64buf[i] = '\0';
	/* Values above 2^31-1 would need more than 6 digits and would
	 * underflow the buffer on 64-bit longs, so refuse them. -- Syzop */
	if (val > 2147483647L)
	{
		abort();
	}
	do
	{
		base64buf[--i] = int6_to_base64_map[val & 63];
	}
	while (val >>= 6);
	return base64buf + i;
}
/* Decode a token produced by int_to_base64(). NULL or empty input
 * decodes to 0.
 *
 * BUG FIX: the old code consumed the first byte before checking for
 * end-of-string, so an empty string walked past the terminator (an
 * out-of-bounds read), and its 'if (!b64)' test examined the pointer --
 * always non-NULL after the increment -- rather than the byte it
 * pointed at. Also uses the portable (unsigned char) cast instead of
 * the BSD-specific u_char typedef. */
static inline long base64_to_int(char *b64)
{
	long v;

	if (!b64 || !*b64)
		return 0;
	v = base64_to_int6_map[(unsigned char)*b64++];
	while (*b64)
	{
		v <<= 6;
		v += base64_to_int6_map[(unsigned char)*b64++];
	}
	return v;
}
/* Send 'cptr' one NOTICE per linked server listing its name, numeric,
 * base64 id, and a SANE/INSANE sanity flag (whether decoding the id
 * finds the same server again). */
void ns_stats(aClient *cptr)
{
	Link *lp;
	aClient *sptr;

	for (lp = Servers; lp; lp = lp->next)
	{
		sptr = lp->value.cptr;
		/* BUG FIX: 'numeric' is a long, but the old format used %i;
		 * passing a long for %i is undefined behaviour where long is
		 * wider than int (LP64). Cast explicitly and use %li. */
		sendto_one(cptr,
		    ":%s NOTICE %s :*** server=%s numeric=%li b64=%s [%s]", me.name,
		    cptr->name, sptr->name, (long)sptr->serv->numeric,
		    find_server_id(sptr),
		    find_server_b64_or_real(find_server_id(sptr)) == sptr ? "SANE" : "INSANE");
	}
}
|
<html lang="en">
<head>
<title>Empty Exprs - Using as</title>
<meta http-equiv="Content-Type" content="text/html">
<meta name="description" content="Using as">
<meta name="generator" content="makeinfo 4.13">
<link title="Top" rel="start" href="index.html#Top">
<link rel="up" href="Expressions.html#Expressions" title="Expressions">
<link rel="next" href="Integer-Exprs.html#Integer-Exprs" title="Integer Exprs">
<link href="http://www.gnu.org/software/texinfo/" rel="generator-home" title="Texinfo Homepage">
<!--
This file documents the GNU Assembler "as".
Copyright (C) 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
2000, 2001, 2002, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation,
Inc.
Permission is granted to copy, distribute and/or modify this document
under the terms of the GNU Free Documentation License, Version 1.3
or any later version published by the Free Software Foundation;
with no Invariant Sections, with no Front-Cover Texts, and with no
Back-Cover Texts. A copy of the license is included in the
section entitled ``GNU Free Documentation License''.
-->
<meta http-equiv="Content-Style-Type" content="text/css">
<style type="text/css"><!--
pre.display { font-family:inherit }
pre.format { font-family:inherit }
pre.smalldisplay { font-family:inherit; font-size:smaller }
pre.smallformat { font-family:inherit; font-size:smaller }
pre.smallexample { font-size:smaller }
pre.smalllisp { font-size:smaller }
span.sc { font-variant:small-caps }
span.roman { font-family:serif; font-weight:normal; }
span.sansserif { font-family:sans-serif; font-weight:normal; }
--></style>
<link rel="stylesheet" type="text/css" href="../cs.css">
</head>
<body>
<div class="node">
<a name="Empty-Exprs"></a>
<p>
Next: <a rel="next" accesskey="n" href="Integer-Exprs.html#Integer-Exprs">Integer Exprs</a>,
Up: <a rel="up" accesskey="u" href="Expressions.html#Expressions">Expressions</a>
<hr>
</div>
<h3 class="section">6.1 Empty Expressions</h3>
<p><a name="index-empty-expressions-244"></a><a name="index-expressions_002c-empty-245"></a>An empty expression has no value: it is just whitespace or null.
Wherever an absolute expression is required, you may omit the
expression, and <samp><span class="command">as</span></samp> assumes a value of (absolute) 0. This
is compatible with other assemblers.
</body></html>
|
// binary.cc -- binary input files for gold
// Copyright (C) 2008-2016 Free Software Foundation, Inc.
// Written by Ian Lance Taylor <iant@google.com>.
// This file is part of gold.
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
// MA 02110-1301, USA.
#include "gold.h"
#include <cerrno>
#include <cstring>
#include "elfcpp.h"
#include "stringpool.h"
#include "fileread.h"
#include "output.h"
#include "binary.h"
// safe-ctype.h interferes with macros defined by the system <ctype.h>.
// Some C++ system headers might include <ctype.h> and rely on its macro
// definitions being intact. So make sure that safe-ctype.h is included
// only after any C++ system headers, whether directly here (above) or via
// other local header files (e.g. #include <string> in "binary.h").
#include "safe-ctype.h"
// Support for reading binary files as input. These become blobs in
// the final output. These files are treated as though they have a
// single .data section and define three symbols:
// _binary_FILENAME_start, _binary_FILENAME_end, _binary_FILENAME_size.
// The FILENAME is the name of the input file, with any
// non-alphanumeric character changed to an underscore.
// We implement this by creating an ELF file in memory.
namespace gold
{
// class Binary_to_elf.
// Construct a converter that will wrap FILENAME's bytes in an ELF
// object for the given machine, word size (32 or 64) and endianness.
// The output buffer (data_/filesize_) is only produced by convert().
Binary_to_elf::Binary_to_elf(elfcpp::EM machine, int size, bool big_endian,
			     const std::string& filename)
  : elf_machine_(machine), size_(size), big_endian_(big_endian),
    filename_(filename), data_(NULL), filesize_(0)
{
}
// Release the synthesized ELF image, if one was built.
Binary_to_elf::~Binary_to_elf()
{
  // delete[] on a null pointer is a no-op, so no explicit check is needed.
  delete[] this->data_;
}
// Given FILENAME, create a buffer which looks like an ELF file with
// the contents of FILENAME as the contents of the only section. The
// TASK parameters is mainly for debugging, and records who holds
// locks.
// Dispatch to the sized_convert instantiation matching the configured
// word size and endianness.  Returns false when the input file cannot
// be read; reaches gold_unreachable() when the required target was not
// compiled into this linker (or size_ is not 32/64).
bool
Binary_to_elf::convert(const Task* task)
{
  if (this->size_ == 32)
    {
      if (!this->big_endian_)
	{
#ifdef HAVE_TARGET_32_LITTLE
	  return this->sized_convert<32, false>(task);
#else
	  gold_unreachable();
#endif
	}
      else
	{
#ifdef HAVE_TARGET_32_BIG
	  return this->sized_convert<32, true>(task);
#else
	  gold_unreachable();
#endif
	}
    }
  else if (this->size_ == 64)
    {
      if (!this->big_endian_)
	{
#ifdef HAVE_TARGET_64_LITTLE
	  return this->sized_convert<64, false>(task);
#else
	  gold_unreachable();
#endif
	}
      else
	{
#ifdef HAVE_TARGET_64_BIG
	  return this->sized_convert<64, true>(task);
#else
	  gold_unreachable();
#endif
	}
    }
  else
    gold_unreachable();
}
// We are going to create:
// * The ELF file header.
// * Five sections: null section, .data, .symtab, .strtab, .shstrtab
// * The contents of the file.
// * Four symbols: null, begin, end, size.
// * Three symbol names.
// * Four section names.
// Read the input file and synthesize, in memory, a relocatable ELF
// object whose single .data section holds the file contents, plus
// section-relative _binary_*_start/_binary_*_end symbols and an
// absolute _binary_*_size symbol.  On success the buffer is stored in
// data_/filesize_ and true is returned.
template<int size, bool big_endian>
bool
Binary_to_elf::sized_convert(const Task* task)
{
  // Read the input file.
  File_read f;
  if (!f.open(task, this->filename_))
    {
      gold_error(_("cannot open %s: %s:"), this->filename_.c_str(),
		 strerror(errno));
      return false;
    }
  section_size_type filesize = convert_to_section_size_type(f.filesize());
  const unsigned char* fileview;
  if (filesize == 0)
    fileview = NULL;
  else
    fileview = f.get_view(0, 0, filesize, false, false);
  // The symbol table that follows the data must be word-aligned.
  unsigned int align;
  if (size == 32)
    align = 4;
  else if (size == 64)
    align = 8;
  else
    gold_unreachable();
  section_size_type aligned_filesize = align_address(filesize, align);
  // Build the stringpool for the symbol table.  Symbol names are
  // "_binary_FILENAME_{start,end,size}" with every non-alphanumeric
  // character of FILENAME replaced by an underscore.
  std::string mangled_name = this->filename_;
  for (std::string::iterator p = mangled_name.begin();
       p != mangled_name.end();
       ++p)
    if (!ISALNUM(*p))
      *p = '_';
  mangled_name = "_binary_" + mangled_name;
  std::string start_symbol_name = mangled_name + "_start";
  std::string end_symbol_name = mangled_name + "_end";
  std::string size_symbol_name = mangled_name + "_size";
  Stringpool strtab;
  strtab.add(start_symbol_name.c_str(), false, NULL);
  strtab.add(end_symbol_name.c_str(), false, NULL);
  strtab.add(size_symbol_name.c_str(), false, NULL);
  strtab.set_string_offsets();
  // Build the stringpool for the section name table.
  Stringpool shstrtab;
  shstrtab.add(".data", false, NULL);
  shstrtab.add(".symtab", false, NULL);
  shstrtab.add(".strtab", false, NULL);
  shstrtab.add(".shstrtab", false, NULL);
  shstrtab.set_string_offsets();
  // Work out the size of the generated file, and the offsets of the
  // various sections, and allocate a buffer.  Layout: ELF header,
  // 5 section headers, padded data, 4 symbols, .strtab, .shstrtab.
  const int sym_size = elfcpp::Elf_sizes<size>::sym_size;
  size_t output_size = (elfcpp::Elf_sizes<size>::ehdr_size
			+ 5 * elfcpp::Elf_sizes<size>::shdr_size);
  size_t data_offset = output_size;
  output_size += aligned_filesize;
  size_t symtab_offset = output_size;
  output_size += 4 * sym_size;
  size_t strtab_offset = output_size;
  output_size += strtab.get_strtab_size();
  size_t shstrtab_offset = output_size;
  output_size += shstrtab.get_strtab_size();
  unsigned char* buffer = new unsigned char[output_size];
  // Write out the data.
  unsigned char* pout = buffer;
  this->write_file_header<size, big_endian>(&pout);
  this->write_section_header<size, big_endian>("", &shstrtab, elfcpp::SHT_NULL,
					       0, 0, 0, 0, 0,
					       0, 0, &pout);
  // Having the section be named ".data", having it be writable, and
  // giving it an alignment of 1 is because the GNU linker does it
  // that way, and existing linker script expect it.
  this->write_section_header<size, big_endian>(".data", &shstrtab,
					       elfcpp::SHT_PROGBITS,
					       (elfcpp::SHF_ALLOC
						| elfcpp::SHF_WRITE),
					       data_offset,
					       filesize, 0, 0,
					       1, 0, &pout);
  // sh_link 3 = .strtab section index; sh_info 1 = first global symbol.
  this->write_section_header<size, big_endian>(".symtab", &shstrtab,
					       elfcpp::SHT_SYMTAB,
					       0, symtab_offset, 4 * sym_size,
					       3, 1, align, sym_size, &pout);
  this->write_section_header<size, big_endian>(".strtab", &shstrtab,
					       elfcpp::SHT_STRTAB,
					       0, strtab_offset,
					       strtab.get_strtab_size(),
					       0, 0, 1, 0, &pout);
  this->write_section_header<size, big_endian>(".shstrtab", &shstrtab,
					       elfcpp::SHT_STRTAB,
					       0, shstrtab_offset,
					       shstrtab.get_strtab_size(),
					       0, 0, 1, 0, &pout);
  // Copy the file contents and zero the alignment padding.
  if (filesize > 0)
    {
      memcpy(pout, fileview, filesize);
      pout += filesize;
      memset(pout, 0, aligned_filesize - filesize);
      pout += aligned_filesize - filesize;
    }
  // Null symbol, then start/end (section 1 relative) and size (absolute).
  this->write_symbol<size, big_endian>("", &strtab, 0, 0, 0, &pout);
  this->write_symbol<size, big_endian>(start_symbol_name, &strtab, 0, filesize,
				       1, &pout);
  this->write_symbol<size, big_endian>(end_symbol_name, &strtab, filesize, 0,
				       1, &pout);
  this->write_symbol<size, big_endian>(size_symbol_name, &strtab, filesize, 0,
				       elfcpp::SHN_ABS, &pout);
  strtab.write_to_buffer(pout, strtab.get_strtab_size());
  pout += strtab.get_strtab_size();
  shstrtab.write_to_buffer(pout, shstrtab.get_strtab_size());
  pout += shstrtab.get_strtab_size();
  gold_assert(static_cast<size_t>(pout - buffer) == output_size);
  this->data_ = buffer;
  this->filesize_ = output_size;
  f.unlock(task);
  return true;
}
// Write out the file header.
// Write the ELF file header for a relocatable (ET_REL) object at
// *PPOUT and advance *PPOUT past it.  The section header table is
// placed immediately after the file header; there are no program
// headers.
template<int size, bool big_endian>
void
Binary_to_elf::write_file_header(unsigned char** ppout)
{
  elfcpp::Ehdr_write<size, big_endian> oehdr(*ppout);
  // e_ident: magic bytes, class (32/64), data encoding, ELF version.
  unsigned char e_ident[elfcpp::EI_NIDENT];
  memset(e_ident, 0, elfcpp::EI_NIDENT);
  e_ident[elfcpp::EI_MAG0] = elfcpp::ELFMAG0;
  e_ident[elfcpp::EI_MAG1] = elfcpp::ELFMAG1;
  e_ident[elfcpp::EI_MAG2] = elfcpp::ELFMAG2;
  e_ident[elfcpp::EI_MAG3] = elfcpp::ELFMAG3;
  if (size == 32)
    e_ident[elfcpp::EI_CLASS] = elfcpp::ELFCLASS32;
  else if (size == 64)
    e_ident[elfcpp::EI_CLASS] = elfcpp::ELFCLASS64;
  else
    gold_unreachable();
  e_ident[elfcpp::EI_DATA] = (big_endian
			      ? elfcpp::ELFDATA2MSB
			      : elfcpp::ELFDATA2LSB);
  e_ident[elfcpp::EI_VERSION] = elfcpp::EV_CURRENT;
  oehdr.put_e_ident(e_ident);
  oehdr.put_e_type(elfcpp::ET_REL);
  oehdr.put_e_machine(this->elf_machine_);
  oehdr.put_e_version(elfcpp::EV_CURRENT);
  oehdr.put_e_entry(0);
  oehdr.put_e_phoff(0);
  // Section headers follow directly after this header.
  oehdr.put_e_shoff(elfcpp::Elf_sizes<size>::ehdr_size);
  oehdr.put_e_flags(0);
  oehdr.put_e_ehsize(elfcpp::Elf_sizes<size>::ehdr_size);
  oehdr.put_e_phentsize(0);
  oehdr.put_e_phnum(0);
  oehdr.put_e_shentsize(elfcpp::Elf_sizes<size>::shdr_size);
  // Five sections: null, .data, .symtab, .strtab, .shstrtab (index 4).
  oehdr.put_e_shnum(5);
  oehdr.put_e_shstrndx(4);
  *ppout += elfcpp::Elf_sizes<size>::ehdr_size;
}
// Write out a section header.
// Write one section header at *PPOUT and advance *PPOUT past it.
// NAME is looked up in SHSTRTAB (the empty name maps to offset 0,
// used by the null section); the remaining parameters fill the
// corresponding sh_* fields directly.  sh_addr is always 0 since the
// output is relocatable.
template<int size, bool big_endian>
void
Binary_to_elf::write_section_header(
    const char* name,
    const Stringpool* shstrtab,
    elfcpp::SHT type,
    unsigned int flags,
    section_size_type offset,
    section_size_type section_size,
    unsigned int link,
    unsigned int info,
    unsigned int addralign,
    unsigned int entsize,
    unsigned char** ppout)
{
  elfcpp::Shdr_write<size, big_endian> oshdr(*ppout);
  oshdr.put_sh_name(*name == '\0' ? 0 : shstrtab->get_offset(name));
  oshdr.put_sh_type(type);
  oshdr.put_sh_flags(flags);
  oshdr.put_sh_addr(0);
  oshdr.put_sh_offset(offset);
  oshdr.put_sh_size(section_size);
  oshdr.put_sh_link(link);
  oshdr.put_sh_info(info);
  oshdr.put_sh_addralign(addralign);
  oshdr.put_sh_entsize(entsize);
  *ppout += elfcpp::Elf_sizes<size>::shdr_size;
}
// Write out a symbol.
// Write one symbol table entry at *PPOUT and advance *PPOUT past it.
// NAME is looked up in STRTAB (empty name -> offset 0, the null
// symbol, which is emitted as STB_LOCAL; named symbols are
// STB_GLOBAL).  VALUE and ST_SIZE fill st_value/st_size; SHNDX is the
// defining section index (or SHN_ABS).
template<int size, bool big_endian>
void
Binary_to_elf::write_symbol(
    const std::string& name,
    const Stringpool* strtab,
    section_size_type value,
    typename elfcpp::Elf_types<32>::Elf_WXword st_size,
    unsigned int shndx,
    unsigned char** ppout)
{
  unsigned char* pout = *ppout;
  elfcpp::Sym_write<size, big_endian> osym(pout);
  osym.put_st_name(name.empty() ? 0 : strtab->get_offset(name.c_str()));
  osym.put_st_value(value);
  osym.put_st_size(st_size);
  osym.put_st_info(name.empty() ? elfcpp::STB_LOCAL : elfcpp::STB_GLOBAL,
		   elfcpp::STT_NOTYPE);
  osym.put_st_other(elfcpp::STV_DEFAULT, 0);
  osym.put_st_shndx(shndx);
  *ppout += elfcpp::Elf_sizes<size>::sym_size;
}
} // End namespace gold.
|
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/time.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/platform_thread.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
// Manual-reset semantics: once signaled, the event stays signaled
// across repeated observations until Reset() is called explicitly.
TEST(WaitableEventTest, ManualBasics) {
  WaitableEvent event(true, false);  // manual-reset, initially unsignaled
  EXPECT_FALSE(event.IsSignaled());
  event.Signal();
  EXPECT_TRUE(event.IsSignaled());
  EXPECT_TRUE(event.IsSignaled());  // observation does not consume the signal
  event.Reset();
  EXPECT_FALSE(event.IsSignaled());
  EXPECT_FALSE(event.TimedWait(TimeDelta::FromMilliseconds(10)));  // times out
  event.Signal();
  event.Wait();  // returns immediately; event remains signaled afterwards
  EXPECT_TRUE(event.TimedWait(TimeDelta::FromMilliseconds(10)));
}
// Auto-reset semantics: each successful wait or IsSignaled() check
// consumes the signal, returning the event to the unsignaled state.
TEST(WaitableEventTest, AutoBasics) {
  WaitableEvent event(false, false);  // auto-reset, initially unsignaled
  EXPECT_FALSE(event.IsSignaled());
  event.Signal();
  EXPECT_TRUE(event.IsSignaled());   // consumes the signal...
  EXPECT_FALSE(event.IsSignaled());  // ...so the next check sees unsignaled
  event.Reset();  // no-op on an already-unsignaled event
  EXPECT_FALSE(event.IsSignaled());
  EXPECT_FALSE(event.TimedWait(TimeDelta::FromMilliseconds(10)));
  event.Signal();
  event.Wait();  // consumes the signal
  EXPECT_FALSE(event.TimedWait(TimeDelta::FromMilliseconds(10)));
  event.Signal();
  EXPECT_TRUE(event.TimedWait(TimeDelta::FromMilliseconds(10)));
}
// WaitMany returns the index of an already-signaled event without
// blocking (each wait consumes that auto-reset event's signal).
TEST(WaitableEventTest, WaitManyShortcut) {
  WaitableEvent* ev[5];
  for (unsigned i = 0; i < 5; ++i)
    ev[i] = new WaitableEvent(false, false);  // auto-reset events
  ev[3]->Signal();
  EXPECT_EQ(WaitableEvent::WaitMany(ev, 5), 3u);
  ev[3]->Signal();
  EXPECT_EQ(WaitableEvent::WaitMany(ev, 5), 3u);
  ev[4]->Signal();
  EXPECT_EQ(WaitableEvent::WaitMany(ev, 5), 4u);
  ev[0]->Signal();
  EXPECT_EQ(WaitableEvent::WaitMany(ev, 5), 0u);
  for (unsigned i = 0; i < 5; ++i)
    delete ev[i];
}
// Test helper thread: sleeps for roughly the given number of seconds,
// then signals the supplied event.  NOTE(review): the static_cast<int>
// truncates fractional seconds, so e.g. 0.1 sleeps 0 seconds -- confirm
// this is intended before relying on the delay.
class WaitableEventSignaler : public PlatformThread::Delegate {
 public:
  WaitableEventSignaler(double seconds, WaitableEvent* ev)
      : seconds_(seconds),
        ev_(ev) {
  }

  // PlatformThread::Delegate implementation.
  void ThreadMain() {
    PlatformThread::Sleep(TimeDelta::FromSeconds(static_cast<int>(seconds_)));
    ev_->Signal();
  }

 private:
  const double seconds_;        // requested delay before signaling
  WaitableEvent *const ev_;     // event to signal; not owned
};
// WaitMany blocks until a helper thread signals one of the events and
// then reports that event's index.
TEST(WaitableEventTest, WaitMany) {
  WaitableEvent* ev[5];
  for (unsigned i = 0; i < 5; ++i)
    ev[i] = new WaitableEvent(false, false);  // auto-reset events
  WaitableEventSignaler signaler(0.1, ev[2]);
  PlatformThreadHandle thread;
  PlatformThread::Create(0, &signaler, &thread);
  EXPECT_EQ(WaitableEvent::WaitMany(ev, 5), 2u);  // blocks until ev[2] fires
  PlatformThread::Join(thread);
  for (unsigned i = 0; i < 5; ++i)
    delete ev[i];
}
} // namespace base
|
<html lang="en">
<head>
<title>Working Directory - Debugging with GDB</title>
<meta http-equiv="Content-Type" content="text/html">
<meta name="description" content="Debugging with GDB">
<meta name="generator" content="makeinfo 4.13">
<link title="Top" rel="start" href="index.html#Top">
<link rel="up" href="Running.html#Running" title="Running">
<link rel="prev" href="Environment.html#Environment" title="Environment">
<link rel="next" href="Input_002fOutput.html#Input_002fOutput" title="Input/Output">
<link href="http://www.gnu.org/software/texinfo/" rel="generator-home" title="Texinfo Homepage">
<!--
Copyright (C) 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996,
1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
Free Software Foundation, Inc.
Permission is granted to copy, distribute and/or modify this document
under the terms of the GNU Free Documentation License, Version 1.3 or
any later version published by the Free Software Foundation; with the
Invariant Sections being ``Free Software'' and ``Free Software Needs
Free Documentation'', with the Front-Cover Texts being ``A GNU Manual,''
and with the Back-Cover Texts as in (a) below.
(a) The FSF's Back-Cover Text is: ``You are free to copy and modify
this GNU Manual. Buying copies from GNU Press supports the FSF in
developing GNU and promoting software freedom.''-->
<meta http-equiv="Content-Style-Type" content="text/css">
<style type="text/css"><!--
pre.display { font-family:inherit }
pre.format { font-family:inherit }
pre.smalldisplay { font-family:inherit; font-size:smaller }
pre.smallformat { font-family:inherit; font-size:smaller }
pre.smallexample { font-size:smaller }
pre.smalllisp { font-size:smaller }
span.sc { font-variant:small-caps }
span.roman { font-family:serif; font-weight:normal; }
span.sansserif { font-family:sans-serif; font-weight:normal; }
--></style>
<link rel="stylesheet" type="text/css" href="../cs.css">
</head>
<body>
<div class="node">
<a name="Working-Directory"></a>
<p>
Next: <a rel="next" accesskey="n" href="Input_002fOutput.html#Input_002fOutput">Input/Output</a>,
Previous: <a rel="previous" accesskey="p" href="Environment.html#Environment">Environment</a>,
Up: <a rel="up" accesskey="u" href="Running.html#Running">Running</a>
<hr>
</div>
<h3 class="section">4.5 Your Program's Working Directory</h3>
<p><a name="index-working-directory-_0028of-your-program_0029-119"></a>Each time you start your program with <code>run</code>, it inherits its
working directory from the current working directory of <span class="sc">gdb</span>.
The <span class="sc">gdb</span> working directory is initially whatever it inherited
from its parent process (typically the shell), but you can specify a new
working directory in <span class="sc">gdb</span> with the <code>cd</code> command.
<p>The <span class="sc">gdb</span> working directory also serves as a default for the commands
that specify files for <span class="sc">gdb</span> to operate on. See <a href="Files.html#Files">Commands to Specify Files</a>.
<a name="index-cd-120"></a>
<a name="index-change-working-directory-121"></a>
<dl><dt><code>cd </code><var>directory</var><dd>Set the <span class="sc">gdb</span> working directory to <var>directory</var>.
<p><a name="index-pwd-122"></a><br><dt><code>pwd</code><dd>Print the <span class="sc">gdb</span> working directory.
</dl>
<p>It is generally impossible to find the current working directory of
the process being debugged (since a program can change its directory
during its run). If you work on a system where <span class="sc">gdb</span> is
configured with the <samp><span class="file">/proc</span></samp> support, you can use the <code>info
proc</code> command (see <a href="SVR4-Process-Information.html#SVR4-Process-Information">SVR4 Process Information</a>) to find out the
current working directory of the debuggee.
</body></html>
|
/**
* \file
*
* \brief Autogenerated API include file for the Atmel Software Framework (ASF)
*
* Copyright (c) 2012 Atmel Corporation. All rights reserved.
*
* \asf_license_start
*
* \page License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. The name of Atmel may not be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* 4. This software may only be redistributed and used in connection with an
* Atmel microcontroller product.
*
* THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
* EXPRESSLY AND SPECIFICALLY DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* \asf_license_stop
*
*/
#ifndef ASF_H
#define ASF_H
/*
* This file includes all API header files for the selected drivers from ASF.
* Note: There might be duplicate includes required by more than one driver.
*
* The file is automatically generated and will be re-written when
* running the ASF driver selector tool. Any changes will be discarded.
*/
// From module: Compiler abstraction layer and code utilities
#include <compiler.h>
#include <status_codes.h>
// From module: EVK1101
#include <led.h>
// From module: FLASHC - Flash Controller
#include <flashc.h>
// From module: GPIO - General-Purpose Input/Output
#include <gpio.h>
// From module: Generic board support
#include <board.h>
// From module: Interrupt management - UC3 implementation
#include <interrupt.h>
// From module: Memory Control Access Interface
#include <ctrl_access.h>
// From module: PM Power Manager- UC3 A0/A1/A3/A4/B0/B1 implementation
#include <power_clocks_lib.h>
#include <sleep.h>
// From module: Part identification macros
#include <parts.h>
// From module: SPI - Serial Peripheral Interface
#include <spi.h>
// From module: SPI - UC3 implementation
#include <spi_master.h>
#include <spi_master.h>
// From module: Sleep manager - UC3 implementation
#include <sleepmgr.h>
#include <uc3/sleepmgr.h>
// From module: System Clock Control - UC3 B0 implementation
#include <sysclk.h>
// From module: USART - Universal Synchronous/Asynchronous Receiver/Transmitter
#include <usart.h>
// From module: USB CDC Protocol
#include <usb_protocol_cdc.h>
// From module: USB Device CDC (Composite Device)
#include <udi_cdc.h>
// From module: USB Device MSC (Composite Device)
#include <udi_msc.h>
// From module: USB Device Stack Core (Common API)
#include <udc.h>
#include <udd.h>
// From module: USB MSC Protocol
#include <usb_protocol_msc.h>
#endif // ASF_H
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* System Control and Management Interface (SCMI) Message Protocol
* driver common header file containing some definitions, structures
* and function prototypes used in all the different SCMI protocols.
*
* Copyright (C) 2018 ARM Ltd.
*/
#ifndef _SCMI_COMMON_H
#define _SCMI_COMMON_H
#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/scmi_protocol.h>
#include <linux/types.h>
#include <asm/unaligned.h>
#define PROTOCOL_REV_MINOR_MASK GENMASK(15, 0)
#define PROTOCOL_REV_MAJOR_MASK GENMASK(31, 16)
#define PROTOCOL_REV_MAJOR(x) (u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x)))
#define PROTOCOL_REV_MINOR(x) (u16)(FIELD_GET(PROTOCOL_REV_MINOR_MASK, (x)))
#define MAX_PROTOCOLS_IMP 16
#define MAX_OPPS 16
enum scmi_common_cmd {
PROTOCOL_VERSION = 0x0,
PROTOCOL_ATTRIBUTES = 0x1,
PROTOCOL_MESSAGE_ATTRIBUTES = 0x2,
};
/**
* struct scmi_msg_resp_prot_version - Response for a message
*
* @minor_version: Minor version of the ABI that firmware supports
* @major_version: Major version of the ABI that firmware supports
*
* In general, ABI version changes follow the rule that minor version increments
* are backward compatible. Major revision changes in ABI may not be
* backward compatible.
*
* Response to a generic message with message type SCMI_MSG_VERSION
*/
struct scmi_msg_resp_prot_version {
__le16 minor_version;
__le16 major_version;
};
#define MSG_ID_MASK GENMASK(7, 0)
#define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr))
#define MSG_TYPE_MASK GENMASK(9, 8)
#define MSG_XTRACT_TYPE(hdr) FIELD_GET(MSG_TYPE_MASK, (hdr))
#define MSG_TYPE_COMMAND 0
#define MSG_TYPE_DELAYED_RESP 2
#define MSG_TYPE_NOTIFICATION 3
#define MSG_PROTOCOL_ID_MASK GENMASK(17, 10)
#define MSG_XTRACT_PROT_ID(hdr) FIELD_GET(MSG_PROTOCOL_ID_MASK, (hdr))
#define MSG_TOKEN_ID_MASK GENMASK(27, 18)
#define MSG_XTRACT_TOKEN(hdr) FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
#define MSG_TOKEN_MAX (MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)
/**
* struct scmi_msg_hdr - Message(Tx/Rx) header
*
* @id: The identifier of the message being sent
* @protocol_id: The identifier of the protocol used to send @id message
* @seq: The token to identify the message. When a message returns, the
* platform returns the whole message header unmodified including the
* token
* @status: Status of the transfer once it's complete
* @poll_completion: Indicate if the transfer needs to be polled for
* completion or interrupt mode is used
*/
struct scmi_msg_hdr {
u8 id;
u8 protocol_id;
u16 seq;
u32 status;
bool poll_completion;
};
/**
 * pack_scmi_header() - assemble the 32-bit SCMI message header
 *
 * @hdr: pointer to header carrying the message id, protocol id and
 *	 sequence (token) to be encoded.
 *
 * Return: 32-bit packed message header to be sent to the platform.
 */
static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
{
	u32 packed = 0;

	packed |= FIELD_PREP(MSG_ID_MASK, hdr->id);
	packed |= FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id);
	packed |= FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq);

	return packed;
}
/**
 * unpack_scmi_header() - decode message id and protocol id from a header
 *
 * @msg_hdr: 32-bit packed message header sent from the platform
 * @hdr: pointer to header in which to record message and protocol id.
 */
static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
{
	/* The two extractions are independent of each other. */
	hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr);
	hdr->id = MSG_XTRACT_ID(msg_hdr);
}
/**
* struct scmi_msg - Message(Tx/Rx) structure
*
* @buf: Buffer pointer
* @len: Length of data in the Buffer
*/
struct scmi_msg {
void *buf;
size_t len;
};
/**
* struct scmi_xfer - Structure representing a message flow
*
* @transfer_id: Unique ID for debug & profiling purpose
* @hdr: Transmit message header
* @tx: Transmit message
* @rx: Receive message, the buffer should be pre-allocated to store
* message. If request-ACK protocol is used, we can reuse the same
* buffer for the rx path as we use for the tx path.
* @done: command message transmit completion event
* @async_done: pointer to delayed response message received event completion
*/
struct scmi_xfer {
int transfer_id;
struct scmi_msg_hdr hdr;
struct scmi_msg tx;
struct scmi_msg rx;
struct completion done;
struct completion *async_done;
};
void scmi_xfer_put(const struct scmi_handle *h, struct scmi_xfer *xfer);
int scmi_do_xfer(const struct scmi_handle *h, struct scmi_xfer *xfer);
int scmi_do_xfer_with_response(const struct scmi_handle *h,
struct scmi_xfer *xfer);
int scmi_xfer_get_init(const struct scmi_handle *h, u8 msg_id, u8 prot_id,
size_t tx_size, size_t rx_size, struct scmi_xfer **p);
void scmi_reset_rx_to_maxsz(const struct scmi_handle *handle,
struct scmi_xfer *xfer);
int scmi_handle_put(const struct scmi_handle *handle);
struct scmi_handle *scmi_handle_get(struct device *dev);
void scmi_set_handle(struct scmi_device *scmi_dev);
int scmi_version_get(const struct scmi_handle *h, u8 protocol, u32 *version);
void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
u8 *prot_imp);
int scmi_base_protocol_init(struct scmi_handle *h);
int __init scmi_bus_init(void);
void __exit scmi_bus_exit(void);
#define DECLARE_SCMI_REGISTER_UNREGISTER(func) \
int __init scmi_##func##_register(void); \
void __exit scmi_##func##_unregister(void)
DECLARE_SCMI_REGISTER_UNREGISTER(clock);
DECLARE_SCMI_REGISTER_UNREGISTER(perf);
DECLARE_SCMI_REGISTER_UNREGISTER(power);
DECLARE_SCMI_REGISTER_UNREGISTER(reset);
DECLARE_SCMI_REGISTER_UNREGISTER(sensors);
DECLARE_SCMI_REGISTER_UNREGISTER(system);
#define DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(id, name) \
int __init scmi_##name##_register(void) \
{ \
return scmi_protocol_register((id), &scmi_##name##_protocol_init); \
} \
\
void __exit scmi_##name##_unregister(void) \
{ \
scmi_protocol_unregister((id)); \
}
/* SCMI Transport */
/**
* struct scmi_chan_info - Structure representing a SCMI channel information
*
* @dev: Reference to device in the SCMI hierarchy corresponding to this
* channel
* @handle: Pointer to SCMI entity handle
* @transport_info: Transport layer related information
*/
struct scmi_chan_info {
struct device *dev;
struct scmi_handle *handle;
void *transport_info;
};
/**
* struct scmi_transport_ops - Structure representing a SCMI transport ops
*
* @chan_available: Callback to check if channel is available or not
* @chan_setup: Callback to allocate and setup a channel
* @chan_free: Callback to free a channel
* @send_message: Callback to send a message
* @mark_txdone: Callback to mark tx as done
* @fetch_response: Callback to fetch response
* @fetch_notification: Callback to fetch notification
* @clear_channel: Callback to clear a channel
* @poll_done: Callback to poll transfer status
*/
struct scmi_transport_ops {
bool (*chan_available)(struct device *dev, int idx);
int (*chan_setup)(struct scmi_chan_info *cinfo, struct device *dev,
bool tx);
int (*chan_free)(int id, void *p, void *data);
int (*send_message)(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer);
void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret);
void (*fetch_response)(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer);
void (*fetch_notification)(struct scmi_chan_info *cinfo,
size_t max_len, struct scmi_xfer *xfer);
void (*clear_channel)(struct scmi_chan_info *cinfo);
bool (*poll_done)(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer);
};
/**
* struct scmi_desc - Description of SoC integration
*
* @ops: Pointer to the transport specific ops structure
* @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
* @max_msg: Maximum number of messages that can be pending
* simultaneously in the system
* @max_msg_size: Maximum size of data per message that can be handled.
*/
struct scmi_desc {
const struct scmi_transport_ops *ops;
int max_rx_timeout_ms;
int max_msg;
int max_msg_size;
};
extern const struct scmi_desc scmi_mailbox_desc;
#ifdef CONFIG_HAVE_ARM_SMCCC
extern const struct scmi_desc scmi_smc_desc;
#endif
void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr);
void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id);
/* shmem related declarations */
struct scmi_shared_mem;
void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer);
u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem);
void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer);
void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
size_t max_len, struct scmi_xfer *xfer);
void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem);
bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer);
#endif /* _SCMI_COMMON_H */
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Per-thread (in Go, per-M) malloc cache for small objects.
//
// See malloc.h for an overview.
#include "runtime.h"
#include "arch.h"
#include "malloc.h"
// Allocate one object of the given size class from the per-thread
// cache c, refilling the local free list from the central list when
// it is empty.  If zeroed is non-zero, the returned block is
// guaranteed to contain all zero bytes.
void*
runtime_MCache_Alloc(MCache *c, int32 sizeclass, uintptr size, int32 zeroed)
{
	MCacheList *l;
	MLink *first, *v;
	int32 n;

	// Allocate from list.
	l = &c->list[sizeclass];
	if(l->list == nil) {
		// Replenish using central lists.
		n = runtime_MCentral_AllocList(&runtime_mheap.central[sizeclass],
			runtime_class_to_transfercount[sizeclass], &first);
		if(n == 0)
			runtime_throw("out of memory");
		l->list = first;
		l->nlist = n;
		c->size += n*size;
	}
	// Pop the head of the list and keep the low-water mark
	// (nlistmin) up to date for the scavenger in MCache_Free.
	v = l->list;
	l->list = v->next;
	l->nlist--;
	if(l->nlist < l->nlistmin)
		l->nlistmin = l->nlist;
	c->size -= size;

	// v is zeroed except for the link pointer
	// that we used above; zero that.
	v->next = nil;
	if(zeroed) {
		// Block is zeroed iff its second word is zero: the first
		// word (the link pointer) was already cleared just above,
		// so a single-word block is fully zero at this point.
		// (The former "else { v->next = nil; }" branch was a dead
		// store — v->next is nil before this if — and was removed.)
		if(size > sizeof(uintptr) && ((uintptr*)v)[1] != 0)
			runtime_memclr((byte*)v, size);
	}
	c->local_cachealloc += size;
	c->local_objects++;
	return v;
}
// Take n elements off l and return them to the central free list.
// The caller must ensure l holds at least n elements.
static void
ReleaseN(MCache *c, MCacheList *l, int32 n, int32 sizeclass)
{
	MLink *first, **lp;
	int32 i;

	// Cut off first n elements: walk n links so that lp points at
	// the slot holding the (n+1)th element, then splice it out as
	// the new list head and nil-terminate the detached prefix.
	first = l->list;
	lp = &l->list;
	for(i=0; i<n; i++)
		lp = &(*lp)->next;
	l->list = *lp;
	*lp = nil;
	l->nlist -= n;
	// Keep the low-water mark (used by the scavenge pass in
	// runtime_MCache_Free) consistent with the shrunken list.
	if(l->nlist < l->nlistmin)
		l->nlistmin = l->nlist;
	c->size -= n*runtime_class_to_size[sizeclass];

	// Return them to central free list.
	runtime_MCentral_FreeList(&runtime_mheap.central[sizeclass], n, first);
}
// Return object v of the given size class to the per-thread cache c.
// When the per-class list or the whole cache grows past its limit,
// excess elements are pushed back to the central free lists.
void
runtime_MCache_Free(MCache *c, void *v, int32 sizeclass, uintptr size)
{
	int32 i, n;
	MCacheList *l;
	MLink *p;

	// Put back on list.
	l = &c->list[sizeclass];
	p = v;
	p->next = l->list;
	l->list = p;
	l->nlist++;
	c->size += size;
	c->local_cachealloc -= size;
	c->local_objects--;

	if(l->nlist >= MaxMCacheListLen) {
		// Release a chunk back.
		ReleaseN(c, l, runtime_class_to_transfercount[sizeclass], sizeclass);
	}

	if(c->size >= MaxMCacheSize) {
		// Scavenge: trim every size class, not just this one.
		for(i=0; i<NumSizeClasses; i++) {
			l = &c->list[i];
			n = l->nlistmin;

			// n is the minimum number of elements we've seen on
			// the list since the last scavenge. If n > 0, it means that
			// we could have gotten by with n fewer elements
			// without needing to consult the central free list.
			// Move toward that situation by releasing n/2 of them.
			if(n > 0) {
				if(n > 1)
					n /= 2;
				ReleaseN(c, l, n, i);
			}
			// Reset the low-water mark for the next interval.
			l->nlistmin = l->nlist;
		}
	}
}
// Flush every size class of cache c back to the central free lists,
// leaving the cache empty and its low-water marks reset.
void
runtime_MCache_ReleaseAll(MCache *c)
{
	int32 sizeclass;
	MCacheList *l;

	for(sizeclass=0; sizeclass<NumSizeClasses; sizeclass++) {
		l = &c->list[sizeclass];
		ReleaseN(c, l, l->nlist, sizeclass);
		l->nlistmin = 0;
	}
}
|
<?php
/**
* @file
* Contains Drupal\Tests\Core\ParamConverter\ParamConverterManagerTest.
*/
namespace Drupal\Tests\Core\ParamConverter;
use Drupal\Core\ParamConverter\ParamConverterManager;
use Drupal\Tests\UnitTestCase;
use Symfony\Cmf\Component\Routing\RouteObjectInterface;
use Symfony\Component\HttpFoundation\Request;
use Symfony\Component\Routing\Route;
use Symfony\Component\Routing\RouteCollection;
/**
 * Tests the parameter converter manager.
 *
 * @coversDefaultClass \Drupal\Core\ParamConverter\ParamConverterManager
 */
class ParamConverterManagerTest extends UnitTestCase {

  /**
   * The mocked dependency injection container.
   *
   * @var \Symfony\Component\DependencyInjection\ContainerBuilder|\PHPUnit_Framework_MockObject_MockObject
   */
  protected $container;

  /**
   * The parameter converter manager under test.
   *
   * @var \Drupal\Core\ParamConverter\ParamConverterManager
   */
  protected $manager;

  /**
   * {@inheritdoc}
   */
  public static function getInfo() {
    return array(
      'name' => 'Parameter converter manager',
      'description' => 'Tests the parameter converter manager.',
      'group' => 'Routing',
    );
  }

  /**
   * {@inheritdoc}
   */
  public function setUp() {
    parent::setUp();
    $this->container = $this->getMock('Drupal\Core\DependencyInjection\Container');
    $this->manager = new ParamConverterManager();
    $this->manager->setContainer($this->container);
  }

  /**
   * Tests \Drupal\Core\ParamConverter\ParamConverterManager::addConverter().
   *
   * @dataProvider providerTestAddConverter
   *
   * @covers ::addConverter()
   * @covers ::getConverterIds()
   */
  public function testAddConverter($unsorted, $sorted) {
    foreach ($unsorted as $data) {
      $this->manager->addConverter($data['name'], $data['priority']);
    }
    // Test that ParamConverterManager::getConverterIds() returns the converter
    // ids sorted by priority, highest priority first.
    foreach ($this->manager->getConverterIds() as $key => $converter) {
      $this->assertEquals($sorted[$key], $converter);
    }
  }

  /**
   * Tests \Drupal\Core\ParamConverter\ParamConverterManager::getConverter().
   *
   * @dataProvider providerTestGetConverter
   *
   * @covers ::getConverter()
   */
  public function testGetConverter($name, $priority, $class) {
    $converter = $this->getMockBuilder('Drupal\Core\ParamConverter\ParamConverterInterface')
      ->setMockClassName($class)
      ->getMock();
    $this->manager->addConverter($name, $priority);
    // The container may only be hit once: the manager is expected to cache
    // the resolved converter service.
    $this->container->expects($this->once())
      ->method('get')
      ->with($name)
      ->will($this->returnValue($converter));
    $this->assertInstanceOf($class, $this->manager->getConverter($name));
    // Assert that a second call to getConverter() does not use the container.
    $this->assertInstanceOf($class, $this->manager->getConverter($name));
  }

  /**
   * Tests that getConverter() rejects unknown converter ids.
   *
   * @covers ::getConverter()
   *
   * @expectedException \InvalidArgumentException
   */
  public function testGetConverterException() {
    $this->manager->getConverter('undefined.converter');
  }

  /**
   * Provide data for parameter converter manager tests.
   *
   * @return array
   *   An array of arrays, each containing the input parameters for
   *   ParamConverterManagerTest::testAddConverter().
   *
   * @see ParamConverterManagerTest::testAddConverter().
   */
  public function providerTestAddConverter() {
    $converters[0]['unsorted'] = array(
      array('name' => 'raspberry', 'priority' => 10),
      array('name' => 'pear', 'priority' => 5),
      array('name' => 'strawberry', 'priority' => 20),
      array('name' => 'pineapple', 'priority' => 0),
      array('name' => 'banana', 'priority' => -10),
      array('name' => 'apple', 'priority' => -10),
      array('name' => 'peach', 'priority' => 5),
    );
    $converters[0]['sorted'] = array(
      'strawberry', 'raspberry', 'pear', 'peach',
      'pineapple', 'banana', 'apple'
    );

    $converters[1]['unsorted'] = array(
      array('name' => 'ape', 'priority' => 0),
      array('name' => 'cat', 'priority' => -5),
      array('name' => 'puppy', 'priority' => -10),
      array('name' => 'llama', 'priority' => -15),
      array('name' => 'giraffe', 'priority' => 10),
      array('name' => 'zebra', 'priority' => 10),
      array('name' => 'eagle', 'priority' => 5),
    );
    $converters[1]['sorted'] = array(
      'giraffe', 'zebra', 'eagle', 'ape',
      'cat', 'puppy', 'llama'
    );

    return $converters;
  }

  /**
   * Provide data for parameter converter manager tests.
   *
   * @return array
   *   An array of arrays, each containing the input parameters for
   *   ParamConverterManagerTest::testGetConverter().
   *
   * @see ParamConverterManagerTest::testGetConverter().
   */
  public function providerTestGetConverter() {
    return array(
      array('ape', 0, 'ApeConverterClass'),
      array('cat', -5, 'CatConverterClass'),
      array('puppy', -10, 'PuppyConverterClass'),
      array('llama', -15, 'LlamaConverterClass'),
      array('giraffe', 10, 'GiraffeConverterClass'),
      array('zebra', 10, 'ZebraConverterClass'),
      array('eagle', 5, 'EagleConverterClass'),
    );
  }

  /**
   * @covers ::setRouteParameterConverters()
   *
   * @dataProvider providerTestSetRouteParameterConverters
   */
  public function testSetRouteParameterConverters($path, $parameters = NULL, $expected = NULL) {
    // The converter claims to apply to any 'id' parameter.
    $converter = $this->getMock('Drupal\Core\ParamConverter\ParamConverterInterface');
    $converter->expects($this->any())
      ->method('applies')
      ->with($this->anything(), 'id', $this->anything())
      ->will($this->returnValue(TRUE));
    $this->manager->addConverter('applied');
    $this->container->expects($this->any())
      ->method('get')
      ->with('applied')
      ->will($this->returnValue($converter));

    $route = new Route($path);
    if ($parameters) {
      $route->setOption('parameters', $parameters);
    }
    $collection = new RouteCollection();
    $collection->add('test_route', $route);

    $this->manager->setRouteParameterConverters($collection);
    foreach ($collection as $route) {
      $result = $route->getOption('parameters');
      if ($expected) {
        $this->assertSame($expected, $result['id']['converter']);
      }
      else {
        $this->assertNull($result);
      }
    }
  }

  /**
   * Provides data for testSetRouteParameterConverters().
   */
  public function providerTestSetRouteParameterConverters() {
    return array(
      // Route without placeholders: no converter must be assigned.
      array('/test'),
      // Parameter without an explicit converter: the applying converter
      // 'applied' is filled in.
      array('/test/{id}', array('id' => array()), 'applied'),
      // A pre-assigned converter must not be overwritten.
      array('/test/{id}', array('id' => array('converter' => 'predefined')), 'predefined'),
    );
  }

  /**
   * @covers ::convert()
   */
  public function testConvert() {
    $route = new Route('/test/{id}/{literal}/{null}');
    $parameters = array(
      'id' => array(
        'converter' => 'test_convert',
      ),
      'literal' => array(),
      'null' => array(),
    );
    $route->setOption('parameters', $parameters);

    $defaults = array(
      RouteObjectInterface::ROUTE_OBJECT => $route,
      RouteObjectInterface::ROUTE_NAME => 'test_route',
      'id' => 1,
      'literal' => 'this is a literal',
      'null' => NULL,
    );

    // Only the 'id' parameter has a converter; the others pass through.
    $expected = $defaults;
    $expected['id'] = 'something_better!';

    $converter = $this->getMock('Drupal\Core\ParamConverter\ParamConverterInterface');
    $converter->expects($this->any())
      ->method('convert')
      ->with(1, $this->isType('array'), 'id', $this->isType('array'), $this->isInstanceOf('Symfony\Component\HttpFoundation\Request'))
      ->will($this->returnValue('something_better!'));
    $this->manager->addConverter('test_convert');
    $this->container->expects($this->once())
      ->method('get')
      ->with('test_convert')
      ->will($this->returnValue($converter));

    $result = $this->manager->convert($defaults, new Request());

    $this->assertEquals($expected, $result);
  }

  /**
   * @covers ::convert()
   */
  public function testConvertNoConverting() {
    $route = new Route('/test');
    $defaults = array(
      RouteObjectInterface::ROUTE_OBJECT => $route,
      RouteObjectInterface::ROUTE_NAME => 'test_route',
    );

    // Without any parameter definitions the defaults are returned untouched.
    $expected = $defaults;

    $result = $this->manager->convert($defaults, new Request());
    $this->assertEquals($expected, $result);
  }

  /**
   * @covers ::convert()
   *
   * @expectedException \Drupal\Core\ParamConverter\ParamNotConvertedException
   * @expectedExceptionMessage The "id" parameter was not converted for the path "/test/{id}" (route name: "test_route")
   */
  public function testConvertMissingParam() {
    $route = new Route('/test/{id}');
    $parameters = array(
      'id' => array(
        'converter' => 'test_convert',
      ),
    );
    $route->setOption('parameters', $parameters);

    $defaults = array(
      RouteObjectInterface::ROUTE_OBJECT => $route,
      RouteObjectInterface::ROUTE_NAME => 'test_route',
      'id' => 1,
    );

    // A converter returning NULL means the parameter could not be converted.
    $converter = $this->getMock('Drupal\Core\ParamConverter\ParamConverterInterface');
    $converter->expects($this->any())
      ->method('convert')
      ->with(1, $this->isType('array'), 'id', $this->isType('array'), $this->isInstanceOf('Symfony\Component\HttpFoundation\Request'))
      ->will($this->returnValue(NULL));
    $this->manager->addConverter('test_convert');
    $this->container->expects($this->once())
      ->method('get')
      ->with('test_convert')
      ->will($this->returnValue($converter));

    $this->manager->convert($defaults, new Request());
  }

}
|
<?php
namespace app\properties\handlers\datepicker;
use app\properties\handlers\AbstractHandler;
class DatepickerProperty extends AbstractHandler
{
} |
/* test mpz_divisible_2exp_p */
/*
Copyright 2001 Free Software Foundation, Inc.
This file is part of the GNU MP Library test suite.
The GNU MP Library test suite is free software; you can redistribute it
and/or modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
The GNU MP Library test suite is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
Public License for more details.
You should have received a copy of the GNU General Public License along with
the GNU MP Library test suite. If not, see http://www.gnu.org/licenses/. */
#include <stdio.h>
#include <stdlib.h>
#include "gmp.h"
#include "gmp-impl.h"
#include "tests.h"
/* Verify that mpz_divisible_2exp_p (a, d) normalizes to the expected
   boolean WANT; on mismatch dump the operands in decimal and then in
   hex before aborting.  */
void
check_one (mpz_srcptr a, unsigned long d, int want)
{
  int got = (mpz_divisible_2exp_p (a, d) != 0);

  if (got == want)
    return;

  printf ("mpz_divisible_2exp_p wrong\n");
  printf ("  expected %d got %d\n", want, got);
  mpz_trace ("  a", a);
  printf ("  d=%lu\n", d);

  mp_trace_base = -16;
  mpz_trace ("  a", a);
  printf ("  d=0x%lX\n", d);

  abort ();
}
/* Run mpz_divisible_2exp_p over a table of fixed cases.  Each value is
   checked both as-is and negated, since divisibility by a power of two
   is independent of sign.

   Fix: the previous version declared, initialized and cleared an mpz_t
   "d" that was never used (the table's d field is an unsigned long
   passed directly to check_one); the dead variable is removed.  */
void
check_data (void)
{
  static const struct {
    const char    *a;
    unsigned long  d;
    int            want;
  } data[] = {
    { "0", 0, 1 },
    { "0", 1, 1 },
    { "0", 2, 1 },
    { "0", 3, 1 },

    { "1", 0, 1 },
    { "1", 1, 0 },
    { "1", 2, 0 },
    { "1", 3, 0 },
    { "1", 10000, 0 },

    { "4", 0, 1 },
    { "4", 1, 1 },
    { "4", 2, 1 },
    { "4", 3, 0 },
    { "4", 4, 0 },
    { "4", 10000, 0 },

    { "0x80000000", 31, 1 },
    { "0x80000000", 32, 0 },
    { "0x80000000", 64, 0 },

    { "0x100000000", 32, 1 },
    { "0x100000000", 33, 0 },
    { "0x100000000", 64, 0 },

    { "0x8000000000000000", 63, 1 },
    { "0x8000000000000000", 64, 0 },
    { "0x8000000000000000", 128, 0 },

    { "0x10000000000000000", 64, 1 },
    { "0x10000000000000000", 65, 0 },
    { "0x10000000000000000", 128, 0 },
    { "0x10000000000000000", 256, 0 },

    { "0x10000000000000000100000000", 32, 1 },
    { "0x10000000000000000100000000", 33, 0 },
    { "0x10000000000000000100000000", 64, 0 },

    { "0x1000000000000000010000000000000000", 64, 1 },
    { "0x1000000000000000010000000000000000", 65, 0 },
    { "0x1000000000000000010000000000000000", 128, 0 },
    { "0x1000000000000000010000000000000000", 256, 0 },
    { "0x1000000000000000010000000000000000", 1024, 0 },
  };

  mpz_t  a;
  int    i;

  mpz_init (a);

  for (i = 0; i < numberof (data); i++)
    {
      /* Base 0: let mpz_set_str autodetect the 0x prefix.  */
      mpz_set_str_or_abort (a, data[i].a, 0);
      check_one (a, data[i].d, data[i].want);

      mpz_neg (a, a);
      check_one (a, data[i].d, data[i].want);
    }

  mpz_clear (a);
}
/* Test driver: run the fixed-case checks inside the standard GMP
   test-suite harness (tests_start/tests_end handle allocation
   tracing and leak checking).  */
int
main (int argc, char *argv[])
{
  tests_start ();

  check_data ();

  tests_end ();
  exit (0);
}
|
/*
* inet_diag.c Module for monitoring INET transport protocols sockets.
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/time.h>
#include <net/icmp.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/inet6_hashtables.h>
#include <net/netlink.h>
#include <linux/inet.h>
#include <linux/stddef.h>
#include <linux/inet_diag.h>
#include <linux/sock_diag.h>
static const struct inet_diag_handler **inet_diag_table;
/* Endpoint description for one socket: addresses, ports, family and
 * lock flags.  NOTE(review): presumably used to match sockets against
 * a dump request's bytecode filter — confirm against the dump code
 * further down the file. */
struct inet_diag_entry {
	__be32 *saddr;
	__be32 *daddr;
	u16 sport;
	u16 dport;
	u16 family;
	u16 userlocks;
#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr saddr_storage;	/* for IPv4-mapped-IPv6 addresses */
	struct in6_addr daddr_storage;	/* for IPv4-mapped-IPv6 addresses */
#endif
};
#define INET_DIAG_PUT(skb, attrtype, attrlen) \
RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))
static DEFINE_MUTEX(inet_diag_table_mutex);
/* Look up the diag handler registered for @proto, first giving the
 * corresponding module a chance to load via request_module().
 *
 * Returns the handler, or ERR_PTR(-ENOENT) if none is registered.
 * NOTE: returns with inet_diag_table_mutex held on BOTH paths
 * (success and error); the caller must always drop it with
 * inet_diag_unlock_handler(), even when IS_ERR() is true. */
static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
{
	/* Unlocked peek: only used to decide whether to try loading the
	 * module; the authoritative check is redone under the mutex. */
	if (!inet_diag_table[proto])
		request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
				NETLINK_SOCK_DIAG, AF_INET, proto);

	mutex_lock(&inet_diag_table_mutex);
	if (!inet_diag_table[proto])
		return ERR_PTR(-ENOENT);

	return inet_diag_table[proto];
}
/* Drop the table mutex taken by inet_diag_lock_handler().  The
 * @handler argument is deliberately unused — it may even be an
 * ERR_PTR value from the failed lookup path. */
static inline void inet_diag_unlock_handler(
	const struct inet_diag_handler *handler)
{
	mutex_unlock(&inet_diag_table_mutex);
}
/* Fill one inet_diag_msg netlink message describing socket @sk into
 * @skb.  @icsk may be NULL for sockets that are not connection
 * oriented, in which case only the handler's base info is reported.
 *
 * Returns skb->len on success, or -EMSGSIZE when the skb ran out of
 * room — NLMSG_PUT/INET_DIAG_PUT/RTA_PUT_U8 jump to the
 * nlmsg_failure/rtattr_failure labels, where the partial message is
 * trimmed back to the saved tail @b. */
int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
		      struct sk_buff *skb, struct inet_diag_req_v2 *req,
		      u32 pid, u32 seq, u16 nlmsg_flags,
		      const struct nlmsghdr *unlh)
{
	const struct inet_sock *inet = inet_sk(sk);
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	void *info = NULL;
	struct inet_diag_meminfo *minfo = NULL;
	unsigned char *b = skb_tail_pointer(skb);	/* rollback point */
	const struct inet_diag_handler *handler;
	int ext = req->idiag_ext;	/* bitmask of requested extensions */

	handler = inet_diag_table[req->sdiag_protocol];
	BUG_ON(handler == NULL);

	nlh = NLMSG_PUT(skb, pid, seq, unlh->nlmsg_type, sizeof(*r));
	nlh->nlmsg_flags = nlmsg_flags;

	r = NLMSG_DATA(nlh);
	/* Time-wait sockets are handled by inet_twsk_diag_fill() below. */
	BUG_ON(sk->sk_state == TCP_TIME_WAIT);

	if (ext & (1 << (INET_DIAG_MEMINFO - 1)))
		minfo = INET_DIAG_PUT(skb, INET_DIAG_MEMINFO, sizeof(*minfo));

	r->idiag_family = sk->sk_family;
	r->idiag_state = sk->sk_state;
	r->idiag_timer = 0;
	r->idiag_retrans = 0;

	r->id.idiag_if = sk->sk_bound_dev_if;
	sock_diag_save_cookie(sk, r->id.idiag_cookie);

	r->id.idiag_sport = inet->inet_sport;
	r->id.idiag_dport = inet->inet_dport;
	r->id.idiag_src[0] = inet->inet_rcv_saddr;
	r->id.idiag_dst[0] = inet->inet_daddr;

	if (ext & (1 << (INET_DIAG_TOS - 1)))
		RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos);

#if IS_ENABLED(CONFIG_IPV6)
	if (r->idiag_family == AF_INET6) {
		const struct ipv6_pinfo *np = inet6_sk(sk);

		/* Overwrite the IPv4 slots with the full 128-bit addresses. */
		*(struct in6_addr *)r->id.idiag_src = np->rcv_saddr;
		*(struct in6_addr *)r->id.idiag_dst = np->daddr;

		if (ext & (1 << (INET_DIAG_TCLASS - 1)))
			RTA_PUT_U8(skb, INET_DIAG_TCLASS, np->tclass);
	}
#endif

	r->idiag_uid = sock_i_uid(sk);
	r->idiag_inode = sock_i_ino(sk);

	if (minfo) {
		minfo->idiag_rmem = sk_rmem_alloc_get(sk);
		minfo->idiag_wmem = sk->sk_wmem_queued;
		minfo->idiag_fmem = sk->sk_forward_alloc;
		minfo->idiag_tmem = sk_wmem_alloc_get(sk);
	}

	if (ext & (1 << (INET_DIAG_SKMEMINFO - 1)))
		if (sock_diag_put_meminfo(sk, skb, INET_DIAG_SKMEMINFO))
			goto rtattr_failure;

	if (icsk == NULL) {
		/* Not connection oriented: no timers to report. */
		handler->idiag_get_info(sk, r, NULL);
		goto out;
	}

	/* Remaining lifetime of a pending timer, in milliseconds. */
#define EXPIRES_IN_MS(tmo)  DIV_ROUND_UP((tmo - jiffies) * 1000, HZ)

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		r->idiag_timer = 1;
		r->idiag_retrans = icsk->icsk_retransmits;
		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		r->idiag_timer = 4;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
	} else if (timer_pending(&sk->sk_timer)) {
		/* sk_timer pending — presumably the keepalive timer;
		 * NOTE(review): confirm against the protocol's timer setup. */
		r->idiag_timer = 2;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires = EXPIRES_IN_MS(sk->sk_timer.expires);
	} else {
		r->idiag_timer = 0;
		r->idiag_expires = 0;
	}
#undef EXPIRES_IN_MS

	if (ext & (1 << (INET_DIAG_INFO - 1)))
		info = INET_DIAG_PUT(skb, INET_DIAG_INFO, sizeof(struct tcp_info));

	if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) {
		const size_t len = strlen(icsk->icsk_ca_ops->name);

		strcpy(INET_DIAG_PUT(skb, INET_DIAG_CONG, len + 1),
		       icsk->icsk_ca_ops->name);
	}

	/* Let the protocol handler fill the INET_DIAG_INFO payload. */
	handler->idiag_get_info(sk, r, info);

	if (sk->sk_state < TCP_TIME_WAIT &&
	    icsk->icsk_ca_ops && icsk->icsk_ca_ops->get_info)
		icsk->icsk_ca_ops->get_info(sk, ext, skb);

out:
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

rtattr_failure:
nlmsg_failure:
	/* Undo the partially built message. */
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}
EXPORT_SYMBOL_GPL(inet_sk_diag_fill);
/*
 * Fill a diag message for a full (non-timewait) socket by delegating to
 * inet_sk_diag_fill() together with the socket's connection-sock part.
 */
static int inet_csk_diag_fill(struct sock *sk,
                              struct sk_buff *skb, struct inet_diag_req_v2 *req,
                              u32 pid, u32 seq, u16 nlmsg_flags,
                              const struct nlmsghdr *unlh)
{
    struct inet_connection_sock *icsk = inet_csk(sk);

    return inet_sk_diag_fill(sk, icsk, skb, req, pid, seq, nlmsg_flags, unlh);
}
/*
 * Fill one netlink diag message for a time-wait socket.  Returns skb->len
 * on success, or -EMSGSIZE after trimming everything written for this
 * message so the caller can retry in a fresh skb.
 */
static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
                               struct sk_buff *skb, struct inet_diag_req_v2 *req,
                               u32 pid, u32 seq, u16 nlmsg_flags,
                               const struct nlmsghdr *unlh)
{
    long tmo;
    struct inet_diag_msg *r;
    /* Remember where this message starts so a partial fill can be undone. */
    const unsigned char *previous_tail = skb_tail_pointer(skb);
    /* NLMSG_PUT jumps to the nlmsg_failure label below on lack of space. */
    struct nlmsghdr *nlh = NLMSG_PUT(skb, pid, seq,
                                     unlh->nlmsg_type, sizeof(*r));

    r = NLMSG_DATA(nlh);
    BUG_ON(tw->tw_state != TCP_TIME_WAIT);
    nlh->nlmsg_flags = nlmsg_flags;
    /* Remaining time-wait lifetime, clamped so it never reports negative. */
    tmo = tw->tw_ttd - jiffies;
    if (tmo < 0)
        tmo = 0;
    r->idiag_family = tw->tw_family;
    r->idiag_retrans = 0;
    r->id.idiag_if = tw->tw_bound_dev_if;
    sock_diag_save_cookie(tw, r->id.idiag_cookie);
    r->id.idiag_sport = tw->tw_sport;
    r->id.idiag_dport = tw->tw_dport;
    r->id.idiag_src[0] = tw->tw_rcv_saddr;
    r->id.idiag_dst[0] = tw->tw_daddr;
    r->idiag_state = tw->tw_substate;
    r->idiag_timer = 3;
    /* Convert remaining jiffies to milliseconds, rounding up. */
    r->idiag_expires = DIV_ROUND_UP(tmo * 1000, HZ);
    r->idiag_rqueue = 0;
    r->idiag_wqueue = 0;
    r->idiag_uid = 0;
    r->idiag_inode = 0;
#if IS_ENABLED(CONFIG_IPV6)
    if (tw->tw_family == AF_INET6) {
        /* Overwrite the v4 slots with the full 128-bit addresses. */
        const struct inet6_timewait_sock *tw6 =
            inet6_twsk((struct sock *)tw);
        *(struct in6_addr *)r->id.idiag_src = tw6->tw_v6_rcv_saddr;
        *(struct in6_addr *)r->id.idiag_dst = tw6->tw_v6_daddr;
    }
#endif
    nlh->nlmsg_len = skb_tail_pointer(skb) - previous_tail;
    return skb->len;
nlmsg_failure:
    nlmsg_trim(skb, previous_tail);
    return -EMSGSIZE;
}
/*
 * Dispatch to the time-wait or full-socket filler depending on socket
 * state; both paths emit one diag message into @skb.
 */
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
                        struct inet_diag_req_v2 *r, u32 pid, u32 seq, u16 nlmsg_flags,
                        const struct nlmsghdr *unlh)
{
    if (sk->sk_state == TCP_TIME_WAIT) {
        struct inet_timewait_sock *tw = (struct inet_timewait_sock *)sk;

        return inet_twsk_diag_fill(tw, skb, r, pid, seq, nlmsg_flags, unlh);
    }

    return inet_csk_diag_fill(sk, skb, r, pid, seq, nlmsg_flags, unlh);
}
/*
 * Look up exactly one socket matching @req in @hashinfo and unicast a
 * filled diag message back to the requester.  Returns 0 on success or a
 * negative errno.
 */
int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_skb,
                            const struct nlmsghdr *nlh, struct inet_diag_req_v2 *req)
{
    int err;
    struct sock *sk;
    struct sk_buff *rep;

    err = -EINVAL;
    if (req->sdiag_family == AF_INET) {
        sk = inet_lookup(&init_net, hashinfo, req->id.idiag_dst[0],
                         req->id.idiag_dport, req->id.idiag_src[0],
                         req->id.idiag_sport, req->id.idiag_if);
    }
#if IS_ENABLED(CONFIG_IPV6)
    else if (req->sdiag_family == AF_INET6) {
        sk = inet6_lookup(&init_net, hashinfo,
                          (struct in6_addr *)req->id.idiag_dst,
                          req->id.idiag_dport,
                          (struct in6_addr *)req->id.idiag_src,
                          req->id.idiag_sport,
                          req->id.idiag_if);
    }
#endif
    else {
        /* Unsupported address family. */
        goto out_nosk;
    }
    err = -ENOENT;
    if (sk == NULL)
        goto out_nosk;
    /* Make sure the caller still refers to this very socket instance. */
    err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
    if (err)
        goto out;
    err = -ENOMEM;
    /* Size the answer for base message + meminfo + tcp_info + slack. */
    rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
                    sizeof(struct inet_diag_meminfo) +
                    sizeof(struct tcp_info) + 64)),
                    GFP_KERNEL);
    if (!rep)
        goto out;
    err = sk_diag_fill(sk, rep, req,
                       NETLINK_CB(in_skb).pid,
                       nlh->nlmsg_seq, 0, nlh);
    if (err < 0) {
        /* The reply skb was sized for one full message; it must fit. */
        WARN_ON(err == -EMSGSIZE);
        kfree_skb(rep);
        goto out;
    }
    err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
                          MSG_DONTWAIT);
    if (err > 0)
        err = 0;
out:
    if (sk) {
        /* Drop the reference taken by the lookup above; time-wait
         * sockets use their own put routine. */
        if (sk->sk_state == TCP_TIME_WAIT)
            inet_twsk_put((struct inet_timewait_sock *)sk);
        else
            sock_put(sk);
    }
out_nosk:
    return err;
}
EXPORT_SYMBOL_GPL(inet_diag_dump_one_icsk);
/*
 * Resolve the per-protocol diag handler and ask it for exactly one
 * socket.  Propagates the handler lookup error if the protocol has no
 * registered handler.
 */
static int inet_diag_get_exact(struct sk_buff *in_skb,
                               const struct nlmsghdr *nlh,
                               struct inet_diag_req_v2 *req)
{
    const struct inet_diag_handler *handler;
    int err;

    handler = inet_diag_lock_handler(req->sdiag_protocol);
    if (!IS_ERR(handler))
        err = handler->dump_one(in_skb, nlh, req);
    else
        err = PTR_ERR(handler);
    inet_diag_unlock_handler(handler);

    return err;
}
/*
 * Compare the leading @bits bits of two big-endian 32-bit word arrays.
 * Returns 1 on match, 0 otherwise.
 */
static int bitstring_match(const __be32 *a1, const __be32 *a2, int bits)
{
    const int full_words = bits >> 5;   /* whole 32-bit words to compare */
    const int rem_bits   = bits & 0x1f; /* leftover bits in the next word */

    if (full_words && memcmp(a1, a2, full_words << 2))
        return 0;

    if (rem_bits) {
        /* Mask selecting the top rem_bits bits of the partial word. */
        __be32 mask = htonl(0xffffffff << (32 - rem_bits));

        if ((a1[full_words] ^ a2[full_words]) & mask)
            return 0;
    }

    return 1;
}
/*
 * Interpret the socket-filter bytecode in @_bc against one candidate
 * socket described by @entry.  Each opcode evaluates to yes/no and the
 * cursor then advances by op->yes or op->no bytes.  Returns non-zero
 * when the walk consumes the program exactly (the entry matches).
 */
static int inet_diag_bc_run(const struct nlattr *_bc,
                            const struct inet_diag_entry *entry)
{
    const void *bc = nla_data(_bc);
    int len = nla_len(_bc);

    while (len > 0) {
        int yes = 1;
        const struct inet_diag_bc_op *op = bc;

        switch (op->code) {
        case INET_DIAG_BC_NOP:
            break;
        case INET_DIAG_BC_JMP:
            /* Unconditional jump taken via the "no" branch. */
            yes = 0;
            break;
        case INET_DIAG_BC_S_GE:
            /* Port operands live in the follow-on op's "no" field. */
            yes = entry->sport >= op[1].no;
            break;
        case INET_DIAG_BC_S_LE:
            yes = entry->sport <= op[1].no;
            break;
        case INET_DIAG_BC_D_GE:
            yes = entry->dport >= op[1].no;
            break;
        case INET_DIAG_BC_D_LE:
            yes = entry->dport <= op[1].no;
            break;
        case INET_DIAG_BC_AUTO:
            yes = !(entry->userlocks & SOCK_BINDPORT_LOCK);
            break;
        case INET_DIAG_BC_S_COND:
        case INET_DIAG_BC_D_COND: {
            /* Host condition: optional port plus an address prefix. */
            struct inet_diag_hostcond *cond;
            __be32 *addr;

            cond = (struct inet_diag_hostcond *)(op + 1);
            if (cond->port != -1 &&
                cond->port != (op->code == INET_DIAG_BC_S_COND ?
                               entry->sport : entry->dport)) {
                yes = 0;
                break;
            }

            if (op->code == INET_DIAG_BC_S_COND)
                addr = entry->saddr;
            else
                addr = entry->daddr;

            if (cond->family != AF_UNSPEC &&
                cond->family != entry->family) {
                /* Allow a v4 condition to match a v4-mapped v6
                 * address (::ffff:a.b.c.d). */
                if (entry->family == AF_INET6 &&
                    cond->family == AF_INET) {
                    if (addr[0] == 0 && addr[1] == 0 &&
                        addr[2] == htonl(0xffff) &&
                        bitstring_match(addr + 3,
                                        cond->addr,
                                        cond->prefix_len))
                        break;
                }
                yes = 0;
                break;
            }

            /* Zero-length prefix matches any address. */
            if (cond->prefix_len == 0)
                break;
            if (bitstring_match(addr, cond->addr,
                                cond->prefix_len))
                break;
            yes = 0;
            break;
        }
        }

        if (yes) {
            len -= op->yes;
            bc += op->yes;
        } else {
            len -= op->no;
            bc += op->no;
        }
    }
    return len == 0;
}
/*
 * Run the filter bytecode @bc against socket @sk.  A NULL filter
 * matches everything.  Returns non-zero when the socket passes.
 */
int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
{
    struct inet_diag_entry entry;
    struct inet_sock *inet = inet_sk(sk);

    if (bc == NULL)
        return 1;

    entry.family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
    if (entry.family == AF_INET6) {
        struct ipv6_pinfo *np = inet6_sk(sk);

        /* Point directly at the socket's in6_addr word arrays. */
        entry.saddr = np->rcv_saddr.s6_addr32;
        entry.daddr = np->daddr.s6_addr32;
    } else
#endif
    {
        entry.saddr = &inet->inet_rcv_saddr;
        entry.daddr = &inet->inet_daddr;
    }
    /* inet_num is already host order; dport is converted to match. */
    entry.sport = inet->inet_num;
    entry.dport = ntohs(inet->inet_dport);
    entry.userlocks = sk->sk_userlocks;

    return inet_diag_bc_run(bc, &entry);
}
EXPORT_SYMBOL_GPL(inet_diag_bc_sk);
/*
 * Walk the bytecode from the start and check that a jump target @cc
 * (expressed as bytes remaining at the target) lands exactly on an
 * instruction boundary.  Returns 1 when the target is valid.
 */
static int valid_cc(const void *bc, int len, int cc)
{
    while (len >= 0) {
        const struct inet_diag_bc_op *op = bc;

        /* Target beyond the current position: cannot be reached. */
        if (cc > len)
            return 0;
        /* Target coincides with this instruction's start: valid. */
        if (cc == len)
            return 1;
        /* Steps must advance by at least one 4-byte-aligned opcode. */
        if (op->yes < 4 || op->yes & 3)
            return 0;
        len -= op->yes;
        bc += op->yes;
    }
    return 0;
}
/* Validate an inet_diag_hostcond. */
static bool valid_hostcond(const struct inet_diag_bc_op *op, int len,
int *min_len)
{
int addr_len;
struct inet_diag_hostcond *cond;
/* Check hostcond space. */
*min_len += sizeof(struct inet_diag_hostcond);
if (len < *min_len)
return false;
cond = (struct inet_diag_hostcond *)(op + 1);
/* Check address family and address length. */
switch (cond->family) {
case AF_UNSPEC:
addr_len = 0;
break;
case AF_INET:
addr_len = sizeof(struct in_addr);
break;
case AF_INET6:
addr_len = sizeof(struct in6_addr);
break;
default:
return false;
}
*min_len += addr_len;
if (len < *min_len)
return false;
/* Check prefix length (in bits) vs address length (in bytes). */
if (cond->prefix_len > 8 * addr_len)
return false;
return true;
}
/* Validate a port comparison operator: the port value to compare
 * against is carried in a follow-on inet_diag_bc_op, which must fit in
 * the remaining bytecode. */
static inline bool valid_port_comparison(const struct inet_diag_bc_op *op,
                                         int len, int *min_len)
{
    *min_len += sizeof(struct inet_diag_bc_op);
    return len >= *min_len;
}
/*
 * Statically validate filter bytecode coming from userspace before it
 * is ever interpreted: every opcode must be known, carry enough
 * payload, and both jump offsets must be 4-byte aligned, stay in
 * bounds, and land on instruction boundaries.  Returns 0 if valid,
 * -EINVAL otherwise.
 */
static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
{
    const void *bc = bytecode;
    int len = bytecode_len;

    while (len > 0) {
        const struct inet_diag_bc_op *op = bc;
        /* Minimum size for this opcode; grown by the validators. */
        int min_len = sizeof(struct inet_diag_bc_op);

        switch (op->code) {
        case INET_DIAG_BC_S_COND:
        case INET_DIAG_BC_D_COND:
            if (!valid_hostcond(bc, len, &min_len))
                return -EINVAL;
            break;
        case INET_DIAG_BC_S_GE:
        case INET_DIAG_BC_S_LE:
        case INET_DIAG_BC_D_GE:
        case INET_DIAG_BC_D_LE:
            if (!valid_port_comparison(bc, len, &min_len))
                return -EINVAL;
            break;
        case INET_DIAG_BC_AUTO:
        case INET_DIAG_BC_JMP:
        case INET_DIAG_BC_NOP:
            break;
        default:
            return -EINVAL;
        }

        if (op->code != INET_DIAG_BC_NOP) {
            /* "no" may point at most one instruction past the end
             * (len + 4) and must land on an instruction boundary. */
            if (op->no < min_len || op->no > len + 4 || op->no & 3)
                return -EINVAL;
            if (op->no < len &&
                !valid_cc(bytecode, bytecode_len, len - op->no))
                return -EINVAL;
        }

        /* Same bounds/alignment rules for the "yes" branch. */
        if (op->yes < min_len || op->yes > len + 4 || op->yes & 3)
            return -EINVAL;
        bc += op->yes;
        len -= op->yes;
    }
    return len == 0 ? 0 : -EINVAL;
}
static int inet_csk_diag_dump(struct sock *sk,
struct sk_buff *skb,
struct netlink_callback *cb,
struct inet_diag_req_v2 *r,
const struct nlattr *bc)
{
if (!inet_diag_bc_sk(bc, sk))
return 0;
return inet_csk_diag_fill(sk, skb, r,
NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}
/*
 * Emit one dump entry for a time-wait socket, applying the optional
 * filter bytecode first.
 */
static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
                               struct sk_buff *skb,
                               struct netlink_callback *cb,
                               struct inet_diag_req_v2 *r,
                               const struct nlattr *bc)
{
    if (bc != NULL) {
        /* Build a filter entry from the time-wait socket's fields. */
        struct inet_diag_entry entry;

        entry.family = tw->tw_family;
#if IS_ENABLED(CONFIG_IPV6)
        if (tw->tw_family == AF_INET6) {
            struct inet6_timewait_sock *tw6 =
                inet6_twsk((struct sock *)tw);
            entry.saddr = tw6->tw_v6_rcv_saddr.s6_addr32;
            entry.daddr = tw6->tw_v6_daddr.s6_addr32;
        } else
#endif
        {
            entry.saddr = &tw->tw_rcv_saddr;
            entry.daddr = &tw->tw_daddr;
        }
        entry.sport = tw->tw_num;
        entry.dport = ntohs(tw->tw_dport);
        /* Time-wait minisocks carry no user locks. */
        entry.userlocks = 0;

        if (!inet_diag_bc_run(bc, &entry))
            return 0;
    }

    return inet_twsk_diag_fill(tw, skb, r,
                               NETLINK_CB(cb->skb).pid,
                               cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}
/* Get the IPv4, IPv6, or IPv4-mapped-IPv6 local and remote addresses
 * from a request_sock. For IPv4-mapped-IPv6 we must map IPv4 to IPv6.
 */
static inline void inet_diag_req_addrs(const struct sock *sk,
                                       const struct request_sock *req,
                                       struct inet_diag_entry *entry)
{
    struct inet_request_sock *ireq = inet_rsk(req);

#if IS_ENABLED(CONFIG_IPV6)
    if (sk->sk_family == AF_INET6) {
        if (req->rsk_ops->family == AF_INET6) {
            /* Native IPv6 request: point at its v6 addresses. */
            entry->saddr = inet6_rsk(req)->loc_addr.s6_addr32;
            entry->daddr = inet6_rsk(req)->rmt_addr.s6_addr32;
        } else if (req->rsk_ops->family == AF_INET) {
            /* v4 request on a v6 listener: synthesize mapped
             * addresses into the entry's own storage so the
             * returned pointers stay valid. */
            ipv6_addr_set_v4mapped(ireq->loc_addr,
                                   &entry->saddr_storage);
            ipv6_addr_set_v4mapped(ireq->rmt_addr,
                                   &entry->daddr_storage);
            entry->saddr = entry->saddr_storage.s6_addr32;
            entry->daddr = entry->daddr_storage.s6_addr32;
        }
    } else
#endif
    {
        entry->saddr = &ireq->loc_addr;
        entry->daddr = &ireq->rmt_addr;
    }
}
/*
 * Fill one diag message for a pending request_sock (reported as state
 * TCP_SYN_RECV).  Returns skb->len on success, or -1 after trimming if
 * the skb ran out of room.
 */
static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
                              struct request_sock *req, u32 pid, u32 seq,
                              const struct nlmsghdr *unlh)
{
    const struct inet_request_sock *ireq = inet_rsk(req);
    struct inet_sock *inet = inet_sk(sk);
    /* Message start, so a partial fill can be trimmed again. */
    unsigned char *b = skb_tail_pointer(skb);
    struct inet_diag_msg *r;
    struct nlmsghdr *nlh;
    long tmo;

    /* NLMSG_PUT branches to the nlmsg_failure label on lack of space. */
    nlh = NLMSG_PUT(skb, pid, seq, unlh->nlmsg_type, sizeof(*r));
    nlh->nlmsg_flags = NLM_F_MULTI;
    r = NLMSG_DATA(nlh);

    r->idiag_family = sk->sk_family;
    r->idiag_state = TCP_SYN_RECV;
    r->idiag_timer = 1;
    r->idiag_retrans = req->retrans;

    r->id.idiag_if = sk->sk_bound_dev_if;
    sock_diag_save_cookie(req, r->id.idiag_cookie);

    /* Time until the request expires, clamped to zero. */
    tmo = req->expires - jiffies;
    if (tmo < 0)
        tmo = 0;

    r->id.idiag_sport = inet->inet_sport;
    r->id.idiag_dport = ireq->rmt_port;
    r->id.idiag_src[0] = ireq->loc_addr;
    r->id.idiag_dst[0] = ireq->rmt_addr;
    r->idiag_expires = jiffies_to_msecs(tmo);
    r->idiag_rqueue = 0;
    r->idiag_wqueue = 0;
    r->idiag_uid = sock_i_uid(sk);
    r->idiag_inode = 0;
#if IS_ENABLED(CONFIG_IPV6)
    if (r->idiag_family == AF_INET6) {
        /* Overwrite the v4 slots with the full 128-bit addresses. */
        struct inet_diag_entry entry;
        inet_diag_req_addrs(sk, req, &entry);
        memcpy(r->id.idiag_src, entry.saddr, sizeof(struct in6_addr));
        memcpy(r->id.idiag_dst, entry.daddr, sizeof(struct in6_addr));
    }
#endif
    nlh->nlmsg_len = skb_tail_pointer(skb) - b;

    return skb->len;

nlmsg_failure:
    nlmsg_trim(skb, b);
    return -1;
}
/*
 * Dump the pending (SYN_RECV) request_socks of listener @sk.  The
 * resume cursor lives in cb->args[3] (hash slot + 1) and cb->args[4]
 * (index within that slot's chain).  Called and resumed under the
 * listener's syn_wait_lock.
 */
static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
                               struct netlink_callback *cb,
                               struct inet_diag_req_v2 *r,
                               const struct nlattr *bc)
{
    struct inet_diag_entry entry;
    struct inet_connection_sock *icsk = inet_csk(sk);
    struct listen_sock *lopt;
    struct inet_sock *inet = inet_sk(sk);
    int j, s_j;
    int reqnum, s_reqnum;
    int err = 0;

    s_j = cb->args[3];
    s_reqnum = cb->args[4];

    /* args[3] stores slot + 1; zero means "start from the beginning". */
    if (s_j > 0)
        s_j--;

    entry.family = sk->sk_family;

    read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);

    lopt = icsk->icsk_accept_queue.listen_opt;
    if (!lopt || !lopt->qlen)
        goto out;

    if (bc != NULL) {
        /* These fields are invariant across requests; set them once. */
        entry.sport = inet->inet_num;
        entry.userlocks = sk->sk_userlocks;
    }

    for (j = s_j; j < lopt->nr_table_entries; j++) {
        struct request_sock *req, *head = lopt->syn_table[j];

        reqnum = 0;
        for (req = head; req; reqnum++, req = req->dl_next) {
            struct inet_request_sock *ireq = inet_rsk(req);

            /* Skip entries already dumped on a previous pass. */
            if (reqnum < s_reqnum)
                continue;
            if (r->id.idiag_dport != ireq->rmt_port &&
                r->id.idiag_dport)
                continue;

            if (bc) {
                inet_diag_req_addrs(sk, req, &entry);
                entry.dport = ntohs(ireq->rmt_port);

                if (!inet_diag_bc_run(bc, &entry))
                    continue;
            }

            err = inet_diag_fill_req(skb, sk, req,
                                     NETLINK_CB(cb->skb).pid,
                                     cb->nlh->nlmsg_seq, cb->nlh);
            if (err < 0) {
                /* Out of skb space: remember where to resume. */
                cb->args[3] = j + 1;
                cb->args[4] = reqnum;
                goto out;
            }
        }

        s_reqnum = 0;
    }

out:
    read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
    return err;
}
/*
 * Dump all sockets in @hashinfo matching request @r.  The netlink dump
 * cursor is kept in cb->args: [0] phase (0 = listening hash, 1 =
 * established hash), [1] bucket index, [2] position within the bucket,
 * [3]/[4] request_sock resume cursor (see inet_diag_dump_reqs()).
 */
void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
                         struct netlink_callback *cb, struct inet_diag_req_v2 *r, struct nlattr *bc)
{
    int i, num;
    int s_i, s_num;

    s_i = cb->args[1];
    s_num = num = cb->args[2];

    if (cb->args[0] == 0) {
        /* Phase 0: walk the listening hash for LISTEN/SYN_RECV. */
        if (!(r->idiag_states & (TCPF_LISTEN | TCPF_SYN_RECV)))
            goto skip_listen_ht;

        for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
            struct sock *sk;
            struct hlist_nulls_node *node;
            struct inet_listen_hashbucket *ilb;

            num = 0;
            ilb = &hashinfo->listening_hash[i];
            spin_lock_bh(&ilb->lock);
            sk_nulls_for_each(sk, node, &ilb->head) {
                struct inet_sock *inet = inet_sk(sk);

                /* Skip entries dumped on a previous pass. */
                if (num < s_num) {
                    num++;
                    continue;
                }
                if (r->sdiag_family != AF_UNSPEC &&
                    sk->sk_family != r->sdiag_family)
                    goto next_listen;
                if (r->id.idiag_sport != inet->inet_sport &&
                    r->id.idiag_sport)
                    goto next_listen;
                /* Dump the listener itself only when LISTEN is
                 * requested, no dport filter is set, and we are not
                 * resuming inside its request queue. */
                if (!(r->idiag_states & TCPF_LISTEN) ||
                    r->id.idiag_dport ||
                    cb->args[3] > 0)
                    goto syn_recv;
                if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
                    spin_unlock_bh(&ilb->lock);
                    goto done;
                }
syn_recv:
                if (!(r->idiag_states & TCPF_SYN_RECV))
                    goto next_listen;
                if (inet_diag_dump_reqs(skb, sk, cb, r, bc) < 0) {
                    spin_unlock_bh(&ilb->lock);
                    goto done;
                }
next_listen:
                cb->args[3] = 0;
                cb->args[4] = 0;
                ++num;
            }
            spin_unlock_bh(&ilb->lock);

            s_num = 0;
            cb->args[3] = 0;
            cb->args[4] = 0;
        }
skip_listen_ht:
        cb->args[0] = 1;
        s_i = num = s_num = 0;
    }

    /* Phase 1: walk the established hash and its time-wait chain. */
    if (!(r->idiag_states & ~(TCPF_LISTEN | TCPF_SYN_RECV)))
        goto out;

    for (i = s_i; i <= hashinfo->ehash_mask; i++) {
        struct inet_ehash_bucket *head = &hashinfo->ehash[i];
        spinlock_t *lock = inet_ehash_lockp(hashinfo, i);
        struct sock *sk;
        struct hlist_nulls_node *node;

        num = 0;

        if (hlist_nulls_empty(&head->chain) &&
            hlist_nulls_empty(&head->twchain))
            continue;

        if (i > s_i)
            s_num = 0;

        spin_lock_bh(lock);
        sk_nulls_for_each(sk, node, &head->chain) {
            struct inet_sock *inet = inet_sk(sk);

            if (num < s_num)
                goto next_normal;
            if (!(r->idiag_states & (1 << sk->sk_state)))
                goto next_normal;
            if (r->sdiag_family != AF_UNSPEC &&
                sk->sk_family != r->sdiag_family)
                goto next_normal;
            if (r->id.idiag_sport != inet->inet_sport &&
                r->id.idiag_sport)
                goto next_normal;
            if (r->id.idiag_dport != inet->inet_dport &&
                r->id.idiag_dport)
                goto next_normal;
            if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
                spin_unlock_bh(lock);
                goto done;
            }
next_normal:
            ++num;
        }

        if (r->idiag_states & TCPF_TIME_WAIT) {
            struct inet_timewait_sock *tw;

            inet_twsk_for_each(tw, node,
                               &head->twchain) {

                if (num < s_num)
                    goto next_dying;
                if (r->sdiag_family != AF_UNSPEC &&
                    tw->tw_family != r->sdiag_family)
                    goto next_dying;
                if (r->id.idiag_sport != tw->tw_sport &&
                    r->id.idiag_sport)
                    goto next_dying;
                if (r->id.idiag_dport != tw->tw_dport &&
                    r->id.idiag_dport)
                    goto next_dying;
                if (inet_twsk_diag_dump(tw, skb, cb, r, bc) < 0) {
                    spin_unlock_bh(lock);
                    goto done;
                }
next_dying:
                ++num;
            }
        }
        spin_unlock_bh(lock);
    }

done:
    /* Save the cursor so the next invocation resumes here. */
    cb->args[1] = i;
    cb->args[2] = num;
out:
    ;
}
EXPORT_SYMBOL_GPL(inet_diag_dump_icsk);
/*
 * Resolve the per-protocol handler and run its dump callback.  Returns
 * the handler lookup error, or skb->len (the netlink dump convention)
 * on success.
 */
static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
                            struct inet_diag_req_v2 *r, struct nlattr *bc)
{
    const struct inet_diag_handler *handler;
    int err = 0;

    handler = inet_diag_lock_handler(r->sdiag_protocol);
    if (IS_ERR(handler))
        err = PTR_ERR(handler);
    else
        handler->dump(skb, cb, r, bc);
    inet_diag_unlock_handler(handler);

    return err ? err : skb->len;
}
/*
 * Dump entry point for the v2 request format: extract the optional
 * filter bytecode attribute and hand off to __inet_diag_dump().
 */
static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
    const int hdrlen = sizeof(struct inet_diag_req_v2);
    struct inet_diag_req_v2 *req = NLMSG_DATA(cb->nlh);
    struct nlattr *bc = NULL;

    if (nlmsg_attrlen(cb->nlh, hdrlen))
        bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);

    return __inet_diag_dump(skb, cb, req, bc);
}
/*
 * Map a legacy (compat) netlink message type to the corresponding IP
 * protocol number; unknown types map to 0.
 */
static inline int inet_diag_type2proto(int type)
{
    if (type == TCPDIAG_GETSOCK)
        return IPPROTO_TCP;
    if (type == DCCPDIAG_GETSOCK)
        return IPPROTO_DCCP;
    return 0;
}
static int inet_diag_dump_compat(struct sk_buff *skb, struct netlink_callback *cb)
{
struct inet_diag_req *rc = NLMSG_DATA(cb->nlh);
struct inet_diag_req_v2 req;
struct nlattr *bc = NULL;
int hdrlen = sizeof(struct inet_diag_req);
req.sdiag_family = AF_UNSPEC;
req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
req.idiag_ext = rc->idiag_ext;
req.idiag_states = rc->idiag_states;
req.id = rc->id;
if (nlmsg_attrlen(cb->nlh, hdrlen))
bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);
return __inet_diag_dump(skb, cb, &req, bc);
}
static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
const struct nlmsghdr *nlh)
{
struct inet_diag_req *rc = NLMSG_DATA(nlh);
struct inet_diag_req_v2 req;
req.sdiag_family = rc->idiag_family;
req.sdiag_protocol = inet_diag_type2proto(nlh->nlmsg_type);
req.idiag_ext = rc->idiag_ext;
req.idiag_states = rc->idiag_states;
req.id = rc->id;
return inet_diag_get_exact(in_skb, nlh, &req);
}
/*
 * Receive handler for legacy-format diag requests.  Validates the
 * header and any attached filter bytecode before starting a dump, or
 * answers a single-socket query directly.
 */
static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
{
    const int hdrlen = sizeof(struct inet_diag_req);

    if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX ||
        nlmsg_len(nlh) < hdrlen)
        return -EINVAL;

    if (!(nlh->nlmsg_flags & NLM_F_DUMP))
        return inet_diag_get_exact_compat(skb, nlh);

    /* Audit any attached filter bytecode before it can ever run. */
    if (nlmsg_attrlen(nlh, hdrlen)) {
        const struct nlattr *attr =
            nlmsg_find_attr(nlh, hdrlen, INET_DIAG_REQ_BYTECODE);

        if (!attr ||
            nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
            inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
            return -EINVAL;
    }

    {
        struct netlink_dump_control c = {
            .dump = inet_diag_dump_compat,
        };
        return netlink_dump_start(sock_diag_nlsk, skb, nlh, &c);
    }
}
/*
 * Receive handler for v2-format diag requests.  Mirrors the compat
 * handler: validate the header and filter bytecode, then either start
 * a dump or answer a single-socket query.
 */
static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
    const int hdrlen = sizeof(struct inet_diag_req_v2);

    if (nlmsg_len(h) < hdrlen)
        return -EINVAL;

    if (!(h->nlmsg_flags & NLM_F_DUMP))
        return inet_diag_get_exact(skb, h,
                                   (struct inet_diag_req_v2 *)NLMSG_DATA(h));

    /* Audit any attached filter bytecode before it can ever run. */
    if (nlmsg_attrlen(h, hdrlen)) {
        const struct nlattr *attr =
            nlmsg_find_attr(h, hdrlen, INET_DIAG_REQ_BYTECODE);

        if (!attr ||
            nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
            inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
            return -EINVAL;
    }

    {
        struct netlink_dump_control c = {
            .dump = inet_diag_dump,
        };
        return netlink_dump_start(sock_diag_nlsk, skb, h, &c);
    }
}
/* AF_INET sock_diag requests are routed to inet_diag_handler_dump(). */
static struct sock_diag_handler inet_diag_handler = {
    .family = AF_INET,
    .dump = inet_diag_handler_dump,
};

/* AF_INET6 requests share the same dump implementation. */
static struct sock_diag_handler inet6_diag_handler = {
    .family = AF_INET6,
    .dump = inet_diag_handler_dump,
};
/*
 * Register a per-protocol diag handler.  Returns -EINVAL for an
 * out-of-range protocol, -EEXIST when the slot is already taken, and
 * 0 on success.
 */
int inet_diag_register(const struct inet_diag_handler *h)
{
    const __u16 type = h->idiag_type;
    int err;

    if (type >= IPPROTO_MAX)
        return -EINVAL;

    mutex_lock(&inet_diag_table_mutex);
    if (inet_diag_table[type]) {
        err = -EEXIST;
    } else {
        inet_diag_table[type] = h;
        err = 0;
    }
    mutex_unlock(&inet_diag_table_mutex);

    return err;
}
EXPORT_SYMBOL_GPL(inet_diag_register);
/*
 * Remove a protocol handler previously installed with
 * inet_diag_register().  Out-of-range types are silently ignored.
 */
void inet_diag_unregister(const struct inet_diag_handler *h)
{
    const __u16 type = h->idiag_type;

    if (type >= IPPROTO_MAX)
        return;

    mutex_lock(&inet_diag_table_mutex);
    inet_diag_table[type] = NULL;
    mutex_unlock(&inet_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(inet_diag_unregister);
/*
 * Module init: allocate the per-protocol handler table, register the
 * v4 and v6 sock_diag handlers, and hook up the legacy compat receive
 * path.  Unwinds in reverse order on failure.
 */
static int __init inet_diag_init(void)
{
    /* One handler slot per IP protocol number. */
    const int inet_diag_table_size = (IPPROTO_MAX *
                                      sizeof(struct inet_diag_handler *));
    int err = -ENOMEM;

    inet_diag_table = kzalloc(inet_diag_table_size, GFP_KERNEL);
    if (!inet_diag_table)
        goto out;

    err = sock_diag_register(&inet_diag_handler);
    if (err)
        goto out_free_nl;

    err = sock_diag_register(&inet6_diag_handler);
    if (err)
        goto out_free_inet;

    sock_diag_register_inet_compat(inet_diag_rcv_msg_compat);
out:
    return err;

out_free_inet:
    /* v6 registration failed: undo the v4 registration first. */
    sock_diag_unregister(&inet_diag_handler);
out_free_nl:
    kfree(inet_diag_table);
    goto out;
}
/*
 * Module exit: unregister both diag handlers and the legacy compat
 * receive hook, then free the handler table.
 */
static void __exit inet_diag_exit(void)
{
    sock_diag_unregister(&inet6_diag_handler);
    sock_diag_unregister(&inet_diag_handler);
    sock_diag_unregister_inet_compat(inet_diag_rcv_msg_compat);
    kfree(inet_diag_table);
}
module_init(inet_diag_init);
module_exit(inet_diag_exit);
MODULE_LICENSE("GPL");
/* Autoload on NETLINK_SOCK_DIAG requests: 2 = AF_INET, 10 = AF_INET6. */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2 );
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 10 );
|
-----------------------------------------
-- ID: 5581
-- Item: Slice of Ziz Meat
-- Effect: 5 Minutes, food effect, Galka Only
-----------------------------------------
-- Strength +4
-- Intelligence -6
-----------------------------------------
require("scripts/globals/status");
-----------------------------------------
-- OnItemCheck
-----------------------------------------
function onItemCheck(target)
    -- Precompute the three eligibility checks.
    local isGalka = (target:getRace() == 8);
    local canEatRaw = (target:getMod(MOD_EAT_RAW_MEAT) == 1);
    local alreadyFed = (target:hasStatusEffect(EFFECT_FOOD) == true or target:hasStatusEffect(EFFECT_FIELD_SUPPORT_FOOD) == true);

    -- An active food effect always blocks eating (message 246);
    -- otherwise the raw-meat mod bypasses the Galka-only rule (247).
    if (alreadyFed) then
        return 246;
    elseif (canEatRaw) then
        return 0;
    elseif (not isGalka) then
        return 247;
    end
    return 0;
end;
-----------------------------------------
-- OnItemUse
-----------------------------------------
function onItemUse(target)
    -- Apply the food effect: power 0, tick 0, 300 s (5 min), sub id 5581 (this item).
    local duration = 300;
    local subId = 5581;
    target:addStatusEffect(EFFECT_FOOD, 0, 0, duration, subId);
end;
-----------------------------------------
-- onEffectGain Action
-----------------------------------
function onEffectGain(target,effect)
    -- Slice of Ziz Meat bonuses: +4 STR, -6 INT.
    local foodMods = { [MOD_STR] = 4, [MOD_INT] = -6 };
    for mod, value in pairs(foodMods) do
        target:addMod(mod, value);
    end
end;
-----------------------------------------
-- onEffectLose Action
-----------------------------------
function onEffectLose(target,effect)
    -- Remove the food bonuses applied in onEffectGain (+4 STR, -6 INT).
    target:delMod(MOD_STR, 4);
    target:delMod(MOD_INT,-6);
end; |
@echo off
rem pl2bat-style wrapper: run the Perl script that shares this file's
rem base name (%~dpn0 = drive + path + name of this .bat), forwarding
rem all command-line arguments (%*).
perl "%~dpn0" %*
|
/*
* software YUV to RGB converter
*
* Copyright (C) 2009 Konstantin Shishkov
*
* 1,4,8bpp support and context / deglobalize stuff
* by Michael Niedermayer (michaelni@gmx.at)
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include "libavutil/cpu.h"
#include "libavutil/bswap.h"
#include "config.h"
#include "rgb2rgb.h"
#include "swscale.h"
#include "swscale_internal.h"
#include "libavutil/pixdesc.h"
/* Ordered-dither matrices defined elsewhere in libswscale; used by the
 * low-depth output paths below. */
extern const uint8_t dither_2x2_4[3][8];
extern const uint8_t dither_2x2_8[3][8];
extern const uint8_t dither_4x4_16[5][8];
extern const uint8_t dither_8x8_32[9][8];
extern const uint8_t dither_8x8_73[9][8];
extern const uint8_t dither_8x8_220[9][8];
/* One row of four fixed-point YUV->RGB coefficients per colorspace id;
 * selected via sws_getCoefficients(). */
const int32_t ff_yuv2rgb_coeffs[8][4] = {
    { 117504, 138453, 13954, 34903 }, /* no sequence_display_extension */
    { 117504, 138453, 13954, 34903 }, /* ITU-R Rec. 709 (1990) */
    { 104597, 132201, 25675, 53279 }, /* unspecified */
    { 104597, 132201, 25675, 53279 }, /* reserved */
    { 104448, 132798, 24759, 53109 }, /* FCC */
    { 104597, 132201, 25675, 53279 }, /* ITU-R Rec. 624-4 System B, G */
    { 104597, 132201, 25675, 53279 }, /* SMPTE 170M */
    { 117579, 136230, 16907, 35559 }  /* SMPTE 240M (1987) */
};
/*
 * Return the coefficient row for @colorspace; ids outside [0, 7] fall
 * back to SWS_CS_DEFAULT.
 */
const int *sws_getCoefficients(int colorspace)
{
    const int in_range = (unsigned)colorspace < 8;

    return ff_yuv2rgb_coeffs[in_range ? colorspace : SWS_CS_DEFAULT];
}
/* Fetch the chroma pair at index i and resolve the per-component lookup
 * tables for that U/V combination (YUVRGB_TABLE_HEADROOM offsets the
 * table index). */
#define LOADCHROMA(i) \
    U = pu[i]; \
    V = pv[i]; \
    r = (void *)c->table_rV[V+YUVRGB_TABLE_HEADROOM]; \
    g = (void *)(c->table_gU[U+YUVRGB_TABLE_HEADROOM] + c->table_gV[V+YUVRGB_TABLE_HEADROOM]); \
    b = (void *)c->table_bU[U+YUVRGB_TABLE_HEADROOM];

/* Write two packed pixels by summing the component tables at luma Y. */
#define PUTRGB(dst, src, i) \
    Y = src[2 * i]; \
    dst[2 * i] = r[Y] + g[Y] + b[Y]; \
    Y = src[2 * i + 1]; \
    dst[2 * i + 1] = r[Y] + g[Y] + b[Y];

/* Write two 3-byte pixels in R,G,B byte order. */
#define PUTRGB24(dst, src, i) \
    Y = src[2 * i]; \
    dst[6 * i + 0] = r[Y]; \
    dst[6 * i + 1] = g[Y]; \
    dst[6 * i + 2] = b[Y]; \
    Y = src[2 * i + 1]; \
    dst[6 * i + 3] = r[Y]; \
    dst[6 * i + 4] = g[Y]; \
    dst[6 * i + 5] = b[Y];

/* Same as PUTRGB24 with the R and B bytes swapped. */
#define PUTBGR24(dst, src, i) \
    Y = src[2 * i]; \
    dst[6 * i + 0] = b[Y]; \
    dst[6 * i + 1] = g[Y]; \
    dst[6 * i + 2] = r[Y]; \
    Y = src[2 * i + 1]; \
    dst[6 * i + 3] = b[Y]; \
    dst[6 * i + 4] = g[Y]; \
    dst[6 * i + 5] = r[Y];

/* Packed output with the alpha-plane sample shifted left by s bits and
 * added in. */
#define PUTRGBA(dst, ysrc, asrc, i, s) \
    Y = ysrc[2 * i]; \
    dst[2 * i] = r[Y] + g[Y] + b[Y] + (asrc[2 * i] << s); \
    Y = ysrc[2 * i + 1]; \
    dst[2 * i + 1] = r[Y] + g[Y] + b[Y] + (asrc[2 * i + 1] << s);

/* 48 bpp output: each 8-bit component value is written to both bytes of
 * its 16-bit slot. */
#define PUTRGB48(dst, src, i) \
    Y = src[ 2 * i]; \
    dst[12 * i + 0] = dst[12 * i + 1] = r[Y]; \
    dst[12 * i + 2] = dst[12 * i + 3] = g[Y]; \
    dst[12 * i + 4] = dst[12 * i + 5] = b[Y]; \
    Y = src[ 2 * i + 1]; \
    dst[12 * i + 6] = dst[12 * i + 7] = r[Y]; \
    dst[12 * i + 8] = dst[12 * i + 9] = g[Y]; \
    dst[12 * i + 10] = dst[12 * i + 11] = b[Y];

/* Same as PUTRGB48 with the R and B components swapped. */
#define PUTBGR48(dst, src, i) \
    Y = src[2 * i]; \
    dst[12 * i + 0] = dst[12 * i + 1] = b[Y]; \
    dst[12 * i + 2] = dst[12 * i + 3] = g[Y]; \
    dst[12 * i + 4] = dst[12 * i + 5] = r[Y]; \
    Y = src[2 * i + 1]; \
    dst[12 * i + 6] = dst[12 * i + 7] = b[Y]; \
    dst[12 * i + 8] = dst[12 * i + 9] = g[Y]; \
    dst[12 * i + 10] = dst[12 * i + 11] = r[Y];
/* Open the body of a C yuv2rgb converter: iterates two output rows at a
 * time, with the inner loop handling 8 luma pixels per step
 * (h_size = dstW / 8).  The macro deliberately leaves its braces open;
 * they are closed by ENDYUV2RGBLINE/ENDYUV2RGBFUNC below. */
#define YUV2RGBFUNC(func_name, dst_type, alpha) \
    static int func_name(SwsContext *c, const uint8_t *src[], \
                         int srcStride[], int srcSliceY, int srcSliceH, \
                         uint8_t *dst[], int dstStride[]) \
    { \
        int y; \
 \
        if (!alpha && c->srcFormat == AV_PIX_FMT_YUV422P) { \
            srcStride[1] *= 2; \
            srcStride[2] *= 2; \
        } \
        for (y = 0; y < srcSliceH; y += 2) { \
            dst_type *dst_1 = \
                (dst_type *)(dst[0] + (y + srcSliceY) * dstStride[0]); \
            dst_type *dst_2 = \
                (dst_type *)(dst[0] + (y + srcSliceY + 1) * dstStride[0]); \
            dst_type av_unused *r, *g, *b; \
            const uint8_t *py_1 = src[0] + y * srcStride[0]; \
            const uint8_t *py_2 = py_1 + srcStride[0]; \
            const uint8_t *pu = src[1] + (y >> 1) * srcStride[1]; \
            const uint8_t *pv = src[2] + (y >> 1) * srcStride[2]; \
            const uint8_t av_unused *pa_1, *pa_2; \
            unsigned int h_size = c->dstW >> 3; \
            if (alpha) { \
                pa_1 = src[3] + y * srcStride[3]; \
                pa_2 = pa_1 + srcStride[3]; \
            } \
            while (h_size--) { \
                int av_unused U, V, Y; \

/* Close the 8-pixel loop, advance the plane pointers, and open a tail
 * block that handles a remaining group of 4 (ss = 0) or 2 (ss = 1)
 * pixels when dstW is not a multiple of the step size. */
#define ENDYUV2RGBLINE(dst_delta, ss) \
            pu += 4 >> ss; \
            pv += 4 >> ss; \
            py_1 += 8 >> ss; \
            py_2 += 8 >> ss; \
            dst_1 += dst_delta >> ss; \
            dst_2 += dst_delta >> ss; \
        } \
        if (c->dstW & (4 >> ss)) { \
            int av_unused Y, U, V; \

/* Close the tail block, the row loop, and the function body. */
#define ENDYUV2RGBFUNC() \
        } \
    } \
    return srcSliceH; \
}

/* Shorthand for converters with no sub-8-pixel tail handling. */
#define CLOSEYUV2RGBFUNC(dst_delta) \
    ENDYUV2RGBLINE(dst_delta, 0) \
    ENDYUV2RGBFUNC()
/*
 * Planar YUV -> 48 bpp RGB: 16 bits per component, with each 8-bit
 * value duplicated into both bytes of its slot (see PUTRGB48).  The
 * main loop covers 8 pixels per row pair; the ENDYUV2RGBLINE tails
 * handle a remaining 4 and then 2 pixels.
 */
YUV2RGBFUNC(yuv2rgb_c_48, uint8_t, 0)
    LOADCHROMA(0);
    PUTRGB48(dst_1, py_1, 0);
    PUTRGB48(dst_2, py_2, 0);

    /* Row order alternates on odd chroma indices throughout this file;
     * preserved as-is. */
    LOADCHROMA(1);
    PUTRGB48(dst_2, py_2, 1);
    PUTRGB48(dst_1, py_1, 1);

    LOADCHROMA(2);
    PUTRGB48(dst_1, py_1, 2);
    PUTRGB48(dst_2, py_2, 2);

    LOADCHROMA(3);
    PUTRGB48(dst_2, py_2, 3);
    PUTRGB48(dst_1, py_1, 3);
ENDYUV2RGBLINE(48, 0)
    /* 4-pixel tail */
    LOADCHROMA(0);
    PUTRGB48(dst_1, py_1, 0);
    PUTRGB48(dst_2, py_2, 0);

    LOADCHROMA(1);
    PUTRGB48(dst_2, py_2, 1);
    PUTRGB48(dst_1, py_1, 1);
ENDYUV2RGBLINE(48, 1)
    /* 2-pixel tail */
    LOADCHROMA(0);
    PUTRGB48(dst_1, py_1, 0);
    PUTRGB48(dst_2, py_2, 0);
ENDYUV2RGBFUNC()
/* As yuv2rgb_c_48, but with the R and B components swapped (BGR order). */
YUV2RGBFUNC(yuv2rgb_c_bgr48, uint8_t, 0)
    LOADCHROMA(0);
    PUTBGR48(dst_1, py_1, 0);
    PUTBGR48(dst_2, py_2, 0);

    LOADCHROMA(1);
    PUTBGR48(dst_2, py_2, 1);
    PUTBGR48(dst_1, py_1, 1);

    LOADCHROMA(2);
    PUTBGR48(dst_1, py_1, 2);
    PUTBGR48(dst_2, py_2, 2);

    LOADCHROMA(3);
    PUTBGR48(dst_2, py_2, 3);
    PUTBGR48(dst_1, py_1, 3);
ENDYUV2RGBLINE(48, 0)
    /* 4-pixel tail */
    LOADCHROMA(0);
    PUTBGR48(dst_1, py_1, 0);
    PUTBGR48(dst_2, py_2, 0);

    LOADCHROMA(1);
    PUTBGR48(dst_2, py_2, 1);
    PUTBGR48(dst_1, py_1, 1);
ENDYUV2RGBLINE(48, 1)
    /* 2-pixel tail */
    LOADCHROMA(0);
    PUTBGR48(dst_1, py_1, 0);
    PUTBGR48(dst_2, py_2, 0);
ENDYUV2RGBFUNC()
/*
 * Planar YUV -> packed 32-bit output: each pixel is the sum of the
 * three pre-shifted component table entries (see PUTRGB).
 */
YUV2RGBFUNC(yuv2rgb_c_32, uint32_t, 0)
    LOADCHROMA(0);
    PUTRGB(dst_1, py_1, 0);
    PUTRGB(dst_2, py_2, 0);

    LOADCHROMA(1);
    PUTRGB(dst_2, py_2, 1);
    PUTRGB(dst_1, py_1, 1);

    LOADCHROMA(2);
    PUTRGB(dst_1, py_1, 2);
    PUTRGB(dst_2, py_2, 2);

    LOADCHROMA(3);
    PUTRGB(dst_2, py_2, 3);
    PUTRGB(dst_1, py_1, 3);
ENDYUV2RGBLINE(8, 0)
    /* 4-pixel tail */
    LOADCHROMA(0);
    PUTRGB(dst_1, py_1, 0);
    PUTRGB(dst_2, py_2, 0);

    LOADCHROMA(1);
    PUTRGB(dst_2, py_2, 1);
    PUTRGB(dst_1, py_1, 1);
ENDYUV2RGBLINE(8, 1)
    /* 2-pixel tail */
    LOADCHROMA(0);
    PUTRGB(dst_1, py_1, 0);
    PUTRGB(dst_2, py_2, 0);
ENDYUV2RGBFUNC()
/*
 * Planar YUVA -> packed 32-bit output with the alpha-plane sample
 * shifted into the top byte (s = 24).  The alpha pointers advance by
 * the pixel count of each segment.
 */
YUV2RGBFUNC(yuva2rgba_c, uint32_t, 1)
    LOADCHROMA(0);
    PUTRGBA(dst_1, py_1, pa_1, 0, 24);
    PUTRGBA(dst_2, py_2, pa_2, 0, 24);

    LOADCHROMA(1);
    PUTRGBA(dst_2, py_2, pa_2, 1, 24);
    PUTRGBA(dst_1, py_1, pa_1, 1, 24);

    LOADCHROMA(2);
    PUTRGBA(dst_1, py_1, pa_1, 2, 24);
    PUTRGBA(dst_2, py_2, pa_2, 2, 24);

    LOADCHROMA(3);
    PUTRGBA(dst_2, py_2, pa_2, 3, 24);
    PUTRGBA(dst_1, py_1, pa_1, 3, 24);
    pa_1 += 8;
    pa_2 += 8;
ENDYUV2RGBLINE(8, 0)
    /* 4-pixel tail */
    LOADCHROMA(0);
    PUTRGBA(dst_1, py_1, pa_1, 0, 24);
    PUTRGBA(dst_2, py_2, pa_2, 0, 24);

    LOADCHROMA(1);
    PUTRGBA(dst_2, py_2, pa_2, 1, 24);
    PUTRGBA(dst_1, py_1, pa_1, 1, 24);
    pa_1 += 4;
    pa_2 += 4;
ENDYUV2RGBLINE(8, 1)
    /* 2-pixel tail */
    LOADCHROMA(0);
    PUTRGBA(dst_1, py_1, pa_1, 0, 24);
    PUTRGBA(dst_2, py_2, pa_2, 0, 24);
ENDYUV2RGBFUNC()
/*
 * As yuva2rgba_c, but the alpha sample is added unshifted (s = 0), i.e.
 * into the low byte of the packed value.
 */
YUV2RGBFUNC(yuva2argb_c, uint32_t, 1)
    LOADCHROMA(0);
    PUTRGBA(dst_1, py_1, pa_1, 0, 0);
    PUTRGBA(dst_2, py_2, pa_2, 0, 0);

    LOADCHROMA(1);
    PUTRGBA(dst_2, py_2, pa_2, 1, 0);
    PUTRGBA(dst_1, py_1, pa_1, 1, 0);

    LOADCHROMA(2);
    PUTRGBA(dst_1, py_1, pa_1, 2, 0);
    PUTRGBA(dst_2, py_2, pa_2, 2, 0);

    LOADCHROMA(3);
    PUTRGBA(dst_2, py_2, pa_2, 3, 0);
    PUTRGBA(dst_1, py_1, pa_1, 3, 0);
    pa_1 += 8;
    pa_2 += 8;
ENDYUV2RGBLINE(8, 0)
    /* 4-pixel tail */
    LOADCHROMA(0);
    PUTRGBA(dst_1, py_1, pa_1, 0, 0);
    PUTRGBA(dst_2, py_2, pa_2, 0, 0);

    LOADCHROMA(1);
    PUTRGBA(dst_2, py_2, pa_2, 1, 0);
    PUTRGBA(dst_1, py_1, pa_1, 1, 0);
    pa_1 += 4;
    pa_2 += 4;
ENDYUV2RGBLINE(8, 1)
    /* 2-pixel tail */
    LOADCHROMA(0);
    PUTRGBA(dst_1, py_1, pa_1, 0, 0);
    PUTRGBA(dst_2, py_2, pa_2, 0, 0);
ENDYUV2RGBFUNC()
/*
 * Planar YUV -> 24 bpp packed output in R,G,B byte order.
 */
YUV2RGBFUNC(yuv2rgb_c_24_rgb, uint8_t, 0)
    LOADCHROMA(0);
    PUTRGB24(dst_1, py_1, 0);
    PUTRGB24(dst_2, py_2, 0);

    LOADCHROMA(1);
    PUTRGB24(dst_2, py_2, 1);
    PUTRGB24(dst_1, py_1, 1);

    LOADCHROMA(2);
    PUTRGB24(dst_1, py_1, 2);
    PUTRGB24(dst_2, py_2, 2);

    LOADCHROMA(3);
    PUTRGB24(dst_2, py_2, 3);
    PUTRGB24(dst_1, py_1, 3);
ENDYUV2RGBLINE(24, 0)
    /* 4-pixel tail */
    LOADCHROMA(0);
    PUTRGB24(dst_1, py_1, 0);
    PUTRGB24(dst_2, py_2, 0);

    LOADCHROMA(1);
    PUTRGB24(dst_2, py_2, 1);
    PUTRGB24(dst_1, py_1, 1);
ENDYUV2RGBLINE(24, 1)
    /* 2-pixel tail */
    LOADCHROMA(0);
    PUTRGB24(dst_1, py_1, 0);
    PUTRGB24(dst_2, py_2, 0);
ENDYUV2RGBFUNC()
/* yuv2rgb_c_24_bgr: byte-swapped twin of yuv2rgb_c_24_rgb — only trivial
 * mods (PUTRGB24 -> PUTBGR24, i.e. B stored first instead of R). */
YUV2RGBFUNC(yuv2rgb_c_24_bgr, uint8_t, 0)
    LOADCHROMA(0);
    PUTBGR24(dst_1, py_1, 0);
    PUTBGR24(dst_2, py_2, 0);

    LOADCHROMA(1);
    PUTBGR24(dst_2, py_2, 1);
    PUTBGR24(dst_1, py_1, 1);

    LOADCHROMA(2);
    PUTBGR24(dst_1, py_1, 2);
    PUTBGR24(dst_2, py_2, 2);

    LOADCHROMA(3);
    PUTBGR24(dst_2, py_2, 3);
    PUTBGR24(dst_1, py_1, 3);
ENDYUV2RGBLINE(24, 0)
    /* 4-pixel remainder */
    LOADCHROMA(0);
    PUTBGR24(dst_1, py_1, 0);
    PUTBGR24(dst_2, py_2, 0);

    LOADCHROMA(1);
    PUTBGR24(dst_2, py_2, 1);
    PUTBGR24(dst_1, py_1, 1);
ENDYUV2RGBLINE(24, 1)
    /* final 2-pixel remainder */
    LOADCHROMA(0);
    PUTBGR24(dst_1, py_1, 0);
    PUTBGR24(dst_2, py_2, 0);
ENDYUV2RGBFUNC()
/* yuv2rgb_c_16_ordered_dither: YUV -> packed 16bpp (565-class formats) with a
 * 2x2 ordered dither. Three per-row dither offset tables are used: d16 for r,
 * e16 (finer dither_2x2_4 table) for g, and f16 (row-inverted) for b, so the
 * three channels do not dither in lockstep. r/g/b here are the per-channel
 * lookup tables provided by the YUV2RGBFUNC prologue. */
YUV2RGBFUNC(yuv2rgb_c_16_ordered_dither, uint16_t, 0)
    const uint8_t *d16 = dither_2x2_8[y & 1];
    const uint8_t *e16 = dither_2x2_4[y & 1];
    const uint8_t *f16 = dither_2x2_8[(y & 1)^1];

/* Writes two horizontally adjacent pixels; o selects the dither column pair
 * (rows use o and o + 8 so the two output lines get different offsets). */
#define PUTRGB16(dst, src, i, o)                    \
    Y              = src[2 * i];                    \
    dst[2 * i]     = r[Y + d16[0 + o]] +            \
                     g[Y + e16[0 + o]] +            \
                     b[Y + f16[0 + o]];             \
    Y              = src[2 * i + 1];                \
    dst[2 * i + 1] = r[Y + d16[1 + o]] +            \
                     g[Y + e16[1 + o]] +            \
                     b[Y + f16[1 + o]];
    LOADCHROMA(0);
    PUTRGB16(dst_1, py_1, 0, 0);
    PUTRGB16(dst_2, py_2, 0, 0 + 8);

    LOADCHROMA(1);
    PUTRGB16(dst_2, py_2, 1, 2 + 8);
    PUTRGB16(dst_1, py_1, 1, 2);

    LOADCHROMA(2);
    PUTRGB16(dst_1, py_1, 2, 4);
    PUTRGB16(dst_2, py_2, 2, 4 + 8);

    LOADCHROMA(3);
    PUTRGB16(dst_2, py_2, 3, 6 + 8);
    PUTRGB16(dst_1, py_1, 3, 6);
CLOSEYUV2RGBFUNC(8)
/* yuv2rgb_c_15_ordered_dither: as the 16bpp variant above but for 555-class
 * formats; all three channels are 5-bit, so only two dither tables are needed
 * (d16 and its row-inverted counterpart e16), with column indices swapped
 * between channels to decorrelate them. */
YUV2RGBFUNC(yuv2rgb_c_15_ordered_dither, uint16_t, 0)
    const uint8_t *d16 = dither_2x2_8[y & 1];
    const uint8_t *e16 = dither_2x2_8[(y & 1)^1];

/* Writes two horizontally adjacent pixels; o selects the dither column pair. */
#define PUTRGB15(dst, src, i, o)                    \
    Y              = src[2 * i];                    \
    dst[2 * i]     = r[Y + d16[0 + o]] +            \
                     g[Y + d16[1 + o]] +            \
                     b[Y + e16[0 + o]];             \
    Y              = src[2 * i + 1];                \
    dst[2 * i + 1] = r[Y + d16[1 + o]] +            \
                     g[Y + d16[0 + o]] +            \
                     b[Y + e16[1 + o]];
    LOADCHROMA(0);
    PUTRGB15(dst_1, py_1, 0, 0);
    PUTRGB15(dst_2, py_2, 0, 0 + 8);

    LOADCHROMA(1);
    PUTRGB15(dst_2, py_2, 1, 2 + 8);
    PUTRGB15(dst_1, py_1, 1, 2);

    LOADCHROMA(2);
    PUTRGB15(dst_1, py_1, 2, 4);
    PUTRGB15(dst_2, py_2, 2, 4 + 8);

    LOADCHROMA(3);
    PUTRGB15(dst_2, py_2, 3, 6 + 8);
    PUTRGB15(dst_1, py_1, 3, 6);
CLOSEYUV2RGBFUNC(8)
/* yuv2rgb_c_12_ordered_dither: YUV -> packed 12bpp (444-class formats) with a
 * 4x4 ordered dither; r, g, b are the per-channel lookup tables and
 * dst_1/dst_2 the two output rows set up by the YUV2RGBFUNC prologue.
 * All channels are 4-bit, so a single dither table serves all three. */
YUV2RGBFUNC(yuv2rgb_c_12_ordered_dither, uint16_t, 0)
    const uint8_t *d16 = dither_4x4_16[y & 3];

/* Writes two horizontally adjacent pixels; o selects the dither column pair. */
#define PUTRGB12(dst, src, i, o)                    \
    Y              = src[2 * i];                    \
    dst[2 * i]     = r[Y + d16[0 + o]] +            \
                     g[Y + d16[0 + o]] +            \
                     b[Y + d16[0 + o]];             \
    Y              = src[2 * i + 1];                \
    dst[2 * i + 1] = r[Y + d16[1 + o]] +            \
                     g[Y + d16[1 + o]] +            \
                     b[Y + d16[1 + o]];
    LOADCHROMA(0);
    PUTRGB12(dst_1, py_1, 0, 0);
    PUTRGB12(dst_2, py_2, 0, 0 + 8);

    LOADCHROMA(1);
    PUTRGB12(dst_2, py_2, 1, 2 + 8);
    PUTRGB12(dst_1, py_1, 1, 2);

    LOADCHROMA(2);
    PUTRGB12(dst_1, py_1, 2, 4);
    PUTRGB12(dst_2, py_2, 2, 4 + 8);

    LOADCHROMA(3);
    PUTRGB12(dst_2, py_2, 3, 6 + 8);
    PUTRGB12(dst_1, py_1, 3, 6);
CLOSEYUV2RGBFUNC(8)
/* yuv2rgb_c_8_ordered_dither: YUV -> palettised/packed 8bpp with an 8x8
 * ordered dither; r, g, b are the per-channel lookup tables and dst_1/dst_2
 * the two output rows set up by the YUV2RGBFUNC prologue. r and g share the
 * 32-level dither table, b uses the 73-level one. */
YUV2RGBFUNC(yuv2rgb_c_8_ordered_dither, uint8_t, 0)
    const uint8_t *d32 = dither_8x8_32[y & 7];
    const uint8_t *d64 = dither_8x8_73[y & 7];

/* Writes two horizontally adjacent pixels; o selects the dither column pair. */
#define PUTRGB8(dst, src, i, o)                     \
    Y              = src[2 * i];                    \
    dst[2 * i]     = r[Y + d32[0 + o]] +            \
                     g[Y + d32[0 + o]] +            \
                     b[Y + d64[0 + o]];             \
    Y              = src[2 * i + 1];                \
    dst[2 * i + 1] = r[Y + d32[1 + o]] +            \
                     g[Y + d32[1 + o]] +            \
                     b[Y + d64[1 + o]];
    LOADCHROMA(0);
    PUTRGB8(dst_1, py_1, 0, 0);
    PUTRGB8(dst_2, py_2, 0, 0 + 8);

    LOADCHROMA(1);
    PUTRGB8(dst_2, py_2, 1, 2 + 8);
    PUTRGB8(dst_1, py_1, 1, 2);

    LOADCHROMA(2);
    PUTRGB8(dst_1, py_1, 2, 4);
    PUTRGB8(dst_2, py_2, 2, 4 + 8);

    LOADCHROMA(3);
    PUTRGB8(dst_2, py_2, 3, 6 + 8);
    PUTRGB8(dst_1, py_1, 3, 6);
CLOSEYUV2RGBFUNC(8)
/* yuv2rgb_c_4_ordered_dither: YUV -> 4bpp with two pixels packed per output
 * byte (the even pixel in the low nibble, the odd pixel shifted into the high
 * nibble via acc) and an 8x8 ordered dither. */
YUV2RGBFUNC(yuv2rgb_c_4_ordered_dither, uint8_t, 0)
    const uint8_t * d64 = dither_8x8_73[y & 7];
    const uint8_t *d128 = dither_8x8_220[y & 7];
    int acc;

/* Packs two horizontally adjacent pixels into one byte at dst[i];
 * o selects the dither column pair. */
#define PUTRGB4D(dst, src, i, o)                    \
    Y      = src[2 * i];                            \
    acc    = r[Y + d128[0 + o]] +                   \
             g[Y +  d64[0 + o]] +                   \
             b[Y + d128[0 + o]];                    \
    Y      = src[2 * i + 1];                        \
    acc   |= (r[Y + d128[1 + o]] +                  \
              g[Y +  d64[1 + o]] +                  \
              b[Y + d128[1 + o]]) << 4;             \
    dst[i] = acc;
    LOADCHROMA(0);
    PUTRGB4D(dst_1, py_1, 0, 0);
    PUTRGB4D(dst_2, py_2, 0, 0 + 8);

    LOADCHROMA(1);
    PUTRGB4D(dst_2, py_2, 1, 2 + 8);
    PUTRGB4D(dst_1, py_1, 1, 2);

    LOADCHROMA(2);
    PUTRGB4D(dst_1, py_1, 2, 4);
    PUTRGB4D(dst_2, py_2, 2, 4 + 8);

    LOADCHROMA(3);
    PUTRGB4D(dst_2, py_2, 3, 6 + 8);
    PUTRGB4D(dst_1, py_1, 3, 6);
CLOSEYUV2RGBFUNC(4)
/* yuv2rgb_c_4b_ordered_dither: 4-bit-per-pixel variant that stores one pixel
 * per byte (RGB4_BYTE formats) instead of packing two per byte as the
 * function above does; same dither tables. */
YUV2RGBFUNC(yuv2rgb_c_4b_ordered_dither, uint8_t, 0)
    const uint8_t *d64  = dither_8x8_73[y & 7];
    const uint8_t *d128 = dither_8x8_220[y & 7];

/* Writes two horizontally adjacent pixels, one byte each;
 * o selects the dither column pair. */
#define PUTRGB4DB(dst, src, i, o)                   \
    Y              = src[2 * i];                    \
    dst[2 * i]     = r[Y + d128[0 + o]] +           \
                     g[Y +  d64[0 + o]] +           \
                     b[Y + d128[0 + o]];            \
    Y              = src[2 * i + 1];                \
    dst[2 * i + 1] = r[Y + d128[1 + o]] +           \
                     g[Y +  d64[1 + o]] +           \
                     b[Y + d128[1 + o]];
    LOADCHROMA(0);
    PUTRGB4DB(dst_1, py_1, 0, 0);
    PUTRGB4DB(dst_2, py_2, 0, 0 + 8);

    LOADCHROMA(1);
    PUTRGB4DB(dst_2, py_2, 1, 2 + 8);
    PUTRGB4DB(dst_1, py_1, 1, 2);

    LOADCHROMA(2);
    PUTRGB4DB(dst_1, py_1, 2, 4);
    PUTRGB4DB(dst_2, py_2, 2, 4 + 8);

    LOADCHROMA(3);
    PUTRGB4DB(dst_2, py_2, 3, 6 + 8);
    PUTRGB4DB(dst_1, py_1, 3, 6);
CLOSEYUV2RGBFUNC(8)
/* yuv2rgb_c_1_ordered_dither: YUV -> 1bpp monochrome with an 8x8 ordered
 * dither. Chroma is forced to neutral (g is rebuilt from the U=V=128 table
 * entries), and each PUTRGB1 shifts two thresholded bits into the out_*
 * accumulators (out += out + bit == shift-left-and-insert); one full byte of
 * 8 pixels per row is stored at the end. */
YUV2RGBFUNC(yuv2rgb_c_1_ordered_dither, uint8_t, 0)
    const uint8_t *d128 = dither_8x8_220[y & 7];
    char out_1 = 0, out_2 = 0;
    g = c->table_gU[128 + YUVRGB_TABLE_HEADROOM] + c->table_gV[128 + YUVRGB_TABLE_HEADROOM];

/* Shifts two dither-thresholded pixel bits into out; o selects the column pair. */
#define PUTRGB1(out, src, i, o)                     \
    Y    = src[2 * i];                              \
    out += out + g[Y + d128[0 + o]];                \
    Y    = src[2 * i + 1];                          \
    out += out + g[Y + d128[1 + o]];
    PUTRGB1(out_1, py_1, 0, 0);
    PUTRGB1(out_2, py_2, 0, 0 + 8);

    PUTRGB1(out_2, py_2, 1, 2 + 8);
    PUTRGB1(out_1, py_1, 1, 2);

    PUTRGB1(out_1, py_1, 2, 4);
    PUTRGB1(out_2, py_2, 2, 4 + 8);

    PUTRGB1(out_2, py_2, 3, 6 + 8);
    PUTRGB1(out_1, py_1, 3, 6);

    dst_1[0] = out_1;
    dst_2[0] = out_2;
CLOSEYUV2RGBFUNC(1)
/**
 * Select a YUV -> RGB conversion function for the context's dst format.
 *
 * Architecture-accelerated implementations are preferred when available
 * (HAVE_* / ARCH_* are compile-time 0/1 capability macros); otherwise one of
 * the C fallbacks defined above is chosen by destination pixel format.
 *
 * @return the conversion function, or NULL if dstFormat is unsupported.
 */
SwsFunc ff_yuv2rgb_get_func_ptr(SwsContext *c)
{
    SwsFunc t = NULL;

    if (HAVE_MMX)
        t = ff_yuv2rgb_init_mmx(c);
    else if (HAVE_VIS)
        t = ff_yuv2rgb_init_vis(c);
    else if (HAVE_ALTIVEC)
        t = ff_yuv2rgb_init_altivec(c);
    else if (ARCH_BFIN)
        t = ff_yuv2rgb_get_func_ptr_bfin(c);

    if (t)
        return t;

    av_log(c, AV_LOG_WARNING,
           "No accelerated colorspace conversion found from %s to %s.\n",
           av_get_pix_fmt_name(c->srcFormat), av_get_pix_fmt_name(c->dstFormat));

    switch (c->dstFormat) {
    case AV_PIX_FMT_BGR48BE:
    case AV_PIX_FMT_BGR48LE:
        return yuv2rgb_c_bgr48;
    case AV_PIX_FMT_RGB48BE:
    case AV_PIX_FMT_RGB48LE:
        return yuv2rgb_c_48;
    case AV_PIX_FMT_ARGB:
    case AV_PIX_FMT_ABGR:
        if (CONFIG_SWSCALE_ALPHA && isALPHA(c->srcFormat))
            return yuva2argb_c;
        /* fall through: without a source alpha plane, ARGB/ABGR are handled
         * by the same paths as RGBA/BGRA below */
    case AV_PIX_FMT_RGBA:
    case AV_PIX_FMT_BGRA:
        return (CONFIG_SWSCALE_ALPHA && isALPHA(c->srcFormat)) ? yuva2rgba_c : yuv2rgb_c_32;
    case AV_PIX_FMT_RGB24:
        return yuv2rgb_c_24_rgb;
    case AV_PIX_FMT_BGR24:
        return yuv2rgb_c_24_bgr;
    case AV_PIX_FMT_RGB565:
    case AV_PIX_FMT_BGR565:
        return yuv2rgb_c_16_ordered_dither;
    case AV_PIX_FMT_RGB555:
    case AV_PIX_FMT_BGR555:
        return yuv2rgb_c_15_ordered_dither;
    case AV_PIX_FMT_RGB444:
    case AV_PIX_FMT_BGR444:
        return yuv2rgb_c_12_ordered_dither;
    case AV_PIX_FMT_RGB8:
    case AV_PIX_FMT_BGR8:
        return yuv2rgb_c_8_ordered_dither;
    case AV_PIX_FMT_RGB4:
    case AV_PIX_FMT_BGR4:
        return yuv2rgb_c_4_ordered_dither;
    case AV_PIX_FMT_RGB4_BYTE:
    case AV_PIX_FMT_BGR4_BYTE:
        return yuv2rgb_c_4b_ordered_dither;
    case AV_PIX_FMT_MONOBLACK:
        return yuv2rgb_c_1_ordered_dither;
    }
    return NULL;
}
/* Populate a 256 + 2*headroom entry chroma lookup table with pointers into
 * the luma table y_tab, offset by the chroma contribution (cb >> 16) scaled
 * to elemsize-byte entries. Indices outside [0, 255] (the headroom region)
 * are clamped so out-of-range chroma reuses the edge entries. */
static void fill_table(uint8_t* table[256 + 2*YUVRGB_TABLE_HEADROOM], const int elemsize,
                       const int64_t inc, void *y_tab)
{
    /* Pre-bias the base pointer so the loop only adds the per-index offset. */
    uint8_t *base = (uint8_t *)y_tab - elemsize * (inc >> 9);
    int i;

    for (i = 0; i < 256 + 2 * YUVRGB_TABLE_HEADROOM; i++) {
        const int64_t chroma = av_clip(i - YUVRGB_TABLE_HEADROOM, 0, 255) * inc;

        table[i] = base + elemsize * (chroma >> 16);
    }
}
/* Like fill_table() above, but stores integer byte offsets rather than
 * pointers (the green channel combines two tables, so an additive offset
 * is needed instead of a second pointer). */
static void fill_gv_table(int table[256 + 2*YUVRGB_TABLE_HEADROOM], const int elemsize, const int64_t inc)
{
    const int off = -(inc >> 9);
    int i;

    for (i = 0; i < 256 + 2 * YUVRGB_TABLE_HEADROOM; i++) {
        const int64_t chroma = av_clip(i - YUVRGB_TABLE_HEADROOM, 0, 255) * inc;

        table[i] = elemsize * (off + (chroma >> 16));
    }
}
/* Round a 16.16 fixed-point value to 16 bits with saturation.
 * Values above 0x7FFF clamp to 0x7FFF, values below -0x7FFF clamp to 0x8000
 * (i.e. -32768); in-range negatives are returned as their two's-complement
 * bit pattern in the uint16_t return type. */
static uint16_t roundToInt16(int64_t f)
{
    const int r = (int)((f + (1 << 15)) >> 16);

    return r < -0x7FFF ? 0x8000 :
           r >  0x7FFF ? 0x7FFF : (uint16_t)r;
}
/**
 * (Re)build the per-channel YUV -> RGB lookup tables for the C converters.
 *
 * The coefficients from inv_table are adjusted for range (limited vs full),
 * contrast (16.16 fixed point), saturation and brightness, then baked into
 * a luma table (c->yuvTable) and the chroma pointer/offset tables
 * (c->table_rV/gU/bU and the additive c->table_gV), with one layout per
 * destination bit depth. MMX-style packed coefficient fields on the context
 * are also filled in.
 *
 * @param inv_table  [crv, cbu, cgu, cgv] inverse colourspace coefficients
 * @return 0 on success, -1 if the destination bpp is unsupported.
 */
av_cold int ff_yuv2rgb_c_init_tables(SwsContext *c, const int inv_table[4],
                                     int fullRange, int brightness,
                                     int contrast, int saturation)
{
    /* R stored before B in memory -> affects the per-channel shift bases below */
    const int isRgb = c->dstFormat == AV_PIX_FMT_RGB32 ||
                      c->dstFormat == AV_PIX_FMT_RGB32_1 ||
                      c->dstFormat == AV_PIX_FMT_BGR24 ||
                      c->dstFormat == AV_PIX_FMT_RGB565BE ||
                      c->dstFormat == AV_PIX_FMT_RGB565LE ||
                      c->dstFormat == AV_PIX_FMT_RGB555BE ||
                      c->dstFormat == AV_PIX_FMT_RGB555LE ||
                      c->dstFormat == AV_PIX_FMT_RGB444BE ||
                      c->dstFormat == AV_PIX_FMT_RGB444LE ||
                      c->dstFormat == AV_PIX_FMT_RGB8 ||
                      c->dstFormat == AV_PIX_FMT_RGB4 ||
                      c->dstFormat == AV_PIX_FMT_RGB4_BYTE ||
                      c->dstFormat == AV_PIX_FMT_MONOBLACK;
    /* opposite endianness to the host -> 16-bit table entries need bswap */
    const int isNotNe = c->dstFormat == AV_PIX_FMT_NE(RGB565LE, RGB565BE) ||
                        c->dstFormat == AV_PIX_FMT_NE(RGB555LE, RGB555BE) ||
                        c->dstFormat == AV_PIX_FMT_NE(RGB444LE, RGB444BE) ||
                        c->dstFormat == AV_PIX_FMT_NE(BGR565LE, BGR565BE) ||
                        c->dstFormat == AV_PIX_FMT_NE(BGR555LE, BGR555BE) ||
                        c->dstFormat == AV_PIX_FMT_NE(BGR444LE, BGR444BE);
    const int bpp = c->dstFormatBpp;
    uint8_t *y_table;
    uint16_t *y_table16;
    uint32_t *y_table32;
    int i, base, rbase, gbase, bbase, av_uninit(abase), needAlpha;
    /* base index into the luma table for the chroma pointer tables */
    const int yoffs = fullRange ? 384 : 326;

    int64_t crv =  inv_table[0];
    int64_t cbu =  inv_table[1];
    int64_t cgu = -inv_table[2];
    int64_t cgv = -inv_table[3];
    int64_t cy  = 1 << 16;
    int64_t oy  = 0;

    int64_t yb = 0;

    if (!fullRange) {
        /* limited range: expand Y from [16,235] and offset it */
        cy = (cy * 255) / 219;
        oy = 16 << 16;
    } else {
        /* full range: chroma coefficients are scaled down instead */
        crv = (crv * 224) / 255;
        cbu = (cbu * 224) / 255;
        cgu = (cgu * 224) / 255;
        cgv = (cgv * 224) / 255;
    }

    /* apply picture controls (contrast/saturation are 16.16 fixed point) */
    cy   = (cy * contrast) >> 16;
    crv  = (crv * contrast * saturation) >> 32;
    cbu  = (cbu * contrast * saturation) >> 32;
    cgu  = (cgu * contrast * saturation) >> 32;
    cgv  = (cgv * contrast * saturation) >> 32;
    oy  -= 256 * brightness;

    /* packed per-lane coefficient fields used by the SIMD implementations */
    c->uOffset = 0x0400040004000400LL;
    c->vOffset = 0x0400040004000400LL;
    c->yCoeff  = roundToInt16(cy * 8192)  * 0x0001000100010001ULL;
    c->vrCoeff = roundToInt16(crv * 8192) * 0x0001000100010001ULL;
    c->ubCoeff = roundToInt16(cbu * 8192) * 0x0001000100010001ULL;
    c->vgCoeff = roundToInt16(cgv * 8192) * 0x0001000100010001ULL;
    c->ugCoeff = roundToInt16(cgu * 8192) * 0x0001000100010001ULL;
    c->yOffset = roundToInt16(oy * 8)     * 0x0001000100010001ULL;

    c->yuv2rgb_y_coeff   = (int16_t)roundToInt16(cy  << 13);
    c->yuv2rgb_y_offset  = (int16_t)roundToInt16(oy  <<  9);
    c->yuv2rgb_v2r_coeff = (int16_t)roundToInt16(crv << 13);
    c->yuv2rgb_v2g_coeff = (int16_t)roundToInt16(cgv << 13);
    c->yuv2rgb_u2g_coeff = (int16_t)roundToInt16(cgu << 13);
    c->yuv2rgb_u2b_coeff = (int16_t)roundToInt16(cbu << 13);

    //scale coefficients by cy
    crv = ((crv << 16) + 0x8000) / cy;
    cbu = ((cbu << 16) + 0x8000) / cy;
    cgu = ((cgu << 16) + 0x8000) / cy;
    cgv = ((cgv << 16) + 0x8000) / cy;

    av_free(c->yuvTable);

    switch (bpp) {
    case 1: /* monochrome: 1-bit threshold table */
        c->yuvTable = av_malloc(1024);
        y_table     = c->yuvTable;
        yb          = -(384 << 16) - oy;
        for (i = 0; i < 1024 - 110; i++) {
            y_table[i + 110]  = av_clip_uint8((yb + 0x8000) >> 16) >> 7;
            yb               += cy;
        }
        fill_table(c->table_gU, 1, cgu, y_table + yoffs);
        fill_gv_table(c->table_gV, 1, cgv);
        break;
    case 4:
    case 4 | 128: /* 4 bpp, plain and byte-per-pixel variants */
        rbase       = isRgb ? 3 : 0;
        gbase       = 1;
        bbase       = isRgb ? 0 : 3;
        c->yuvTable = av_malloc(1024 * 3);
        y_table     = c->yuvTable;
        yb          = -(384 << 16) - oy;
        for (i = 0; i < 1024 - 110; i++) {
            int yval                = av_clip_uint8((yb + 0x8000) >> 16);
            /* 1-bit r/b, 2-level g; offsets centre the rounding windows */
            y_table[i + 110]        = (yval >> 7)        << rbase;
            y_table[i +  37 + 1024] = ((yval + 43) / 85) << gbase;
            y_table[i + 110 + 2048] = (yval >> 7)        << bbase;
            yb                     += cy;
        }
        fill_table(c->table_rV, 1, crv, y_table + yoffs);
        fill_table(c->table_gU, 1, cgu, y_table + yoffs + 1024);
        fill_table(c->table_bU, 1, cbu, y_table + yoffs + 2048);
        fill_gv_table(c->table_gV, 1, cgv);
        break;
    case 8: /* 3-3-2 style 8 bpp */
        rbase       = isRgb ? 5 : 0;
        gbase       = isRgb ? 2 : 3;
        bbase       = isRgb ? 0 : 6;
        c->yuvTable = av_malloc(1024 * 3);
        y_table     = c->yuvTable;
        yb          = -(384 << 16) - oy;
        for (i = 0; i < 1024 - 38; i++) {
            int yval               = av_clip_uint8((yb + 0x8000) >> 16);
            y_table[i + 16]        = ((yval + 18) / 36) << rbase;
            y_table[i + 16 + 1024] = ((yval + 18) / 36) << gbase;
            y_table[i + 37 + 2048] = ((yval + 43) / 85) << bbase;
            yb                    += cy;
        }
        fill_table(c->table_rV, 1, crv, y_table + yoffs);
        fill_table(c->table_gU, 1, cgu, y_table + yoffs + 1024);
        fill_table(c->table_bU, 1, cbu, y_table + yoffs + 2048);
        fill_gv_table(c->table_gV, 1, cgv);
        break;
    case 12: /* 4-4-4, 16-bit table entries */
        rbase       = isRgb ? 8 : 0;
        gbase       = 4;
        bbase       = isRgb ? 0 : 8;
        c->yuvTable = av_malloc(1024 * 3 * 2);
        y_table16   = c->yuvTable;
        yb          = -(384 << 16) - oy;
        for (i = 0; i < 1024; i++) {
            uint8_t yval        = av_clip_uint8((yb + 0x8000) >> 16);
            y_table16[i]        = (yval >> 4) << rbase;
            y_table16[i + 1024] = (yval >> 4) << gbase;
            y_table16[i + 2048] = (yval >> 4) << bbase;
            yb                 += cy;
        }
        if (isNotNe)
            for (i = 0; i < 1024 * 3; i++)
                y_table16[i] = av_bswap16(y_table16[i]);
        fill_table(c->table_rV, 2, crv, y_table16 + yoffs);
        fill_table(c->table_gU, 2, cgu, y_table16 + yoffs + 1024);
        fill_table(c->table_bU, 2, cbu, y_table16 + yoffs + 2048);
        fill_gv_table(c->table_gV, 2, cgv);
        break;
    case 15: /* 5-5-5 */
    case 16: /* 5-6-5: g width depends on bpp via the (18 - bpp) shift */
        rbase       = isRgb ? bpp - 5 : 0;
        gbase       = 5;
        bbase       = isRgb ? 0 : (bpp - 5);
        c->yuvTable = av_malloc(1024 * 3 * 2);
        y_table16   = c->yuvTable;
        yb          = -(384 << 16) - oy;
        for (i = 0; i < 1024; i++) {
            uint8_t yval        = av_clip_uint8((yb + 0x8000) >> 16);
            y_table16[i]        = (yval >> 3)          << rbase;
            y_table16[i + 1024] = (yval >> (18 - bpp)) << gbase;
            y_table16[i + 2048] = (yval >> 3)          << bbase;
            yb                 += cy;
        }
        if (isNotNe)
            for (i = 0; i < 1024 * 3; i++)
                y_table16[i] = av_bswap16(y_table16[i]);
        fill_table(c->table_rV, 2, crv, y_table16 + yoffs);
        fill_table(c->table_gU, 2, cgu, y_table16 + yoffs + 1024);
        fill_table(c->table_bU, 2, cbu, y_table16 + yoffs + 2048);
        fill_gv_table(c->table_gV, 2, cgv);
        break;
    case 24: /* 8 bits/channel: one shared clip table for all three channels */
    case 48:
        c->yuvTable = av_malloc(1024);
        y_table     = c->yuvTable;
        yb          = -(384 << 16) - oy;
        for (i = 0; i < 1024; i++) {
            y_table[i]  = av_clip_uint8((yb + 0x8000) >> 16);
            yb         += cy;
        }
        fill_table(c->table_rV, 1, crv, y_table + yoffs);
        fill_table(c->table_gU, 1, cgu, y_table + yoffs);
        fill_table(c->table_bU, 1, cbu, y_table + yoffs);
        fill_gv_table(c->table_gV, 1, cgv);
        break;
    case 32: /* packed 32-bit, 32-bit table entries with baked-in alpha */
    case 64:
        base        = (c->dstFormat == AV_PIX_FMT_RGB32_1 ||
                       c->dstFormat == AV_PIX_FMT_BGR32_1) ? 8 : 0;
        rbase       = base + (isRgb ? 16 : 0);
        gbase       = base + 8;
        bbase       = base + (isRgb ? 0 : 16);
        needAlpha   = CONFIG_SWSCALE_ALPHA && isALPHA(c->srcFormat);
        if (!needAlpha)
            abase = (base + 24) & 31;
        c->yuvTable = av_malloc(1024 * 3 * 4);
        y_table32   = c->yuvTable;
        yb          = -(384 << 16) - oy;
        for (i = 0; i < 1024; i++) {
            unsigned yval       = av_clip_uint8((yb + 0x8000) >> 16);
            /* opaque alpha is pre-merged into the r table when the source
             * has no alpha plane */
            y_table32[i]        = (yval << rbase) +
                                  (needAlpha ? 0 : (255u << abase));
            y_table32[i + 1024] = yval << gbase;
            y_table32[i + 2048] = yval << bbase;
            yb                 += cy;
        }
        fill_table(c->table_rV, 4, crv, y_table32 + yoffs);
        fill_table(c->table_gU, 4, cgu, y_table32 + yoffs + 1024);
        fill_table(c->table_bU, 4, cbu, y_table32 + yoffs + 2048);
        fill_gv_table(c->table_gV, 4, cgv);
        break;
    default:
        c->yuvTable = NULL;
        if(!isPlanar(c->dstFormat) || bpp <= 24)
            av_log(c, AV_LOG_ERROR, "%ibpp not supported by yuv2rgb\n", bpp);
        return -1;
    }
    return 0;
}
|
# gioctl [](http://godoc.org/github.com/wolfeidau/gioctl)
Simple library which provides golang versions of the ioctl macros in linux.
# References
* https://github.com/luismesas/goPi — this library initially started with the IOCTL code from this project.
* http://www.circlemud.org/jelson/software/fusd/docs/node31.html good information on IOCTL macros.
# License
This code is Copyright (c) 2014 Mark Wolfe and licensed under the MIT license. All rights not explicitly granted in the MIT license are reserved. See the included LICENSE.md file for more details.
// -----------------------------------------------------------------------------
// Globals
// Major version of Flash required
var requiredMajorVersion = 8;
// Minor version of Flash required
var requiredMinorVersion = 0;
// Revision of Flash required
var requiredRevision = 0;
// the version of javascript supported
var jsVersion = 1.0;
// -----------------------------------------------------------------------------
var isIE = (navigator.appVersion.indexOf("MSIE") != -1) ? true : false;
var isWin = (navigator.appVersion.toLowerCase().indexOf("win") != -1) ? true : false;
var isOpera = (navigator.userAgent.indexOf("Opera") != -1) ? true : false;
// NOTE(review): unconditionally overwrites the 1.0 default above; the original
// Macromedia snippet set this from a language="javascript1.1" script block.
jsVersion = 1.1;
// JavaScript helper required to detect Flash Player PlugIn version information
function JSGetSwfVer(i){
// NS/Opera version >= 3 check for Flash plugin in plugin array
if (navigator.plugins != null && navigator.plugins.length > 0) {
if (navigator.plugins["Shockwave Flash 2.0"] || navigator.plugins["Shockwave Flash"]) {
var swVer2 = navigator.plugins["Shockwave Flash 2.0"] ? " 2.0" : "";
var flashDescription = navigator.plugins["Shockwave Flash" + swVer2].description;
descArray = flashDescription.split(" ");
tempArrayMajor = descArray[2].split(".");
versionMajor = tempArrayMajor[0];
versionMinor = tempArrayMajor[1];
if ( descArray[3] != "" ) {
tempArrayMinor = descArray[3].split("r");
} else {
tempArrayMinor = descArray[4].split("r");
}
versionRevision = tempArrayMinor[1] > 0 ? tempArrayMinor[1] : 0;
flashVer = versionMajor + "." + versionMinor + "." + versionRevision;
} else {
flashVer = -1;
}
}
// MSN/WebTV 2.6 supports Flash 4
else if (navigator.userAgent.toLowerCase().indexOf("webtv/2.6") != -1) flashVer = 4;
// WebTV 2.5 supports Flash 3
else if (navigator.userAgent.toLowerCase().indexOf("webtv/2.5") != -1) flashVer = 3;
// older WebTV supports Flash 2
else if (navigator.userAgent.toLowerCase().indexOf("webtv") != -1) flashVer = 2;
// Can't detect in all other cases
else {
flashVer = -1;
}
return flashVer;
}
// When called with reqMajorVer, reqMinorVer, reqRevision returns true if that version or greater is available
function DetectFlashVer(reqMajorVer, reqMinorVer, reqRevision)
{
reqVer = parseFloat(reqMajorVer + "." + reqRevision);
// loop backwards through the versions until we find the newest version
for (i=25;i>0;i--) {
if (isIE && isWin && !isOpera) {
versionStr = VBGetSwfVer(i);
} else {
versionStr = JSGetSwfVer(i);
}
if (versionStr == -1 ) {
return false;
} else if (versionStr != 0) {
if(isIE && isWin && !isOpera) {
tempArray = versionStr.split(" ");
tempString = tempArray[1];
versionArray = tempString .split(",");
} else {
versionArray = versionStr.split(".");
}
versionMajor = versionArray[0];
versionMinor = versionArray[1];
versionRevision = versionArray[2];
versionString = versionMajor + "." + versionRevision; // 7.0r24 == 7.24
versionNum = parseFloat(versionString);
// is the major.revision >= requested major.revision AND the minor version >= requested minor
if ( (versionMajor > reqMajorVer) && (versionNum >= reqVer) ) {
return true;
} else {
return ((versionNum >= reqVer && versionMinor >= reqMinorVer) ? true : false );
}
}
}
}
|
#
# (c) 2016 Red Hat Inc.
#
# (c) 2017 Dell EMC.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import json
from ansible.module_utils._text import to_text, to_bytes
from ansible.plugins.terminal import TerminalBase
from ansible.errors import AnsibleConnectionFailure
class TerminalModule(TerminalBase):
    """Terminal plugin for Dell EMC OS6 CLI sessions.

    Supplies the prompt/error regexes used to delimit command output and
    implements privilege escalation (``enable``/``disable``) handling.
    """

    # Prompt patterns: "host#", "host>", optionally with up to three
    # "(...)" mode suffixes (e.g. "(config)"), or a bracketed user@host form.
    terminal_stdout_re = [
        re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
        re.compile(br"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$")
    ]

    # Patterns that mark a command's output as an error.
    terminal_stderr_re = [
        re.compile(br"% ?Bad secret"),
        re.compile(br"(\bInterface is part of a port-channel\b)"),
        re.compile(br"(\bThe maximum number of users have already been created\b)|(\bUse '-' for range\b)"),
        re.compile(br"(?:incomplete|ambiguous) command", re.I),
        re.compile(br"connection timed out", re.I),
        re.compile(br"'[^']' +returned error code: ?\d+"),
        re.compile(br"Invalid|invalid.*$", re.I),
        re.compile(br"((\bout of range\b)|(\bnot found\b)|(\bCould not\b)|(\bUnable to\b)|(\bCannot\b)|(\bError\b)).*", re.I),
        re.compile(br"((\balready exists\b)|(\bnot exist\b)|(\bnot active\b)|(\bFailed\b)|(\bIncorrect\b)|(\bnot enabled\b)).*", re.I),
    ]

    # Answer "y" to the initial (y/n) confirmation prompt, if shown.
    terminal_initial_prompt = br"\(y/n\)"

    terminal_initial_answer = b"y"

    # NOTE(review): 'inital' spelling presumably matches the attribute name
    # declared by the TerminalBase base class — verify before renaming.
    terminal_inital_prompt_newline = False

    def on_open_shell(self):
        """Disable CLI paging as soon as the shell opens."""
        try:
            self._exec_cli_command(b'terminal length 0')
        except AnsibleConnectionFailure:
            raise AnsibleConnectionFailure('unable to set terminal parameters')

    def on_become(self, passwd=None):
        """Enter privileged (enable) mode, optionally supplying a password."""
        # Already privileged if the prompt ends with '#'.
        if self._get_prompt().endswith(b'#'):
            return

        cmd = {u'command': u'enable'}
        if passwd:
            cmd[u'prompt'] = to_text(r"[\r\n]?password:$", errors='surrogate_or_strict')
            cmd[u'answer'] = passwd
        try:
            self._exec_cli_command(to_bytes(json.dumps(cmd), errors='surrogate_or_strict'))
        except AnsibleConnectionFailure:
            raise AnsibleConnectionFailure('unable to elevate privilege to enable mode')
        # in dellos6 the terminal settings are accepted after the privilege mode
        try:
            self._exec_cli_command(b'terminal length 0')
        except AnsibleConnectionFailure:
            raise AnsibleConnectionFailure('unable to set terminal parameters')

    def on_unbecome(self):
        """Drop back out of privileged/config mode, if currently in one."""
        prompt = self._get_prompt()
        if prompt is None:
            # if prompt is None most likely the terminal is hung up at a prompt
            return

        if prompt.strip().endswith(b')#'):
            # in a config sub-mode: leave it first, then drop privilege
            self._exec_cli_command(b'end')
            self._exec_cli_command(b'disable')

        elif prompt.endswith(b'#'):
            self._exec_cli_command(b'disable')
|
/**
 *
 * @defgroup app_pwm_config PWM functionality configuration
 * @{
 * @ingroup app_pwm
 */
/** @brief Enabling PWM module
 *
 * Set to 1 to activate.
 *
 * @note This is an NRF_CONFIG macro.
 */
/* Fix: the macro previously had no value, so `#if APP_PWM_ENABLED` checks
 * (the NRF_CONFIG convention its own doc describes) would fail to compile.
 * Guarded so a build system -D flag can still override it. */
#ifndef APP_PWM_ENABLED
#define APP_PWM_ENABLED 1
#endif
/** @} */
|
package com.puppycrawl.tools.checkstyle.checks.design;
public class InputMutableException {
public class FooException extends Exception {
private final int finalErrorCode;
private int errorCode = 1;
public FooException() {
finalErrorCode = 1;
}
public class FooExceptionThisIsNot extends RuntimeException {
private final int finalErrorCode;
private int errorCode = 1;
/** constructor */
public FooExceptionThisIsNot() {
finalErrorCode = 1;
}
}
}
public class BarError extends Throwable {
private int errorCode;
}
public class BazDoesNotExtendError {
private int errorCode;
}
public class CustomProblem extends ThreadDeath {
private int errorCode;
public class CustomFailure extends ThreadDeath {
private int errorCode;
public void someMethod() {
if(true) {
final int i = 0;
}
}
}
}
class CustomException extends java.lang.Exception {}
class CustomMutableException extends java.lang.Exception {
int errorCode;
}
}
|
/*
LUFA Library
Copyright (C) Dean Camera, 2011.
dean [at] fourwalledcubicle [dot] com
www.lufa-lib.org
*/
/*
Copyright 2011 Dean Camera (dean [at] fourwalledcubicle [dot] com)
Permission to use, copy, modify, distribute, and sell this
software and its documentation for any purpose is hereby granted
without fee, provided that the above copyright notice appear in
all copies and that both that the copyright notice and this
permission notice and warranty disclaimer appear in supporting
documentation, and that the name of the author not be used in
advertising or publicity pertaining to distribution of the
software without specific, written prior permission.
The author disclaim all warranties with regard to this
software, including all implied warranties of merchantability
and fitness. In no event shall the author be liable for any
special, indirect or consequential damages or any damages
whatsoever resulting from loss of use, data or profits, whether
in an action of contract, negligence or other tortious action,
arising out of or in connection with the use or performance of
this software.
*/
/** \file
*
* Header file for TCP.c.
*/
#ifndef _TCP_H_
#define _TCP_H_
/* Includes: */
#include <avr/io.h>
#include <stdbool.h>
#include "EthernetProtocols.h"
#include "Ethernet.h"
#include "ProtocolDecoders.h"
/* Macros: */
/** Maximum number of TCP ports which can be open at the one time. */
#define MAX_OPEN_TCP_PORTS 1
/** Maximum number of TCP connections which can be sustained at the one time. */
#define MAX_TCP_CONNECTIONS 3
/** TCP window size, giving the maximum number of bytes which can be buffered at the one time. */
#define TCP_WINDOW_SIZE 512
/** Port number for HTTP transmissions. */
#define TCP_PORT_HTTP SwapEndian_16(80)
/** Data direction indicator for a TCP application buffer, indicating data from host-to-device. */
#define TCP_PACKETDIR_IN false
/** Data direction indicator for a TCP application buffer, indicating data from device-to-host. */
#define TCP_PACKETDIR_OUT true
/** Congestion Window Reduced TCP flag mask. */
#define TCP_FLAG_CWR (1 << 7)
/** Explicit Congestion Notification TCP flag mask. */
#define TCP_FLAG_ECE (1 << 6)
/** Urgent TCP flag mask. */
#define TCP_FLAG_URG (1 << 5)
/** Data Acknowledge TCP flag mask. */
#define TCP_FLAG_ACK (1 << 4)
/** Data Push TCP flag mask. */
#define TCP_FLAG_PSH (1 << 3)
/** Reset TCP flag mask. */
#define TCP_FLAG_RST (1 << 2)
/** Synchronize TCP flag mask. */
#define TCP_FLAG_SYN (1 << 1)
/** Connection Finalize TCP flag mask. */
#define TCP_FLAG_FIN (1 << 0)
/** Application macro: Determines if the given application buffer contains a packet received from the host
*
* \param[in] Buffer Application buffer to check
*
* \return Boolean true if the buffer contains a packet from the host, false otherwise
*/
#define TCP_APP_HAS_RECEIVED_PACKET(Buffer) (Buffer->Ready && (Buffer->Direction == TCP_PACKETDIR_IN))
/** Application macro: Indicates if the application buffer is currently locked by the application for device-to-host transfers.
*
* \param[in] Buffer Application buffer to check
*
* \return Boolean true if the buffer has been captured by the application for device-to-host transmissions, false otherwise
*/
#define TCP_APP_HAVE_CAPTURED_BUFFER(Buffer) (!(Buffer->Ready) && Buffer->InUse && (Buffer->Direction == TCP_PACKETDIR_OUT))
/** Application macro: Indicates if the application can lock the buffer for multiple continued device-to-host transmissions.
*
* \param[in] Buffer Application buffer to check
*
* \return Boolean true if the buffer may be captured by the application for device-to-host transmissions, false otherwise
*/
#define TCP_APP_CAN_CAPTURE_BUFFER(Buffer) Buffer->InUse
/** Application macro: Captures the application buffer, locking it for device-to-host transmissions only. This should be
* performed when the application needs to transmit several packets worth of data in succession with no interruptions from the host.
*
* \pre The application must check that the buffer can be locked first using TCP_APP_CAN_CAPTURE_BUFFER().
*
* \param[in] Buffer Application buffer to lock
*/
#define TCP_APP_CAPTURE_BUFFER(Buffer) MACROS{ Buffer->Direction = TCP_PACKETDIR_OUT; Buffer->InUse = true; }MACROE
/** Application macro: Releases a captured application buffer, allowing for host-to-device packets to be received.
*
* \param[in] Buffer Application buffer to release
*/
#define TCP_APP_RELEASE_BUFFER(Buffer) MACROS{ Buffer->InUse = false; }MACROE
/** Application macro: Sends the contents of the given application buffer to the host.
*
* \param[in] Buffer Application buffer to send
* \param[in] Len Length of data contained in the buffer
*/
#define TCP_APP_SEND_BUFFER(Buffer, Len) MACROS{ Buffer->Direction = TCP_PACKETDIR_OUT; Buffer->Length = Len; Buffer->Ready = true; }MACROE
/** Application macro: Clears the application buffer, ready for a packet to be written to it.
*
* \param[in] Buffer Application buffer to clear
*/
#define TCP_APP_CLEAR_BUFFER(Buffer) MACROS{ Buffer->Ready = false; Buffer->Length = 0; }MACROE
/** Application macro: Closes an open connection to a host.
*
* \param[in] Connection Open TCP connection to close
*/
#define TCP_APP_CLOSECONNECTION(Connection) MACROS{ Connection->State = TCP_Connection_Closing; }MACROE
/* Enums: */
/** Enum for possible TCP port states. */
enum TCP_PortStates_t
{
TCP_Port_Closed = 0, /**< TCP port closed, no connections to a host may be made on this port. */
TCP_Port_Open = 1, /**< TCP port open, connections to a host may be made on this port. */
};
/** Enum for possible TCP connection states. */
enum TCP_ConnectionStates_t
{
TCP_Connection_Listen = 0, /**< Listening for a connection from a host */
TCP_Connection_SYNSent = 1, /**< Unused */
TCP_Connection_SYNReceived = 2, /**< SYN received, waiting for ACK */
TCP_Connection_Established = 3, /**< Connection established in both directions */
TCP_Connection_FINWait1 = 4, /**< Closing, waiting for ACK */
TCP_Connection_FINWait2 = 5, /**< Closing, waiting for FIN ACK */
TCP_Connection_CloseWait = 6, /**< Closing, waiting for ACK */
TCP_Connection_Closing = 7, /**< Unused */
TCP_Connection_LastACK = 8, /**< Unused */
TCP_Connection_TimeWait = 9, /**< Unused */
TCP_Connection_Closed = 10, /**< Connection closed in both directions */
};
	/* Type Defines: */
		/** Type define for a TCP connection buffer structure, including size, data and direction. */
		typedef struct
		{
			uint16_t               Length; /**< Length of data in the TCP application buffer */
			uint8_t                Data[TCP_WINDOW_SIZE]; /**< TCP application data buffer */
			bool                   Direction; /**< Buffer transmission direction, either TCP_PACKETDIR_IN or TCP_PACKETDIR_OUT */
			bool                   Ready; /**< If data from host, indicates buffer ready to be read, otherwise indicates
			                               *   buffer ready to be sent to the host
			                               */
			bool                   InUse; /**< Indicates if the buffer is locked to to the current direction, and cannot be changed */
		} TCP_ConnectionBuffer_t;

		/** Type define for a TCP connection information structure. */
		typedef struct
		{
			uint32_t               SequenceNumberIn; /**< Current TCP sequence number for host-to-device */
			uint32_t               SequenceNumberOut; /**< Current TCP sequence number for device-to-host */
			TCP_ConnectionBuffer_t Buffer; /**< Connection application data buffer */
		} TCP_ConnectionInfo_t;

		/** Type define for a complete TCP connection state. */
		typedef struct
		{
			uint16_t               Port; /**< Connection port number on the device */
			uint16_t               RemotePort; /**< Connection port number on the host */
			IP_Address_t           RemoteAddress; /**< Connection protocol IP address of the host */
			TCP_ConnectionInfo_t   Info; /**< Connection information, including application buffer */
			uint8_t                State; /**< Current connection state, a value from the \ref TCP_ConnectionStates_t enum */
		} TCP_ConnectionState_t;

		/** Type define for a TCP port state. */
		typedef struct
		{
			uint16_t               Port; /**< TCP port number on the device */
			uint8_t                State; /**< Current port state, a value from the \ref TCP_PortStates_t enum */
			void                   (*ApplicationHandler) (TCP_ConnectionState_t* ConnectionState,
			                                              TCP_ConnectionBuffer_t* Buffer); /**< Port application handler */
		} TCP_PortState_t;

		/** Type define for a TCP packet header.
		 *
		 *  NOTE(review): field order and the bitfield pair below presumably
		 *  match the on-wire layout as laid out by avr-gcc; bitfield
		 *  ordering is compiler-dependent, so verify before porting.
		 */
		typedef struct
		{
			uint16_t               SourcePort; /**< Source port of the TCP packet */
			uint16_t               DestinationPort; /**< Destination port of the TCP packet */

			uint32_t               SequenceNumber; /**< Data sequence number of the packet */
			uint32_t               AcknowledgmentNumber; /**< Data acknowledgment number of the packet */

			unsigned               Reserved   : 4; /**< Reserved, must be all 0 */
			unsigned               DataOffset : 4; /**< Offset of the data from the start of the header, in 4 byte chunks */
			uint8_t                Flags; /**< TCP packet flags */
			uint16_t               WindowSize; /**< Current data window size (bytes remaining in reception buffer) */

			uint16_t               Checksum; /**< TCP checksum */
			uint16_t               UrgentPointer; /**< Urgent data pointer */
		} TCP_Header_t;
	/* Function Prototypes: */
		/* Public TCP service API — implemented in TCP.c. */
		void                  TCP_TCPTask(USB_ClassInfo_RNDIS_Device_t* const RNDISInterfaceInfo,
		                                  Ethernet_Frame_Info_t* const FrameOUT);
		void                  TCP_Init(void);
		bool                  TCP_SetPortState(const uint16_t Port,
		                                       const uint8_t State,
		                                       void (*Handler)(TCP_ConnectionState_t*, TCP_ConnectionBuffer_t*));
		uint8_t               TCP_GetPortState(const uint16_t Port);
		bool                  TCP_SetConnectionState(const uint16_t Port,
		                                             const IP_Address_t RemoteAddress,
		                                             const uint16_t RemotePort,
		                                             const uint8_t State);
		uint8_t               TCP_GetConnectionState(const uint16_t Port,
		                                             const IP_Address_t RemoteAddress,
		                                             const uint16_t RemotePort);
		TCP_ConnectionInfo_t* TCP_GetConnectionInfo(const uint16_t Port,
		                                            const IP_Address_t RemoteAddress,
		                                            const uint16_t RemotePort);
		int16_t               TCP_ProcessTCPPacket(void* IPHeaderInStart,
		                                           void* TCPHeaderInStart,
		                                           void* TCPHeaderOutStart);

		/* Internal helper, visible only to TCP.c itself. */
		#if defined(INCLUDE_FROM_TCP_C)
			static uint16_t TCP_Checksum16(void* TCPHeaderOutStart,
			                               const IP_Address_t SourceAddress,
			                               const IP_Address_t DestinationAddress,
			                               uint16_t TCPOutSize);
		#endif

#endif
|
/*
* Copyright 2011 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.css.compiler.passes;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.css.compiler.ast.CssCompilerPass;
import com.google.common.css.compiler.ast.CssKeyNode;
import com.google.common.css.compiler.ast.CssKeyframesNode;
import com.google.common.css.compiler.ast.DefaultTreeVisitor;
import com.google.common.css.compiler.ast.ErrorManager;
import com.google.common.css.compiler.ast.GssError;
import com.google.common.css.compiler.ast.MutatingVisitController;
/**
 * Compiler pass which ensures that {@code @keyframes} rules are only allowed
 * if they are enabled. In addition this pass checks if the keys are between
 * 0% and 100%. If CSS simplification is enabled, "from" is replaced by "0%"
 * and "100%" is replaced by "to".
 *
 * @author fbenz@google.com (Florian Benz)
 */
public class ProcessKeyframes extends DefaultTreeVisitor
    implements CssCompilerPass {
  @VisibleForTesting
  static final String KEYFRAMES_NOT_ALLOWED_ERROR_MESSAGE =
      "a @keyframes rule occurred but the option for it is disabled";
  @VisibleForTesting
  static final String WRONG_KEY_VALUE_ERROR_MESSAGE =
      "the value of the key is not between 0% and 100%";
  static final String INVALID_NUMBER_ERROR_MESSAGE =
      "the value of the key is invalid (not 'from', 'to', or 'XXX.XXX%')";

  private final MutatingVisitController visitController;
  private final ErrorManager errorManager;
  private final boolean keyframesAllowed;
  private final boolean simplifyCss;

  /**
   * @param visitController controller used to walk the AST in {@link #runPass}
   * @param errorManager sink for reported {@link GssError}s
   * @param keyframesAllowed whether {@code @keyframes} rules are permitted
   * @param simplifyCss whether keys should be rewritten to their shortest form
   */
  public ProcessKeyframes(MutatingVisitController visitController,
      ErrorManager errorManager,
      boolean keyframesAllowed,
      boolean simplifyCss) {
    this.visitController = visitController;
    this.errorManager = errorManager;
    this.keyframesAllowed = keyframesAllowed;
    this.simplifyCss = simplifyCss;
  }

  @Override
  public boolean enterKeyframesRule(CssKeyframesNode node) {
    // Reject the whole rule (and skip its children) when disabled.
    if (!keyframesAllowed) {
      errorManager.report(new GssError(KEYFRAMES_NOT_ALLOWED_ERROR_MESSAGE,
          node.getSourceCodeLocation()));
    }
    return keyframesAllowed;
  }

  @Override
  public boolean enterKey(CssKeyNode node) {
    if (!keyframesAllowed) {
      return false;
    }
    String value = node.getKeyValue();
    // -1 marks "no numeric percentage", i.e. the key was 'from' or 'to'.
    float percentage = -1;
    if (value.contains("%")) {
      try {
        // parse to a float by excluding '%'
        percentage = Float.parseFloat(value.substring(0, value.length() - 1));
      } catch (NumberFormatException e) {
        // should not happen if the generated parser works correctly
        errorManager.report(new GssError(INVALID_NUMBER_ERROR_MESSAGE,
            node.getSourceCodeLocation()));
        return false;
      }
      if (!checkRangeOfPercentage(node, percentage)) {
        return false;
      }
    } else if (!value.equals("from") && !value.equals("to")) {
      errorManager.report(new GssError(INVALID_NUMBER_ERROR_MESSAGE,
          node.getSourceCodeLocation()));
      return false;
    }
    if (simplifyCss) {
      compactRepresentation(node, percentage);
    }
    return true;
  }

  /**
   * Checks if the percentage is between 0% and 100% inclusive.
   *
   * @param node The {@link CssKeyNode} to get the location in case of an error
   * @param percentage The value represented as a float
   * @return Returns true if there is no error
   */
  private boolean checkRangeOfPercentage(CssKeyNode node, float percentage) {
    if (percentage < 0 || percentage > 100) {
      errorManager.report(new GssError(WRONG_KEY_VALUE_ERROR_MESSAGE,
          node.getSourceCodeLocation()));
      return false;
    }
    return true;
  }

  /**
   * Shortens the representation of the key: "from" becomes "0%", "100%"
   * becomes "to", and numeric keys lose redundant leading/trailing zeros
   * (e.g. "0.50%" -> ".5%", "20.0%" -> "20%").
   *
   * @param node The {@link CssKeyNode} where the percentage belongs to.
   * @param percentage The value represented as a float, or -1 if the key
   *     was not numeric ('from'/'to')
   */
  @VisibleForTesting
  void compactRepresentation(CssKeyNode node, float percentage) {
    if (node.getKeyValue().equals("from")) {
      node.setKeyValue("0%");
    } else if (percentage == 100) {
      node.setKeyValue("to");
    } else if (percentage != -1) {
      String percentageStr = Float.toString(percentage);
      if (0 < percentage && percentage < 1) {
        // eliminate an unnecessary leading 0
        percentageStr = percentageStr.substring(1);
      }
      // eliminate trailing zeros like in 0.0
      percentageStr = percentageStr.replaceAll("0+$", "");
      if (percentageStr.endsWith(".")) {
        // if the number ends with '.' then eliminate that too
        percentageStr = percentageStr.substring(0, percentageStr.length() - 1);
      }
      node.setKeyValue(percentageStr + "%");
    }
  }

  @Override
  public void runPass() {
    visitController.startVisit(this);
  }
}
|
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "src/core/channel/client_channel.h"
#include <stdio.h>
#include "src/core/channel/channel_args.h"
#include "src/core/channel/child_channel.h"
#include "src/core/channel/connected_channel.h"
#include "src/core/channel/metadata_buffer.h"
#include "src/core/iomgr/iomgr.h"
#include "src/core/support/string.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/useful.h>
/* Client channel implementation */

typedef struct call_data call_data;

/* Per-channel state for the client channel filter (the bottom element of a
   client channel stack). */
typedef struct {
  /* protects children, child_count, child_capacity, active_child,
     transport_setup_initiated
     does not protect channel stacks held by children
     transport_setup is assumed to be set once during construction */
  gpr_mu mu;
  /* the sending child (may be null) */
  grpc_child_channel *active_child;
  /* calls waiting for a channel to be ready */
  call_data **waiting_children;
  size_t waiting_child_count;
  size_t waiting_child_capacity;
  /* transport setup for this channel */
  grpc_transport_setup *transport_setup;
  int transport_setup_initiated;
  grpc_channel_args *args;
  /* metadata cache: pre-built "grpc-status: CANCELLED" element used when
     synthesizing a status for locally cancelled calls */
  grpc_mdelem *cancel_status;
} channel_data;

/* Call lifecycle states for this filter. */
typedef enum {
  CALL_CREATED,   /* constructed, no start op seen yet */
  CALL_WAITING,   /* started, parked until a child channel becomes ready */
  CALL_ACTIVE,    /* bound to a child call on the active child channel */
  CALL_CANCELLED  /* cancelled; subsequent send ops fail */
} call_state;

/* Per-call state. The union reflects the state machine: 'waiting' fields
   are only valid in CALL_WAITING, 'active' only in CALL_ACTIVE. */
struct call_data {
  /* owning element */
  grpc_call_element *elem;
  call_state state;
  grpc_metadata_buffer pending_metadata;
  gpr_timespec deadline;
  union {
    struct {
      /* our child call stack */
      grpc_child_call *child_call;
    } active;
    struct {
      /* stashed start op, replayed once a child channel exists */
      void (*on_complete)(void *user_data, grpc_op_error error);
      void *on_complete_user_data;
      gpr_uint32 start_flags;
      grpc_pollset *pollset;
    } waiting;
  } s;
};
/* First half of call activation, run under the channel lock: transition a
   WAITING call to ACTIVE and create its child call on 'on_child'.
   Returns 0 (and does nothing) if the call was already cancelled,
   1 on success. */
static int prepare_activate(grpc_call_element *elem,
                            grpc_child_channel *on_child) {
  call_data *calld = elem->call_data;
  int cancelled = (calld->state == CALL_CANCELLED);
  if (!cancelled) {
    /* no more access to calld->s.waiting allowed */
    GPR_ASSERT(calld->state == CALL_WAITING);
    calld->state = CALL_ACTIVE;
    /* create a child call */
    calld->s.active.child_call = grpc_child_channel_create_call(on_child, elem);
  }
  return !cancelled;
}
/* No-op completion callback for ops synthesized internally. */
static void do_nothing(void *ignored, grpc_op_error error) {}

/* Second half of call activation, run OUTSIDE the channel lock: flush any
   buffered metadata and the (optional) deadline down the newly created
   child stack, then forward the start op itself. */
static void complete_activate(grpc_call_element *elem, grpc_call_op *op) {
  call_data *calld = elem->call_data;
  grpc_call_element *child_elem =
      grpc_child_call_get_top_element(calld->s.active.child_call);
  GPR_ASSERT(calld->state == CALL_ACTIVE);
  /* sending buffered metadata down the stack before the start call */
  grpc_metadata_buffer_flush(&calld->pending_metadata, child_elem);
  /* only propagate a deadline if the caller actually set one */
  if (gpr_time_cmp(calld->deadline, gpr_inf_future) != 0) {
    grpc_call_op dop;
    dop.type = GRPC_SEND_DEADLINE;
    dop.dir = GRPC_CALL_DOWN;
    dop.flags = 0;
    dop.data.deadline = calld->deadline;
    dop.done_cb = do_nothing;
    dop.user_data = NULL;
    child_elem->filter->call_op(child_elem, elem, &dop);
  }
  /* continue the start call down the stack, this needs to happen after
     metadata are flushed */
  child_elem->filter->call_op(child_elem, elem, op);
}
/* Handle GRPC_SEND_START: bind the call to the active child channel if one
   exists, otherwise park it on the waiting list (initiating transport setup
   the first time). All downstream calls are made outside the lock. */
static void start_rpc(grpc_call_element *elem, grpc_call_op *op) {
  call_data *calld = elem->call_data;
  channel_data *chand = elem->channel_data;
  gpr_mu_lock(&chand->mu);
  if (calld->state == CALL_CANCELLED) {
    /* cancelled before it ever started: fail the start op immediately */
    gpr_mu_unlock(&chand->mu);
    op->done_cb(op->user_data, GRPC_OP_ERROR);
    return;
  }
  GPR_ASSERT(calld->state == CALL_CREATED);
  calld->state = CALL_WAITING;
  if (chand->active_child) {
    /* channel is connected - use the connected stack */
    if (prepare_activate(elem, chand->active_child)) {
      gpr_mu_unlock(&chand->mu);
      /* activate the request (pass it down) outside the lock */
      complete_activate(elem, op);
    } else {
      gpr_mu_unlock(&chand->mu);
    }
  } else {
    /* check to see if we should initiate a connection (if we're not already),
       but don't do so until outside the lock to avoid re-entrancy problems if
       the callback is immediate */
    int initiate_transport_setup = 0;
    if (!chand->transport_setup_initiated) {
      chand->transport_setup_initiated = 1;
      initiate_transport_setup = 1;
    }
    /* add this call to the waiting set to be resumed once we have a child
       channel stack, growing the waiting set if needed */
    if (chand->waiting_child_count == chand->waiting_child_capacity) {
      chand->waiting_child_capacity =
          GPR_MAX(chand->waiting_child_capacity * 2, 8);
      chand->waiting_children =
          gpr_realloc(chand->waiting_children,
                      chand->waiting_child_capacity * sizeof(call_data *));
    }
    /* stash the start op so transport_setup_complete can replay it later */
    calld->s.waiting.on_complete = op->done_cb;
    calld->s.waiting.on_complete_user_data = op->user_data;
    calld->s.waiting.start_flags = op->flags;
    calld->s.waiting.pollset = op->data.start.pollset;
    chand->waiting_children[chand->waiting_child_count++] = calld;
    gpr_mu_unlock(&chand->mu);
    /* finally initiate transport setup if needed */
    if (initiate_transport_setup) {
      grpc_transport_setup_initiate(chand->transport_setup);
    }
  }
}
/* Remove 'calld' from chand->waiting_children by compacting the array in
   place. Caller must hold chand->mu. */
static void remove_waiting_child(channel_data *chand, call_data *calld) {
  size_t kept = 0;
  size_t i;
  for (i = 0; i < chand->waiting_child_count; i++) {
    call_data *child = chand->waiting_children[i];
    if (child != calld) {
      chand->waiting_children[kept] = child;
      kept++;
    }
  }
  /* a call appears at most once, so at most one entry can be dropped */
  GPR_ASSERT(kept == chand->waiting_child_count - 1 ||
             kept == chand->waiting_child_count);
  chand->waiting_child_count = kept;
}
/* Synthesize the upward ops a cancelled call must still observe: a
   RECV_METADATA carrying the cached "grpc-status: CANCELLED" element,
   followed by a RECV_FINISH. */
static void send_up_cancelled_ops(grpc_call_element *elem) {
  grpc_call_op finish_op;
  channel_data *chand = elem->channel_data;
  /* send up a synthesized status */
  finish_op.type = GRPC_RECV_METADATA;
  finish_op.dir = GRPC_CALL_UP;
  finish_op.flags = 0;
  finish_op.data.metadata = grpc_mdelem_ref(chand->cancel_status);
  finish_op.done_cb = do_nothing;
  finish_op.user_data = NULL;
  grpc_call_next_op(elem, &finish_op);
  /* send up a finish */
  finish_op.type = GRPC_RECV_FINISH;
  finish_op.dir = GRPC_CALL_UP;
  finish_op.flags = 0;
  finish_op.done_cb = do_nothing;
  finish_op.user_data = NULL;
  grpc_call_next_op(elem, &finish_op);
}
/* Handle GRPC_CANCEL_OP, dispatching on the call's current state. The
   channel lock is always released before invoking any callback or
   downstream op. */
static void cancel_rpc(grpc_call_element *elem, grpc_call_op *op) {
  call_data *calld = elem->call_data;
  channel_data *chand = elem->channel_data;
  grpc_call_element *child_elem;
  gpr_mu_lock(&chand->mu);
  switch (calld->state) {
    case CALL_ACTIVE:
      /* already bound to a child: let the child stack handle the cancel */
      child_elem = grpc_child_call_get_top_element(calld->s.active.child_call);
      gpr_mu_unlock(&chand->mu);
      child_elem->filter->call_op(child_elem, elem, op);
      return; /* early out */
    case CALL_WAITING:
      /* parked: unpark it, synthesize status, fail the pending start op */
      remove_waiting_child(chand, calld);
      calld->state = CALL_CANCELLED;
      gpr_mu_unlock(&chand->mu);
      send_up_cancelled_ops(elem);
      calld->s.waiting.on_complete(calld->s.waiting.on_complete_user_data,
                                   GRPC_OP_ERROR);
      return; /* early out */
    case CALL_CREATED:
      /* never started: just mark cancelled and synthesize status */
      calld->state = CALL_CANCELLED;
      gpr_mu_unlock(&chand->mu);
      send_up_cancelled_ops(elem);
      return; /* early out */
    case CALL_CANCELLED:
      /* cancelling twice is a no-op */
      gpr_mu_unlock(&chand->mu);
      return; /* early out */
  }
  gpr_log(GPR_ERROR, "should never reach here");
  abort();
}
/* Per-call op dispatch for this filter. */
static void call_op(grpc_call_element *elem, grpc_call_element *from_elem,
                    grpc_call_op *op) {
  call_data *calld = elem->call_data;
  GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
  GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
  switch (op->type) {
    case GRPC_SEND_METADATA:
      /* buffer metadata until the call is bound to a child stack
         (flushed in complete_activate) */
      grpc_metadata_buffer_queue(&calld->pending_metadata, op);
      break;
    case GRPC_SEND_DEADLINE:
      /* remember the deadline; it is forwarded during activation */
      calld->deadline = op->data.deadline;
      op->done_cb(op->user_data, GRPC_OP_OK);
      break;
    case GRPC_SEND_START:
      /* filter out the start event to find which child to send on */
      start_rpc(elem, op);
      break;
    case GRPC_CANCEL_OP:
      cancel_rpc(elem, op);
      break;
    case GRPC_SEND_MESSAGE:
    case GRPC_SEND_FINISH:
    case GRPC_REQUEST_DATA:
      if (calld->state == CALL_ACTIVE) {
        grpc_call_element *child_elem =
            grpc_child_call_get_top_element(calld->s.active.child_call);
        child_elem->filter->call_op(child_elem, elem, op);
      } else {
        /* not bound to a child (waiting or cancelled): fail the op */
        op->done_cb(op->user_data, GRPC_OP_ERROR);
      }
      break;
    default:
      /* anything else must be flowing upward; pass it along */
      GPR_ASSERT(op->dir == GRPC_CALL_UP);
      grpc_call_next_op(elem, op);
      break;
  }
}
/* Per-channel op dispatch: manages the lifetime of the active child channel
   as goaway/disconnect/close events flow through. Child destruction always
   happens outside the lock. */
static void channel_op(grpc_channel_element *elem,
                       grpc_channel_element *from_elem, grpc_channel_op *op) {
  channel_data *chand = elem->channel_data;
  grpc_child_channel *child_channel;
  grpc_channel_op rop;
  GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
  switch (op->type) {
    case GRPC_CHANNEL_GOAWAY:
      /* sending goaway: clear out the active child on the way through */
      gpr_mu_lock(&chand->mu);
      child_channel = chand->active_child;
      chand->active_child = NULL;
      gpr_mu_unlock(&chand->mu);
      if (child_channel) {
        grpc_child_channel_handle_op(child_channel, op);
        grpc_child_channel_destroy(child_channel, 1);
      } else {
        /* no child to consume the op: release the goaway message ourselves */
        gpr_slice_unref(op->data.goaway.message);
      }
      break;
    case GRPC_CHANNEL_DISCONNECT:
      /* sending disconnect: clear out the active child on the way through */
      gpr_mu_lock(&chand->mu);
      child_channel = chand->active_child;
      chand->active_child = NULL;
      gpr_mu_unlock(&chand->mu);
      if (child_channel) {
        grpc_child_channel_destroy(child_channel, 1);
      }
      /* fake a transport closed to satisfy the refcounting in client */
      rop.type = GRPC_TRANSPORT_CLOSED;
      rop.dir = GRPC_CALL_UP;
      grpc_channel_next_op(elem, &rop);
      break;
    case GRPC_TRANSPORT_GOAWAY:
      /* receiving goaway: if it's from our active child, drop the active child;
         in all cases consume the event here */
      gpr_mu_lock(&chand->mu);
      child_channel = grpc_channel_stack_from_top_element(from_elem);
      if (child_channel == chand->active_child) {
        chand->active_child = NULL;
      } else {
        /* stale child: nothing to destroy */
        child_channel = NULL;
      }
      gpr_mu_unlock(&chand->mu);
      if (child_channel) {
        grpc_child_channel_destroy(child_channel, 0);
      }
      gpr_slice_unref(op->data.goaway.message);
      break;
    case GRPC_TRANSPORT_CLOSED:
      /* receiving disconnect: if it's from our active child, drop the active
         child; in all cases consume the event here */
      gpr_mu_lock(&chand->mu);
      child_channel = grpc_channel_stack_from_top_element(from_elem);
      if (child_channel == chand->active_child) {
        chand->active_child = NULL;
      } else {
        child_channel = NULL;
      }
      gpr_mu_unlock(&chand->mu);
      if (child_channel) {
        grpc_child_channel_destroy(child_channel, 0);
      }
      break;
    default:
      switch (op->dir) {
        case GRPC_CALL_UP:
          grpc_channel_next_op(elem, op);
          break;
        case GRPC_CALL_DOWN:
          /* this is the bottom filter; nothing below can handle it */
          gpr_log(GPR_ERROR, "unhandled channel op: %d", op->type);
          abort();
          break;
      }
      break;
  }
}
/* Placeholder on_complete installed at init time; if it ever fires, a
   waiting completion ran before the call was started - a bug. */
static void error_bad_on_complete(void *arg, grpc_op_error error) {
  gpr_log(GPR_ERROR,
          "Waiting finished but not started? Bad on_complete callback");
  abort();
}

/* Constructor for call_data */
static void init_call_elem(grpc_call_element *elem,
                           const void *server_transport_data) {
  call_data *calld = elem->call_data;
  GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
  /* client-only filter: there is never server transport data */
  GPR_ASSERT(server_transport_data == NULL);
  calld->elem = elem;
  calld->state = CALL_CREATED;
  calld->deadline = gpr_inf_future;
  calld->s.waiting.on_complete = error_bad_on_complete;
  calld->s.waiting.on_complete_user_data = NULL;
  grpc_metadata_buffer_init(&calld->pending_metadata);
}
/* Destructor for call_data */
static void destroy_call_elem(grpc_call_element *elem) {
  call_data *calld = elem->call_data;
  /* if the metadata buffer is not flushed, destroy it here. */
  grpc_metadata_buffer_destroy(&calld->pending_metadata, GRPC_OP_OK);
  /* if the call got activated, we need to destroy the child stack also, and
     remove it from the in-flight requests tracked by the child_entry we
     picked */
  if (calld->state == CALL_ACTIVE) {
    grpc_child_call_destroy(calld->s.active.child_call);
  }
}
/* Constructor for channel_data */
static void init_channel_elem(grpc_channel_element *elem,
                              const grpc_channel_args *args,
                              grpc_mdctx *metadata_context, int is_first,
                              int is_last) {
  channel_data *chand = elem->channel_data;
  char temp[GPR_LTOA_MIN_BUFSIZE];
  /* this filter must be the last (bottom) element of a client stack */
  GPR_ASSERT(!is_first);
  GPR_ASSERT(is_last);
  GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
  gpr_mu_init(&chand->mu);
  chand->active_child = NULL;
  chand->waiting_children = NULL;
  chand->waiting_child_count = 0;
  chand->waiting_child_capacity = 0;
  chand->transport_setup = NULL;
  chand->transport_setup_initiated = 0;
  /* keep a copy of the args for building child channels later */
  chand->args = grpc_channel_args_copy(args);
  /* pre-build the "grpc-status: CANCELLED" metadata element used by
     send_up_cancelled_ops */
  gpr_ltoa(GRPC_STATUS_CANCELLED, temp);
  chand->cancel_status =
      grpc_mdelem_from_strings(metadata_context, "grpc-status", temp);
}
/* Destructor for channel_data */
static void destroy_channel_elem(grpc_channel_element *elem) {
  channel_data *chand = elem->channel_data;
  grpc_transport_setup_cancel(chand->transport_setup);
  if (chand->active_child) {
    grpc_child_channel_destroy(chand->active_child, 1);
    chand->active_child = NULL;
  }
  grpc_channel_args_destroy(chand->args);
  grpc_mdelem_unref(chand->cancel_status);
  gpr_mu_destroy(&chand->mu);
  /* all waiting calls must have been resolved before channel destruction */
  GPR_ASSERT(chand->waiting_child_count == 0);
  gpr_free(chand->waiting_children);
}
/* vtable wiring this filter's callbacks into the channel stack machinery */
const grpc_channel_filter grpc_client_channel_filter = {
    call_op, channel_op, sizeof(call_data),
    init_call_elem, destroy_call_elem, sizeof(channel_data),
    init_channel_elem, destroy_channel_elem, "client-channel", };
/* Called when transport setup finishes: builds a child channel stack around
   the new transport, swaps it in as the active child, and replays every
   parked start op. The replay happens OUTSIDE the lock to avoid
   re-entrancy. */
grpc_transport_setup_result grpc_client_channel_transport_setup_complete(
    grpc_channel_stack *channel_stack, grpc_transport *transport,
    grpc_channel_filter const **channel_filters, size_t num_channel_filters,
    grpc_mdctx *mdctx) {
  /* we just got a new transport: lets create a child channel stack for it */
  grpc_channel_element *elem = grpc_channel_stack_last_element(channel_stack);
  channel_data *chand = elem->channel_data;
  /* +2: link-back filter on top, connected-channel filter on the bottom */
  size_t num_child_filters = 2 + num_channel_filters;
  grpc_channel_filter const **child_filters;
  grpc_transport_setup_result result;
  grpc_child_channel *old_active = NULL;
  call_data **waiting_children;
  size_t waiting_child_count;
  size_t i;
  grpc_call_op *call_ops;
  /* build the child filter stack */
  child_filters = gpr_malloc(sizeof(grpc_channel_filter *) * num_child_filters);
  /* we always need a link back filter to get back to the connected channel */
  child_filters[0] = &grpc_child_channel_top_filter;
  for (i = 0; i < num_channel_filters; i++) {
    child_filters[i + 1] = channel_filters[i];
  }
  /* and we always need a connected channel to talk to the transport */
  child_filters[num_child_filters - 1] = &grpc_connected_channel_filter;
  GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
  /* BEGIN LOCKING CHANNEL */
  gpr_mu_lock(&chand->mu);
  chand->transport_setup_initiated = 0;
  if (chand->active_child) {
    /* remember the previous child; destroyed after we unlock */
    old_active = chand->active_child;
  }
  chand->active_child = grpc_child_channel_create(
      elem, child_filters, num_child_filters, chand->args, mdctx);
  result =
      grpc_connected_channel_bind_transport(chand->active_child, transport);
  /* capture the waiting children - we'll activate them outside the lock
     to avoid re-entrancy problems */
  waiting_children = chand->waiting_children;
  waiting_child_count = chand->waiting_child_count;
  /* bumping up inflight_requests here avoids taking a lock per rpc below */
  chand->waiting_children = NULL;
  chand->waiting_child_count = 0;
  chand->waiting_child_capacity = 0;
  /* rebuild a start op for each parked call from its stashed fields */
  call_ops = gpr_malloc(sizeof(grpc_call_op) * waiting_child_count);
  for (i = 0; i < waiting_child_count; i++) {
    call_ops[i].type = GRPC_SEND_START;
    call_ops[i].dir = GRPC_CALL_DOWN;
    call_ops[i].flags = waiting_children[i]->s.waiting.start_flags;
    call_ops[i].done_cb = waiting_children[i]->s.waiting.on_complete;
    call_ops[i].user_data =
        waiting_children[i]->s.waiting.on_complete_user_data;
    call_ops[i].data.start.pollset = waiting_children[i]->s.waiting.pollset;
    if (!prepare_activate(waiting_children[i]->elem, chand->active_child)) {
      /* call was cancelled while parked: fail its start op instead */
      waiting_children[i] = NULL;
      call_ops[i].done_cb(call_ops[i].user_data, GRPC_OP_ERROR);
    }
  }
  /* END LOCKING CHANNEL */
  gpr_mu_unlock(&chand->mu);
  /* activate any pending operations - this is safe to do as we guarantee one
     and only one write operation per request at the surface api - if we lose
     that guarantee we need to do some curly locking here */
  for (i = 0; i < waiting_child_count; i++) {
    if (waiting_children[i]) {
      complete_activate(waiting_children[i]->elem, &call_ops[i]);
    }
  }
  gpr_free(waiting_children);
  gpr_free(call_ops);
  gpr_free(child_filters);
  if (old_active) {
    grpc_child_channel_destroy(old_active, 1);
  }
  return result;
}
/* Post-construction initialization: install the transport setup pointer.
   May only be called once per channel (asserted). */
void grpc_client_channel_set_transport_setup(grpc_channel_stack *channel_stack,
                                             grpc_transport_setup *setup) {
  /* post construction initialization: set the transport setup pointer */
  grpc_channel_element *elem = grpc_channel_stack_last_element(channel_stack);
  channel_data *chand = elem->channel_data;
  GPR_ASSERT(!chand->transport_setup);
  chand->transport_setup = setup;
}
|
// Copyright 2000-2020 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.task.impl;
import com.intellij.task.ProjectTask;
import org.jetbrains.annotations.Nls;
import org.jetbrains.annotations.NotNull;
import java.util.Collection;
import java.util.Collections;
/**
 * Base {@link ProjectTask} implementation that holds the task's dependency
 * collection.
 *
 * @author Vladislav.Soroka
 */
public abstract class AbstractProjectTask implements ProjectTask {
  @NotNull
  private Collection<ProjectTask> myDependencies;

  /** Creates a task with an empty dependency collection. */
  public AbstractProjectTask() {
    this(Collections.emptyList());
  }

  /** Creates a task with the given dependency collection. */
  public AbstractProjectTask(@NotNull Collection<ProjectTask> tasks) {
    myDependencies = tasks;
  }

  @NotNull
  public Collection<ProjectTask> getDependsOn() {
    return myDependencies;
  }

  public void setDependsOn(@NotNull Collection<ProjectTask> tasks) {
    myDependencies = tasks;
  }

  @Override
  @Nls
  public String toString() {
    return getPresentableName();
  }
}
|
/*************************************************************************/
/* */
/* Centre for Speech Technology Research */
/* University of Edinburgh, UK */
/* Copyright (c) 1995,1996 */
/* All Rights Reserved. */
/* */
/* Permission is hereby granted, free of charge, to use and distribute */
/* this software and its documentation without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of this work, and to */
/* permit persons to whom this work is furnished to do so, subject to */
/* the following conditions: */
/* 1. The code must retain the above copyright notice, this list of */
/* conditions and the following disclaimer. */
/* 2. Any modifications must be clearly marked as such. */
/* 3. Original authors' names are not deleted. */
/* 4. The authors' names are not used to endorse or promote products */
/* derived from this software without specific prior written */
/* permission. */
/* */
/* THE UNIVERSITY OF EDINBURGH AND THE CONTRIBUTORS TO THIS WORK */
/* DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING */
/* ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT */
/* SHALL THE UNIVERSITY OF EDINBURGH NOR THE CONTRIBUTORS BE LIABLE */
/* FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES */
/* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN */
/* AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, */
/* ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF */
/* THIS SOFTWARE. */
/* */
/*************************************************************************/
/**@name EST_track_aux.h
* EST_Track Auxiliary functions
* @author Paul Taylor <pault@cstr.ed.ac.uk>
* @version $Id: EST_track_aux.h,v 1.4 2004/05/24 11:15:51 korin Exp $
*/
//@{
#ifndef __EST_TRACK_AUX_H__
#define __EST_TRACK_AUX_H__
#include "EST_FMatrix.h"
#include "EST_TList.h"
#include "ling_class/EST_Relation.h"
#include "EST_Option.h"
#include "EST_Track.h"
#include "EST_TBuffer.h"
// Smoothing: 'stype' selects the method (see EST_track_aux.cc).
void track_smooth(EST_Track &c, float x, EST_String stype = "");
void time_med_smooth(EST_Track &c, float x);
void time_mean_smooth(EST_Track &c, float x);
void simple_med_smooth(EST_Track &c, int n, int channel=0);
void simple_mean_smooth(EST_Track &c, int n, int channel=0);

/** Calculate the mean absolute error between the same channel in
  * two tracks. This is given by \[\frac{1}{n}\sum_{i=1}^{n}|a_{i} - b_{i}|\]
  * @see abs_error, rms_error(EST_Track &a, EST_Track &b)
  */
float abs_error(EST_Track &a, EST_Track &b, int channel);

void absolute(EST_Track &tr);
void normalise(EST_Track &tr);
void normalise(EST_Track &tr, float mean, float sd, int channel,
               float upper, float lower);
void normalise(EST_Track &tr, EST_FVector &mean, EST_FVector &sd,
               float upper, float lower);
void normalise(EST_TrackList &trlist, EST_FVector &mean,
               EST_FVector &sd, float upper, float lower);

/** Calculate the simple derivative of a track. This is given by
  * \[a_{i+1} - a_{i}\] The values in the resultant track are spaced
  * midway between the values in the input track, resulting in 1 fewer
  * frames in the track. This is a very local estimation of the derivative
  * of the track at a point in time. A smoother value can be obtained
  * using the delta function.
  * @see delta
  */
EST_Track differentiate(EST_Track &c, float samp_int=0.0);

EST_Track difference(EST_Track &a, EST_Track &b);
float mean( const EST_Track &a, int channel );
void mean( const EST_Track &a, EST_FVector &m );
void meansd(EST_Track &a, float &m, float &sd, int channel);

/** Calculate the root mean square error between the same channel in
  * two tracks. The channel is identified by its index.
  * @see abs_error, float rms_error(EST_Track &a, EST_Track &b)
  */
float rms_error(EST_Track &a, EST_Track &b, int channel);

float correlation(EST_Track &a, EST_Track &b, int channel);
void meansd(EST_Track &a, EST_FVector &m, EST_FVector &sd);

/** Calculate the root mean square error between each channels in two
  * tracks. For two tracks of M channels, the result is returned as an
  * EST_FVector of size M, with element {\it i} representing the
  * rms error for channel {\it i}.
  * @see abs_error, rms_error
  */
EST_FVector rms_error(EST_Track &a, EST_Track &b);

// Per-channel variants of abs_error and correlation (result vector of size M).
EST_FVector abs_error(EST_Track &a, EST_Track &b);
EST_FVector correlation(EST_Track &a, EST_Track &b);

/// Move the start and end variables to the nearest frame.
void align_to_track(EST_Track &tr, float &start, float &end);
/// Move the start and end variables to the nearest frame.
void align_to_track(EST_Track &tr, int &start, int &end, int sample_rate);
/// Move the start and end variables to the start and end of the nearest frame.
void move_to_frame_ends(EST_Track &tr,
                        int &start, int &end,
                        int sample_rate, float offset=0.0);
/// Index of the frame whose start boundary is closest to the given time
/// (NOTE(review): original comment was truncated - confirm against the
/// implementation in EST_track_aux.cc).
int nearest_boundary(EST_Track &tr, float time, int sample_rate, float offset=0);
/// Move the track so that it starts at the indicated time.
void set_start(EST_Track &tr, float start);
/// Move the track by {\it shift} seconds
void move_start(EST_Track &tr, float shift);

EST_Track error(EST_Track &ref, EST_Track &test, int relax= 0);
void extract(EST_Track &orig, float start, float end, EST_Track &res);
int track_divide(EST_TrackList &mtfr, EST_Track &fv, EST_Relation &key);
void ParallelTracks(EST_Track &a, EST_TrackList &list,const EST_String &style);
void track_info(EST_Track &track);
EST_String options_track_filetypes(void);
EST_String options_track_filetypes_long(void);
EST_String options_subtrack(void);
int read_track(EST_Track &tr, const EST_String &in_file, EST_Option &al);

/** Return the frame size in {\bf seconds} based on analysis of
  current time points. This function basically determines the local
  frame size (shift) by subtracting the current time point from the next
  time point. If the {\tt prefer_prev} flag is set to {\tt true}, or the
  index is the last in the track, the size is determined by subtracting
  the previous time point from the current one.
  This is most commonly used in pitch synchronous analysis to determine
  the local pitch period.
  @see get_frame_size
  */
float get_time_frame_size(EST_Track &pms, int i, int prefer_prev = 0);

/** Return the frame size in {\bf samples} based on analysis of
  current time points. This function basically determines the local
  frame size (shift) by subtracting the current time point from the next
  time point. If the {\tt prefer_prev} flag is set to {\tt true}, or the
  index is the last in the track, the size is determined by subtracting
  the previous time point from the current one.
  This is most commonly used in pitch synchronous analysis to determine
  the local pitch period.
  @see get_time_frame_size
  */
int get_frame_size(EST_Track &pms, int current_pos, int sample_rate,
                   int prefer_prev=0);

/// How many coefficients in track (looks for Coef0 and coefN channels)
int get_order(const EST_Track &t, EST_CoefficientType type, int d=0);
int get_order(const EST_Track &t);

/// Total the length channel values.
int sum_lengths(const EST_Track &t,
                int sample_rate,
                int start_frame=0, int end_frame=-1);

/// Find the start point in the signal of the sections of speech related to each frame.
void get_start_positions(const EST_Track &t,
                         int sample_rate,
                         EST_TBuffer<int> &pos);

/**@name Analysis frame position
  * Functions which define which part of a single is associated with a
  * given frame in a track.
  * <p>
  * This is defined here in one place for consistency. They are inline since
  * they tend to be used in inner loops. There are two versions,
  * the second for when there are offsets in the track.
  */
//@{
/// Get the start and end of a given frame (in samples)
static inline void get_frame(const EST_Track &tr, int sample_rate,
int f,
int &start, int ¢er, int &end)
{
center = (int)(tr.t(f)*sample_rate + 0.5);
start = center - (int)(tr.a(f, channel_length)/2.0);
end = start + (int)(tr.a(f, channel_length));
}
/// Get the start, center and end of frame `f` (in seconds).
/// Fixed: the `center` parameter name had been corrupted by a bad
/// character-encoding round-trip (`&amp;cent;er`), making the code invalid C++.
static inline void get_frame(const EST_Track &tr, int sample_rate,
                             int f,
                             float &start, float &center, float &end)
{
    // Frame center is the time point itself; lengths are stored in samples,
    // so divide by sample_rate to convert to seconds.
    center = tr.t(f);
    start = center - tr.a(f, channel_length)/(float)sample_rate/2.0;
    end = start + tr.a(f, channel_length)/(float)sample_rate;
}
/// Get the start, center and end of frame `f` (in samples), honouring the
/// per-frame channel_offset stored in the track.
/// Fixed: the `center` parameter name had been corrupted by a bad
/// character-encoding round-trip (`&amp;cent;er`), making the code invalid C++.
static inline void get_frame_o(const EST_Track &tr, int sample_rate,
                               int f,
                               int &start, int &center, int &end)
{
    // As get_frame(), but shifted by the frame's offset (already in samples).
    center = (int)(tr.t(f)*sample_rate + tr.a(f,channel_offset) + 0.5);
    start = center - (int)(tr.a(f, channel_length)/2.0);
    end = start + (int)(tr.a(f, channel_length));
}
/// Get the start, center and end of frame `f` (in seconds), honouring the
/// per-frame channel_offset stored in the track.
/// Fixed: the `center` parameter name had been corrupted by a bad
/// character-encoding round-trip (`&amp;cent;er`), making the code invalid C++.
static inline void get_frame_o(const EST_Track &tr, int sample_rate,
                               int f,
                               float &start, float &center, float &end)
{
    // Offset and length are stored in samples; convert both to seconds.
    center = tr.t(f) + tr.a(f,channel_offset)/(float)sample_rate;
    start = center - tr.a(f, channel_length)/(float)sample_rate/2.0;
    end = start + tr.a(f, channel_length)/(float)sample_rate;
}
//@}
// take one of the channels as the timeline
void channel_to_time(EST_Track &tr, int channel, float scale=1.0);
void channel_to_time(EST_Track &tr, EST_ChannelType c,float scale=1.0);
void channel_to_time(EST_Track &tr, const EST_String c_name, float scale=1.0);
void channel_to_time_lengths(EST_Track &tr, int channel, float scale=1.0);
void channel_to_time_lengths(EST_Track &tr, EST_ChannelType c,float scale=1.0);
void channel_to_time_lengths(EST_Track &tr, const EST_String c_name, float scale=1.0);
/* Allow EST_Track to be used in an EST_Val */
VAL_REGISTER_CLASS_DCLS(track,EST_Track)
#endif /* __EST_TRACK_AUX_H__ */
//@}
|
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.pipeline.cumulativesum;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.aggregations.PipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregatorFactory;
import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregatorFactory;
import org.elasticsearch.search.aggregations.pipeline.AbstractPipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.BucketMetricsParser;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregator.Parser.BUCKETS_PATH;
import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregator.Parser.FORMAT;
public class CumulativeSumPipelineAggregationBuilder extends AbstractPipelineAggregationBuilder<CumulativeSumPipelineAggregationBuilder> {
public static final String NAME = "cumulative_sum";
private String format;
public CumulativeSumPipelineAggregationBuilder(String name, String bucketsPath) {
super(name, NAME, new String[] { bucketsPath });
}
/**
* Read from a stream.
*/
public CumulativeSumPipelineAggregationBuilder(StreamInput in) throws IOException {
super(in, NAME);
format = in.readOptionalString();
}
@Override
protected final void doWriteTo(StreamOutput out) throws IOException {
out.writeOptionalString(format);
}
/**
* Sets the format to use on the output of this aggregation.
*/
public CumulativeSumPipelineAggregationBuilder format(String format) {
if (format == null) {
throw new IllegalArgumentException("[format] must not be null: [" + name + "]");
}
this.format = format;
return this;
}
/**
* Gets the format to use on the output of this aggregation.
*/
public String format() {
return format;
}
protected DocValueFormat formatter() {
if (format != null) {
return new DocValueFormat.Decimal(format);
} else {
return DocValueFormat.RAW;
}
}
@Override
protected PipelineAggregator createInternal(Map<String, Object> metaData) throws IOException {
return new CumulativeSumPipelineAggregator(name, bucketsPaths, formatter(), metaData);
}
@Override
public void doValidate(AggregatorFactory<?> parent, List<AggregationBuilder> aggFactories,
List<PipelineAggregationBuilder> pipelineAggregatorFactories) {
if (bucketsPaths.length != 1) {
throw new IllegalStateException(BUCKETS_PATH.getPreferredName()
+ " must contain a single entry for aggregation [" + name + "]");
}
if (parent instanceof HistogramAggregatorFactory) {
HistogramAggregatorFactory histoParent = (HistogramAggregatorFactory) parent;
if (histoParent.minDocCount() != 0) {
throw new IllegalStateException("parent histogram of cumulative sum aggregation [" + name
+ "] must have min_doc_count of 0");
}
} else if (parent instanceof DateHistogramAggregatorFactory) {
DateHistogramAggregatorFactory histoParent = (DateHistogramAggregatorFactory) parent;
if (histoParent.minDocCount() != 0) {
throw new IllegalStateException("parent histogram of cumulative sum aggregation [" + name
+ "] must have min_doc_count of 0");
}
} else {
throw new IllegalStateException("cumulative sum aggregation [" + name
+ "] must have a histogram or date_histogram as parent");
}
}
@Override
protected final XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException {
if (format != null) {
builder.field(BucketMetricsParser.FORMAT.getPreferredName(), format);
}
return builder;
}
public static CumulativeSumPipelineAggregationBuilder parse(String pipelineAggregatorName, XContentParser parser)
throws IOException {
XContentParser.Token token;
String currentFieldName = null;
String[] bucketsPaths = null;
String format = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.VALUE_STRING) {
if (FORMAT.match(currentFieldName)) {
format = parser.text();
} else if (BUCKETS_PATH.match(currentFieldName)) {
bucketsPaths = new String[] { parser.text() };
} else {
throw new ParsingException(parser.getTokenLocation(),
"Unknown key for a " + token + " in [" + pipelineAggregatorName + "]: [" + currentFieldName + "].");
}
} else if (token == XContentParser.Token.START_ARRAY) {
if (BUCKETS_PATH.match(currentFieldName)) {
List<String> paths = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
String path = parser.text();
paths.add(path);
}
bucketsPaths = paths.toArray(new String[paths.size()]);
} else {
throw new ParsingException(parser.getTokenLocation(),
"Unknown key for a " + token + " in [" + pipelineAggregatorName + "]: [" + currentFieldName + "].");
}
} else {
throw new ParsingException(parser.getTokenLocation(),
"Unexpected token " + token + " in [" + pipelineAggregatorName + "].");
}
}
if (bucketsPaths == null) {
throw new ParsingException(parser.getTokenLocation(), "Missing required field [" + BUCKETS_PATH.getPreferredName()
+ "] for derivative aggregation [" + pipelineAggregatorName + "]");
}
CumulativeSumPipelineAggregationBuilder factory =
new CumulativeSumPipelineAggregationBuilder(pipelineAggregatorName, bucketsPaths[0]);
if (format != null) {
factory.format(format);
}
return factory;
}
@Override
protected int doHashCode() {
return Objects.hash(format);
}
@Override
protected boolean doEquals(Object obj) {
CumulativeSumPipelineAggregationBuilder other = (CumulativeSumPipelineAggregationBuilder) obj;
return Objects.equals(format, other.format);
}
@Override
public String getWriteableName() {
return NAME;
}
} |
package configuration
// HAProxy groups the settings used to generate and apply an HAProxy
// configuration.
type HAProxy struct {
	// TemplatePath is the path to the configuration template file.
	TemplatePath string
	// OutputPath is the path the rendered configuration is written to.
	OutputPath string
	// ReloadCommand is the command executed to reload HAProxy after the
	// configuration is written (presumably a shell command — confirm with caller).
	ReloadCommand string
}
|
// Auto-generated Dojo i18n bundle: en-us CLDR number-formatting symbols used by
// the dojox.charting Chart2D widgets. Generated/minified file — do not hand-edit.
dojo.provide("dojox.charting.widget.nls.Chart2D_en-us");dojo.provide("dojo.cldr.nls.number");dojo.cldr.nls.number._built=true;dojo.provide("dojo.cldr.nls.number.en_us");dojo.cldr.nls.number.en_us={"group":",","percentSign":"%","exponential":"E","percentFormat":"#,##0%","scientificFormat":"#E0","list":";","infinity":"∞","patternDigit":"#","minusSign":"-","decimal":".","nan":"NaN","nativeZeroDigit":"0","perMille":"‰","decimalFormat":"#,##0.###","currencyFormat":"¤#,##0.00;(¤#,##0.00)","plusSign":"+","currencySpacing-afterCurrency-currencyMatch":"[:letter:]","currencySpacing-beforeCurrency-surroundingMatch":"[:digit:]","currencySpacing-afterCurrency-insertBetween":" ","currencySpacing-afterCurrency-surroundingMatch":"[:digit:]","currencySpacing-beforeCurrency-currencyMatch":"[:letter:]","currencySpacing-beforeCurrency-insertBetween":" "};
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.sling.scripting.core.impl;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.ops4j.pax.exam.CoreOptions.bundle;
import static org.ops4j.pax.exam.CoreOptions.junitBundles;
import static org.ops4j.pax.exam.CoreOptions.mavenBundle;
import static org.ops4j.pax.exam.CoreOptions.options;
import static org.ops4j.pax.exam.CoreOptions.provision;
import static org.ops4j.pax.exam.CoreOptions.systemProperty;
import static org.ops4j.pax.exam.CoreOptions.when;
import java.io.File;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Dictionary;
import java.util.HashMap;
import java.util.Hashtable;
import java.util.List;
import java.util.Map;
import javax.inject.Inject;
import javax.script.Bindings;
import javax.script.ScriptEngine;
import javax.script.ScriptEngineFactory;
import org.apache.sling.scripting.api.BindingsValuesProvider;
import org.apache.sling.scripting.api.BindingsValuesProvidersByContext;
import org.apache.sling.scripting.core.it.ScriptingCoreTestSupport;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.ops4j.pax.exam.Option;
import org.ops4j.pax.exam.junit.PaxExam;
import org.ops4j.pax.exam.util.Filter;
import org.osgi.framework.BundleContext;
import org.osgi.framework.Constants;
import org.osgi.framework.ServiceRegistration;
@RunWith(PaxExam.class)
public class BindingsValuesProvidersByContextIT extends ScriptingCoreTestSupport {

    /** Service under test, injected by pax-exam once it becomes available. */
    @Inject
    @Filter(timeout = 300000)
    private BindingsValuesProvidersByContext bvpProvider;

    @Inject
    private BundleContext bundleContext;

    /** Service registrations made by the current test; undone in cleanup(). */
    private final List<ServiceRegistration> regs = new ArrayList<ServiceRegistration>();

    @Before
    public void setup() {
        regs.clear();
    }

    @After
    public void cleanup() {
        // Unregister everything the test registered so tests stay independent.
        for (ServiceRegistration reg : regs) {
            reg.unregister();
        }
    }

    /**
     * Builds the service properties for a BVP or Map registration.
     * A null context/engineName means "leave that property unset".
     */
    private Dictionary<String, Object> getProperties(String context, String engineName) {
        final Dictionary<String, Object> props = new Hashtable<String, Object>();
        if (context != null) {
            // Multiple contexts may be supplied as a comma-separated list.
            props.put(BindingsValuesProvider.CONTEXT, context.split(","));
        }
        if (engineName != null) {
            props.put(ScriptEngine.NAME, engineName);
        }
        return props;
    }

    /**
     * Registers a BindingsValuesProvider whose toString() is {@code id},
     * so ordering can be asserted via asString().
     */
    private void addBVP(final String id, String context, String engineName) {
        final BindingsValuesProvider bvp = new BindingsValuesProvider() {
            @Override
            public String toString() {
                return id;
            }

            @Override
            public void addBindings(Bindings b) {
            }
        };
        regs.add(bundleContext.registerService(BindingsValuesProvider.class.getName(), bvp, getProperties(context, engineName)));
    }

    /** Like addBVP, but with an explicit OSGi service.ranking property. */
    private void addBVPWithServiceRanking(final String id, String context, String engineName, int serviceRanking) {
        final BindingsValuesProvider bvp = new BindingsValuesProvider() {
            @Override
            public String toString() {
                return id;
            }

            @Override
            public void addBindings(Bindings b) {
            }
        };
        final Dictionary<String, Object> properties = getProperties(context, engineName);
        properties.put(Constants.SERVICE_RANKING, serviceRanking);
        regs.add(bundleContext.registerService(BindingsValuesProvider.class.getName(), bvp, properties));
    }

    /**
     * Registers a plain Map service (the provider also accepts Maps);
     * its toString() is prefixed "M_" to tell Maps apart from BVPs.
     */
    private void addMap(final String id, String context, String engineName) {
        final Map<String, Object> result = new HashMap<String, Object>() {
            private static final long serialVersionUID = 1L;

            @Override
            public String toString() {
                return "M_" + id;
            }
        };
        regs.add(bundleContext.registerService(Map.class.getName(), result, getProperties(context, engineName)));
    }

    /**
     * Minimal ScriptEngineFactory stub: only the engine name and name list
     * are meaningful; everything else returns null.
     */
    private ScriptEngineFactory factory(final String engineName) {
        return new ScriptEngineFactory() {
            @Override
            public ScriptEngine getScriptEngine() {
                return null;
            }

            @Override
            public String getProgram(String... arg0) {
                return null;
            }

            @Override
            public Object getParameter(String arg0) {
                return null;
            }

            @Override
            public String getOutputStatement(String arg0) {
                return null;
            }

            @Override
            public List<String> getNames() {
                final List<String> names = new ArrayList<String>();
                names.add(engineName);
                return names;
            }

            @Override
            public List<String> getMimeTypes() {
                return null;
            }

            @Override
            public String getMethodCallSyntax(String arg0, String arg1, String... arg2) {
                return null;
            }

            @Override
            public String getLanguageVersion() {
                return null;
            }

            @Override
            public String getLanguageName() {
                return null;
            }

            @Override
            public List<String> getExtensions() {
                return null;
            }

            @Override
            public String getEngineVersion() {
                return null;
            }

            @Override
            public String getEngineName() {
                return engineName;
            }
        };
    }

    /**
     * Renders a collection as a comma-joined string of toString() values;
     * sorted when {@code sortList} is true, provider order otherwise.
     */
    private String asString(Collection<?> data, boolean sortList) {
        final List<String> maybeSorted = new ArrayList<String>();
        for (Object o : data) {
            maybeSorted.add(o.toString());
        }
        if (sortList) {
            Collections.sort(maybeSorted);
        }
        final StringBuilder sb = new StringBuilder();
        for (String str : maybeSorted) {
            if (sb.length() > 0) {
                sb.append(",");
            }
            sb.append(str);
        }
        return sb.toString();
    }

    private String asString(Collection<?> data) {
        return asString(data, true);
    }

    /** Engine-name wildcards: null, "*" and "ANY" match every engine. */
    @Test
    public void testAny() {
        addBVP("one", null, "js");
        addBVP("two", null, null);
        addBVP("three", null, "*");
        addBVP("four", null, "ANY");
        addBVP("five", null, "basic");
        assertEquals("four,one,three,two", asString(bvpProvider.getBindingsValuesProviders(factory("js"), null)));
        assertEquals("five,four,three,two", asString(bvpProvider.getBindingsValuesProviders(factory("basic"), null)));
        assertEquals("four,three,two", asString(bvpProvider.getBindingsValuesProviders(factory("other"), null)));

        final String unsorted = asString(bvpProvider.getBindingsValuesProviders(factory("js"), null), false);
        assertTrue("Expecting js language-specific BVP at the end", unsorted.endsWith("one"));
    }

    /** Context filtering combined with engine filtering; null context defaults to "request". */
    @Test
    public void testContextsAndLanguages() {
        addBVP("foo", null, "js");
        addBVP("bar", null, null);
        addBVP("r1", "request", "js");
        addBVP("r2", "request", null);
        addBVP("o1", "other", "js");
        addBVP("o2", "other", null);
        addBVP("o3", "other,request", null);
        addBVP("o4", "python", null);
        addBVP("python", "python", "python");
        assertEquals("bar,foo,o3,r1,r2", asString(bvpProvider.getBindingsValuesProviders(factory("js"), "request")));
        assertEquals("With default content", "bar,foo,o3,r1,r2", asString(bvpProvider.getBindingsValuesProviders(factory("js"), null)));
        assertEquals("o1,o2,o3", asString(bvpProvider.getBindingsValuesProviders(factory("js"), "other")));
        assertEquals("o4,python", asString(bvpProvider.getBindingsValuesProviders(factory("python"), "python")));
        assertEquals("", asString(bvpProvider.getBindingsValuesProviders(factory("js"), "unusedContext")));

        final String unsorted = asString(bvpProvider.getBindingsValuesProviders(factory("python"), "python"), false);
        assertTrue("Expecting python language-specific BVP at the end", unsorted.endsWith("python"));
    }

    /** Map services and BVP services are merged into one provider list. */
    @Test
    public void testMapsAndBvps() {
        addBVP("foo", null, "js");
        addMap("bar", null, null);
        addMap("r1", "request", "js");
        addMap("r2", "request", null);
        addMap("o1", "other", "js");
        addBVP("o2", "other", null);
        addMap("o3", "other,request", null);
        addBVP("o4", "python", null);
        addMap("python", "python", "python");
        assertEquals("M_bar,M_o3,M_r1,M_r2,foo", asString(bvpProvider.getBindingsValuesProviders(factory("js"), "request")));
        assertEquals("With default content", "M_bar,M_o3,M_r1,M_r2,foo", asString(bvpProvider.getBindingsValuesProviders(factory("js"), null)));
        assertEquals("M_o1,M_o3,o2", asString(bvpProvider.getBindingsValuesProviders(factory("js"), "other")));
        assertEquals("", asString(bvpProvider.getBindingsValuesProviders(factory("js"), "unusedContext")));
        assertEquals("M_python,o4", asString(bvpProvider.getBindingsValuesProviders(factory("python"), "python")));

        final String unsorted = asString(bvpProvider.getBindingsValuesProviders(factory("python"), "python"), false);
        assertTrue("Expecting python language-specific BVP at the end", unsorted.endsWith("M_python"));
    }

    /** Providers are returned ordered by service ranking (lowest first), regardless of registration order. */
    @Test
    public void testBVPsWithServiceRankingA() {
        addBVPWithServiceRanking("last", null, "js", Integer.MAX_VALUE);
        addBVPWithServiceRanking("second", null, "js", 0);
        addBVPWithServiceRanking("first", null, "js", Integer.MIN_VALUE);
        assertEquals("first,second,last", asString(bvpProvider.getBindingsValuesProviders(factory("js"), null), false));
    }

    /** Same as A but registered in ranking order — result must be identical. */
    @Test
    public void testBVPsWithServiceRankingB() {
        addBVPWithServiceRanking("first", null, "js", Integer.MIN_VALUE);
        addBVPWithServiceRanking("second", null, "js", 0);
        addBVPWithServiceRanking("last", null, "js", Integer.MAX_VALUE);
        assertEquals("first,second,last", asString(bvpProvider.getBindingsValuesProviders(factory("js"), null), false));
    }

    /** Generic (engine-less) providers come before engine-specific ones; ranking orders within each group. */
    @Test
    public void testBVPsWithServiceRankingC() {
        addBVPWithServiceRanking("second", "request", "js", 0);
        addBVPWithServiceRanking("first", "request", "js", Integer.MIN_VALUE);
        addBVPWithServiceRanking("genericThree", "request", null, 42);
        addBVPWithServiceRanking("genericTwo", "request", null, 0);
        addBVPWithServiceRanking("last", "request", "js", Integer.MAX_VALUE);
        addBVPWithServiceRanking("genericOne", "request", null, -42);
        assertEquals("genericOne,genericTwo,genericThree,first,second,last", asString(bvpProvider.getBindingsValuesProviders(factory("js"), "request"), false));
    }
}
|
<?php
/**
* CakeTestSuiteDispatcher controls dispatching TestSuite web based requests.
*
* PHP 5
*
* CakePHP(tm) Tests <http://book.cakephp.org/2.0/en/development/testing.html>
* Copyright 2005-2012, Cake Software Foundation, Inc. (http://cakefoundation.org)
*
* Licensed under The MIT License
* Redistributions of files must retain the above copyright notice
*
* @copyright Copyright 2005-2012, Cake Software Foundation, Inc. (http://cakefoundation.org)
* @link http://cakephp.org CakePHP(tm) Project
* @package Cake.TestSuite
* @since CakePHP(tm) v 1.3
* @license MIT License (http://www.opensource.org/licenses/mit-license.php)
*/
define('CORE_TEST_CASES', CAKE . 'Test' . DS . 'Case');
define('APP_TEST_CASES', TESTS . 'Case');
App::uses('CakeTestSuiteCommand', 'TestSuite');
/**
 * CakeTestSuiteDispatcher handles web requests to the test suite and runs the correct action:
 * either rendering the list of available test cases or running a single test case.
 *
 * @package Cake.TestSuite
 */
class CakeTestSuiteDispatcher {

/**
 * 'Request' parameters, with their defaults. Keys present in $_GET override
 * these during _parseParams().
 *
 * @var array
 */
	public $params = array(
		'codeCoverage' => false,
		'case' => null,
		'core' => false,
		'app' => true,
		'plugin' => null,
		'output' => 'html',
		'show' => 'groups',
		'show_passes' => false,
		'filter' => false,
		'fixture' => null
	);

/**
 * Baseurl for the request
 *
 * @var string
 */
	protected $_baseUrl;

/**
 * Base dir of the request. Used for accessing assets.
 *
 * @var string
 */
	protected $_baseDir;

/**
 * boolean to set auto parsing of params. Set true by setParams() to skip
 * reading $_GET in _parseParams().
 *
 * @var boolean
 */
	protected $_paramsParsed = false;

/**
 * reporter instance used for the request
 *
 * @var CakeBaseReporter
 */
	protected static $_Reporter = null;

/**
 * constructor
 *
 * Derives the base URL and base dir from PHP_SELF.
 *
 * @return void
 */
	public function __construct() {
		$this->_baseUrl = $_SERVER['PHP_SELF'];
		$dir = rtrim(dirname($this->_baseUrl), '\\');
		// Keep a bare "/" as-is; otherwise ensure a trailing slash.
		$this->_baseDir = ($dir === '/') ? $dir : $dir . '/';
	}

/**
 * Runs the actions required by the URL parameters.
 *
 * NOTE(review): both _runTestCase() and _testCaseList() are documented as
 * @return void, so $value is presumably always null — confirm before relying
 * on the return value of this method.
 *
 * @return void
 */
	public function dispatch() {
		$this->_checkPHPUnit();
		$this->_parseParams();

		if ($this->params['case']) {
			$value = $this->_runTestCase();
		} else {
			$value = $this->_testCaseList();
		}

		// Flush whatever the reporters buffered to the client in one go.
		$output = ob_get_clean();
		echo $output;
		return $value;
	}

/**
 * Static method to initialize the test runner, keeps global space clean
 *
 * @return void
 */
	public static function run() {
		$dispatcher = new CakeTestSuiteDispatcher();
		$dispatcher->dispatch();
	}

/**
 * Checks that PHPUnit is installed. Will exit (rendering an error template)
 * if it isn't.
 *
 * @return void
 */
	protected function _checkPHPUnit() {
		$found = $this->loadTestFramework();
		if (!$found) {
			// $baseDir is used inside the included template.
			$baseDir = $this->_baseDir;
			include CAKE . 'TestSuite' . DS . 'templates' . DS . 'phpunit.php';
			exit();
		}
	}

/**
 * Checks for the existence of the test framework files, adding the first
 * vendor dir containing PHPUnit to the include_path.
 *
 * Returns the result of including PHPUnit's autoloader: truthy if found,
 * false otherwise.
 *
 * @return boolean true if found, false otherwise
 */
	public function loadTestFramework() {
		foreach (App::path('vendors') as $vendor) {
			if (is_dir($vendor . 'PHPUnit')) {
				ini_set('include_path', $vendor . PATH_SEPARATOR . ini_get('include_path'));
				break;
			}
		}
		// include returns false on failure, otherwise the included file's return value.
		return include 'PHPUnit' . DS . 'Autoload.php';
	}

/**
 * Checks for the xdebug extension required to do code coverage. Displays an error
 * if xdebug isn't installed.
 *
 * @return void
 */
	protected function _checkXdebug() {
		if (!extension_loaded('xdebug')) {
			$baseDir = $this->_baseDir;
			include CAKE . 'TestSuite' . DS . 'templates' . DS . 'xdebug.php';
			exit();
		}
	}

/**
 * Generates a page containing the a list of test cases that could be run.
 *
 * @return void
 */
	protected function _testCaseList() {
		$command = new CakeTestSuiteCommand('', $this->params);
		$Reporter = $command->handleReporter($this->params['output']);
		$Reporter->paintDocumentStart();
		$Reporter->paintTestMenu();
		$Reporter->testCaseList();
		$Reporter->paintDocumentEnd();
	}

/**
 * Sets the params, calling this will bypass the auto parameter parsing.
 *
 * @param array $params Array of parameters for the dispatcher
 * @return void
 */
	public function setParams($params) {
		$this->params = $params;
		$this->_paramsParsed = true;
	}

/**
 * Parse url params into a 'request'
 *
 * @return void
 */
	protected function _parseParams() {
		if (!$this->_paramsParsed) {
			if (!isset($_SERVER['SERVER_NAME'])) {
				$_SERVER['SERVER_NAME'] = '';
			}
			// Each known param may be overridden by a matching $_GET key.
			foreach ($this->params as $key => $value) {
				if (isset($_GET[$key])) {
					$this->params[$key] = $_GET[$key];
				}
			}
			if (isset($_GET['code_coverage'])) {
				$this->params['codeCoverage'] = true;
				$this->_checkXdebug();
			}
		}
		// Fall back to app tests when neither a plugin nor core was requested.
		if (empty($this->params['plugin']) && empty($this->params['core'])) {
			$this->params['app'] = true;
		}
		$this->params['baseUrl'] = $this->_baseUrl;
		$this->params['baseDir'] = $this->_baseDir;
	}

/**
 * Runs a test case file.
 *
 * @return void
 */
	protected function _runTestCase() {
		$commandArgs = array(
			'case' => $this->params['case'],
			'core' => $this->params['core'],
			'app' => $this->params['app'],
			'plugin' => $this->params['plugin'],
			'codeCoverage' => $this->params['codeCoverage'],
			'showPasses' => !empty($this->params['show_passes']),
			'baseUrl' => $this->_baseUrl,
			'baseDir' => $this->_baseDir,
		);

		$options = array(
			'--filter', $this->params['filter'],
			'--output', $this->params['output'],
			'--fixture', $this->params['fixture']
		);
		// Let PHPUnit install its own error handling.
		restore_error_handler();

		try {
			// Initialize the shared static timestamp before the run starts.
			self::time();
			$command = new CakeTestSuiteCommand('CakeTestLoader', $commandArgs);
			$command->run($options);
		} catch (MissingConnectionException $exception) {
			ob_end_clean();
			$baseDir = $this->_baseDir;
			include CAKE . 'TestSuite' . DS . 'templates' . DS . 'missing_connection.php';
			exit();
		}
	}

/**
 * Sets a static timestamp. The same value is returned for the whole request
 * unless $reset is passed.
 *
 * @param boolean $reset to set new static timestamp.
 * @return integer timestamp
 */
	public static function time($reset = false) {
		static $now;
		if ($reset || !$now) {
			$now = time();
		}
		return $now;
	}

/**
 * Returns formatted date string using static time
 * This method is being used as formatter for created, modified and updated fields in Model::save()
 *
 * @param string $format format to be used.
 * @return string formatted date
 */
	public static function date($format) {
		return date($format, self::time());
	}
}
|
// TopoJSON topology for Jersey (id "JE"): a single Polygon geometry plus the
// quantization transform (scale/translate) needed to decode the delta-encoded arcs.
// Generated data — do not hand-edit the coordinates below.
var JEY_TOPO = {"type":"Topology","objects":{"jey":{"type":"GeometryCollection","geometries":[{"type":"Polygon","properties":{"name":"Jersey"},"id":"JE","arcs":[[0]]}]}},"arcs":[[[6817,9289],[634,-999],[2019,-1611],[-56,-1021],[199,-429],[233,-638],[153,-293],[-373,-548],[-170,-676],[-45,-868],[3,-1156],[-110,-1050],[-283,13],[-380,446],[-393,268],[-1316,42],[-728,519],[-994,1620],[-371,408],[-386,268],[-296,0],[-346,-595],[-329,-961],[-345,-1301],[-362,196],[-536,935],[-446,297],[-122,-161],[-874,-595],[-438,-22],[306,2245],[-289,2351],[-376,2032],[38,1284],[503,289],[606,-289],[810,-714],[579,68],[444,229],[413,434],[472,693],[337,-540],[933,-374],[484,-510],[291,501],[270,154],[637,59]]],"transform":{"scale":[0.000023374733373337532,0.00000957126962695701],"translate":[-2.242014126999919,49.17133209800008]}}
// { dg-do run { xfail sparc64-*-elf arm-*-pe } }
// { dg-options "-fexceptions" }
#include <exception>
#include <stdlib.h>
// Terminate handler installed by ~A(): exiting with status 0 marks the test
// as passed, because reaching terminate() is the expected outcome here.
void my_terminate() {
  exit (0); // Double faults should call terminate
}
// Helper whose destructor throws while another exception is already being
// handled; the throw from an EH-cleanup destructor must route to terminate().
struct A {
  A() { }
  ~A() {
    // Install the handler here so it is active exactly when the second
    // throw below triggers terminate().
    std::set_terminate (my_terminate);
    throw 1; // This throws from EH dtor, should call my_terminate
  }
};
// Drives the test: a exists only inside the catch block, so its destructor
// runs during the unwinding started by the second `throw 1`. The correct
// path reaches my_terminate (exit 0); any return from main means failure.
int main() {
  try {
    try {
      throw 1;
    } catch (int i) {
      A a; // A hit on this EH dtor went to the wrong place
      throw 1;
    }
  } catch (...) {
    // Reaching either return statement means terminate() was not called: fail.
    return 1;
  }
  return 1;
}
|
/* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.camunda.bpm.engine.test.examples.bpmn.gateway;
import java.util.HashMap;
import java.util.Map;
import org.camunda.bpm.engine.ProcessEngineException;
import org.camunda.bpm.engine.impl.test.PluggableProcessEngineTestCase;
import org.camunda.bpm.engine.runtime.ProcessInstance;
import org.camunda.bpm.engine.task.Task;
import org.camunda.bpm.engine.test.Deployment;
/**
* Example of using the exclusive gateway.
*
* @author Joram Barrez
*/
public class ExclusiveGatewayTest extends PluggableProcessEngineTestCase {

  /**
   * The test process has an XOR gateway where, the 'input' variable is used to
   * select one of the outgoing sequence flow. Every one of those sequence flow
   * goes to another task, allowing us to test the decision very easily.
   */
  @Deployment
  public void testDecisionFunctionality() {
    assertEquals("Send e-mail for more information", taskNameForInput(1));
    assertEquals("Check account balance", taskNameForInput(2));
    assertEquals("Call customer", taskNameForInput(3));

    // Test with input == 4: no outgoing sequence flow matches, so starting must fail.
    try {
      startInstanceWithInput(4);
      fail();
    } catch (ProcessEngineException e) {
      // Exception is expected since no outgoing sequence flow matches
    }
  }

  /** Starts the example process with the given value for the 'input' variable. */
  private ProcessInstance startInstanceWithInput(int input) {
    Map<String, Object> variables = new HashMap<String, Object>();
    variables.put("input", input);
    return runtimeService.startProcessInstanceByKey("exclusiveGateway", variables);
  }

  /** Starts the process with the given input and returns the name of the resulting task. */
  private String taskNameForInput(int input) {
    ProcessInstance pi = startInstanceWithInput(input);
    Task task = taskService.createTaskQuery().processInstanceId(pi.getId()).singleResult();
    return task.getName();
  }
}
|
/*
* Copyright (C) 2014 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package okhttp3.internal.http2.hpackjson;
import com.squareup.moshi.JsonAdapter;
import com.squareup.moshi.Moshi;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import okio.Okio;
/**
* Utilities for reading HPACK tests.
*/
/**
 * Utilities for reading HPACK interop test cases ("stories") from the
 * hpack-test-case JSON resources.
 *
 * <p>Fix: {@code File.listFiles()} returns null when the path is not a
 * directory or an I/O error occurs; that case previously caused an NPE and is
 * now treated as "no stories found".
 */
public final class HpackJsonUtil {
  /** Earliest draft that is code-compatible with latest. */
  private static final int BASE_DRAFT = 9;

  private static final String STORY_RESOURCE_FORMAT = "/hpack-test-case/%s/story_%02d.json";

  // Moshi and its adapter are thread-safe; cache them once per class.
  private static final Moshi MOSHI = new Moshi.Builder().build();
  private static final JsonAdapter<Story> STORY_JSON_ADAPTER = MOSHI.adapter(Story.class);

  /** Parses one story from an open stream; the caller is responsible for closing it. */
  private static Story readStory(InputStream jsonResource) throws IOException {
    return STORY_JSON_ADAPTER.fromJson(Okio.buffer(Okio.source(jsonResource)));
  }

  /** Parses one story from a JSON file on disk. */
  private static Story readStory(File file) throws IOException {
    return STORY_JSON_ADAPTER.fromJson(Okio.buffer(Okio.source(file)));
  }

  /** Iterate through the hpack-test-case resources, only picking stories for the current draft. */
  public static String[] storiesForCurrentDraft() throws URISyntaxException {
    File testCaseDirectory = new File(HpackJsonUtil.class.getResource("/hpack-test-case").toURI());
    List<String> storyNames = new ArrayList<>();
    File[] paths = testCaseDirectory.listFiles();
    if (paths == null) {
      // Not a directory, or listing failed — nothing to pick from.
      return new String[0];
    }
    for (File path : paths) {
      if (path.isDirectory() && Arrays.asList(path.list()).contains("story_00.json")) {
        try {
          // The draft number of story_00 is taken as representative for the folder.
          Story firstStory = readStory(new File(path, "story_00.json"));
          if (firstStory.getDraft() >= BASE_DRAFT) {
            storyNames.add(path.getName());
          }
        } catch (IOException ignored) {
          // Skip this path.
        }
      }
    }
    return storyNames.toArray(new String[0]);
  }

  /**
   * Reads stories named "story_xx.json" from the folder provided, stopping at
   * the first index for which no resource exists.
   */
  public static List<Story> readStories(String testFolderName) throws Exception {
    List<Story> result = new ArrayList<>();
    int i = 0;
    while (true) { // break after last test.
      String storyResourceName = String.format(STORY_RESOURCE_FORMAT, testFolderName, i);
      InputStream storyInputStream = HpackJsonUtil.class.getResourceAsStream(storyResourceName);
      if (storyInputStream == null) {
        break;
      }
      try {
        Story story = readStory(storyInputStream);
        story.setFileName(storyResourceName);
        result.add(story);
        i++;
      } finally {
        storyInputStream.close();
      }
    }
    return result;
  }

  private HpackJsonUtil() {
  } // Utilities only.
}
|
<!DOCTYPE html>
<html>
<head>
<meta name="description" content="Simplest possible examples of HTML, CSS and JavaScript." />
<meta name="author" content="//samdutton.com">
<meta name="viewport" content="width=device-width, minimum-scale=1.0, initial-scale=1.0, user-scalable=yes">
<meta itemprop="name" content="simpl.info: simplest possible examples of HTML, CSS and JavaScript">
<meta itemprop="image" content="/icon_192x192.png">
<meta name="mobile-web-app-capable" content="yes">
<meta id="theme-color" name="theme-color" content="#fff">
<base target="_blank">
<title>img with srcset and sizes</title>
<link rel="stylesheet" href="../css/main.css" />
<style>
img {
  width: 50vw;
}
</style>
</head>
<body>
<div id="container">
<!-- "&lt;img&gt;" is escaped so the tag name renders as text; a literal <img> here
     would be parsed as an (empty) image element instead of being displayed. -->
<h1><a href="../index.html" title="simpl.info home page">simpl.info</a> &lt;img&gt; with srcset and sizes</h1>
<img src="small.jpg" srcset="small.jpg 500w, medium.jpg 1000w, large.jpg 1500w" sizes="50vw" alt="Lias and Little Puss: two ten week old grey tabby kittens" />
<p>Try changing the viewport size.</p>
<p>The sizes attribute enables the browser to choose <a href="small.jpg" title="500px wide image file">small.jpg</a>, <a href="medium.jpg" title="1000px wide image file">medium.jpg</a> or <a href="large.jpg" title="1500px wide image file">large.jpg</a>, depending on the screen resolution and image display width (set to be 50% of viewport width).</p>
<p>
  Image used: <span id="imgSrc"></span><br>
  Image natural width: <span id="imgNaturalWidth"></span>px<br>
  Image display width: <span id="imgWidth"></span>px<br>
  Device pixel ratio: <span id="devicePixelRatio"></span><br>
  Minimum acceptable image width: <span id="minimumWidth"></span>px<br>
  Viewport width: <span id="viewportWidth"></span>px<br>
  Available screen width: <span id="availableWidth"></span>px<br>
</p>
<p class="borderBelow"><a href="../srcsetwvalues" title="img element with srcset using w values, but no sizes attribute">Without the sizes attribute</a> the browser can only take into account <strong>viewport</strong> width, not image <strong>display</strong> width.</p>
<p>The <a href="../pictureart" title="Using the picture element for art direction">art direction</a> and <a href="../picturetype" title="Using the picture element with alternative sources">file types</a> examples show ways to use the picture element.</p>
<a href="//github.com/samdutton/simpl/blob/master/sizeswvalues/index.html" title="View source for this page on GitHub" id="viewSource">View source on GitHub</a>
</div>
<script src="js/main.js"></script>
<script src="../js/lib/ga.js"></script>
</body>
</html>
|
// edition:2018
// compile-flags: --crate-version 1.0.0

// NOTE(review): every `// @...` line below is NOT an ordinary comment — it is an
// assertion consumed by rustdoc's JSON-output test harness, evaluating a JSONPath
// expression against the generated nested.json (`-` refers to the previously named
// file). Do not edit or reorder these directives; plain comments like this one are
// ignored by the harness.

// @is nested.json "$.crate_version" \"1.0.0\"
// @is - "$.index[*][?(@.name=='nested')].kind" \"module\"
// @is - "$.index[*][?(@.name=='nested')].inner.is_crate" true
// @count - "$.index[*][?(@.name=='nested')].inner.items[*]" 1

// @is nested.json "$.index[*][?(@.name=='l1')].kind" \"module\"
// @is - "$.index[*][?(@.name=='l1')].inner.is_crate" false
// @count - "$.index[*][?(@.name=='l1')].inner.items[*]" 2
pub mod l1 {
    // @is nested.json "$.index[*][?(@.name=='l3')].kind" \"module\"
    // @is - "$.index[*][?(@.name=='l3')].inner.is_crate" false
    // @count - "$.index[*][?(@.name=='l3')].inner.items[*]" 1
    // @set l3_id = - "$.index[*][?(@.name=='l3')].id"
    // @has - "$.index[*][?(@.name=='l1')].inner.items[*]" $l3_id
    pub mod l3 {
        // @is nested.json "$.index[*][?(@.name=='L4')].kind" \"struct\"
        // @is - "$.index[*][?(@.name=='L4')].inner.struct_type" \"unit\"
        // @set l4_id = - "$.index[*][?(@.name=='L4')].id"
        // @has - "$.index[*][?(@.name=='l3')].inner.items[*]" $l4_id
        pub struct L4;
    }
    // @is nested.json "$.index[*][?(@.inner.source=='l3::L4')].kind" \"import\"
    // @is - "$.index[*][?(@.inner.source=='l3::L4')].inner.glob" false
    pub use l3::L4;
}
|
// Copyright 2000-2020 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.jetbrains.python.codeInsight;
import com.intellij.lang.Language;
import com.intellij.lang.injection.MultiHostRegistrar;
import com.intellij.openapi.util.TextRange;
import com.intellij.psi.PsiComment;
import com.intellij.psi.PsiElement;
import com.intellij.util.containers.ContainerUtil;
import com.jetbrains.python.PyNames;
import com.jetbrains.python.psi.*;
import com.jetbrains.python.psi.impl.PyCallExpressionNavigator;
import one.util.streamex.StreamEx;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import java.util.List;
import static com.jetbrains.python.PyStringFormatParser.*;
/**
 * Helpers for registering language injections inside Python string literals, including
 * literals assembled from concatenation ({@code +}), percent formatting ({@code %}),
 * parentheses and {@code str.format()} calls.
 *
 * @author vlan
 */
public final class PyInjectionUtil {

  /**
   * Outcome of an injection attempt: whether anything was injected, and whether the
   * injection is "strict" (no dynamic parts such as format substitutions were found,
   * so the injected text is fully known).
   */
  public static class InjectionResult {
    public static final InjectionResult EMPTY = new InjectionResult(false, true);

    private final boolean myInjected;
    private final boolean myStrict;

    public InjectionResult(boolean injected, boolean strict) {
      myInjected = injected;
      myStrict = strict;
    }

    public boolean isInjected() {
      return myInjected;
    }

    public boolean isStrict() {
      return myStrict;
    }

    /** Combines two results: injected if either was injected, strict only if both were strict. */
    public InjectionResult append(@NotNull InjectionResult result) {
      return new InjectionResult(myInjected || result.isInjected(), myStrict && result.isStrict());
    }
  }

  // PSI element classes that can participate in an injectable string expression.
  public static final List<Class<? extends PsiElement>> ELEMENTS_TO_INJECT_IN =
    ContainerUtil.immutableList(PyStringLiteralExpression.class, PyParenthesizedExpression.class, PyBinaryExpression.class,
                                PyCallExpression.class, PsiComment.class);

  private PyInjectionUtil() {}

  /**
   * Returns the largest expression in the specified context that represents a string literal suitable for language injection, possibly
   * with concatenation, parentheses, or formatting.
   */
  @Nullable
  public static PsiElement getLargestStringLiteral(@NotNull PsiElement context) {
    PsiElement element = null;
    // Walk up the PSI tree while each parent still qualifies as part of a string literal;
    // the previously accepted element is passed along as the "context" for the check.
    for (PsiElement current = context; current != null && isStringLiteralPart(current, element); current = current.getParent()) {
      element = current;
    }
    return element;
  }

  /**
   * Registers language injections in the given registrar for the specified string literal element or its ancestor that contains
   * string concatenations or formatting.
   */
  @NotNull
  public static InjectionResult registerStringLiteralInjection(@NotNull PsiElement element,
                                                               @NotNull MultiHostRegistrar registrar,
                                                               @NotNull Language language) {
    registrar.startInjecting(language);
    final InjectionResult result = processStringLiteral(element, registrar, "", "", Formatting.NONE);
    // Only close the injection if at least one place was actually registered.
    if (result.isInjected()) {
      registrar.doneInjecting();
    }
    return result;
  }

  /**
   * Returns true if {@code element} can be (part of) an injectable string expression,
   * recursing through parentheses, concatenation, percent formatting, and format() calls.
   */
  private static boolean isStringLiteralPart(@NotNull PsiElement element, @Nullable PsiElement context) {
    if (element == context || element instanceof PyStringLiteralExpression || element instanceof PsiComment) {
      return true;
    }
    else if (element instanceof PyParenthesizedExpression) {
      final PyExpression contained = ((PyParenthesizedExpression)element).getContainedExpression();
      return contained != null && isStringLiteralPart(contained, context);
    }
    else if (element instanceof PyBinaryExpression) {
      final PyBinaryExpression expr = (PyBinaryExpression)element;
      final PyExpression left = expr.getLeftExpression();
      final PyExpression right = expr.getRightExpression();
      if (expr.isOperator("+")) {
        // Either side of a concatenation may contribute string content.
        return isStringLiteralPart(left, context) || right != null && isStringLiteralPart(right, context);
      }
      else if (expr.isOperator("%")) {
        // For "fmt % args", only the left (format string) side is literal content.
        return right != context && isStringLiteralPart(left, context);
      }
      return false;
    }
    else if (element instanceof PyCallExpression) {
      // Handles "literal".format(...) — the qualifier is the literal part.
      final PyExpression qualifier = getFormatCallQualifier((PyCallExpression)element);
      return qualifier != null && isStringLiteralPart(qualifier, context);
    }
    else if (element instanceof PyReferenceExpression) {
      final PyCallExpression callExpr = PyCallExpressionNavigator.getPyCallExpressionByCallee(element);
      return callExpr != null && isStringLiteralPart(callExpr, context);
    }
    return false;
  }

  /**
   * For a call of the shape {@code qualifier.format(...)}, returns the qualifier;
   * otherwise returns null.
   */
  @Nullable
  private static PyExpression getFormatCallQualifier(@NotNull PyCallExpression element) {
    final PyExpression callee = element.getCallee();
    if (callee instanceof PyQualifiedExpression) {
      final PyQualifiedExpression qualifiedExpr = (PyQualifiedExpression)callee;
      final PyExpression qualifier = qualifiedExpr.getQualifier();
      if (qualifier != null && PyNames.FORMAT.equals(qualifiedExpr.getReferencedName())) {
        return qualifier;
      }
    }
    return null;
  }

  /**
   * Recursively registers injection places for {@code element}. Literal chunks between
   * format substitutions are registered separately, with the placeholder text
   * "missing_value" used as prefix/suffix around the dynamic holes.
   */
  @NotNull
  private static InjectionResult processStringLiteral(@NotNull PsiElement element, @NotNull MultiHostRegistrar registrar,
                                                      @NotNull String prefix, @NotNull String suffix, @NotNull Formatting formatting) {
    final String missingValue = "missing_value";
    if (element instanceof PyStringLiteralExpression) {
      boolean injected = false;
      boolean strict = true;
      final PyStringLiteralExpression expr = (PyStringLiteralExpression)element;
      for (PyStringElement stringElem : expr.getStringElements()) {
        // Offset of this string element relative to the whole literal expression.
        final int nodeOffsetInParent = stringElem.getTextOffset() - expr.getTextRange().getStartOffset();
        final TextRange contentRange = stringElem.getContentRange();
        final int contentStartOffset = contentRange.getStartOffset();
        if (formatting != Formatting.NONE || stringElem.isFormatted()) {
          // Each range is relative to the start of the string element
          final List<TextRange> subsRanges;
          if (formatting != Formatting.NONE) {
            // Percent or str.format() substitutions are found by parsing the content text.
            final String content = stringElem.getContent();
            subsRanges = StreamEx.of(formatting == Formatting.NEW_STYLE ? parseNewStyleFormat(content) : parsePercentFormat(content))
              .select(SubstitutionChunk.class)
              .map(chunk -> chunk.getTextRange().shiftRight(contentStartOffset))
              .toList();
          }
          else {
            // f-string fragments already carry PSI ranges.
            subsRanges = StreamEx.of(((PyFormattedStringElement)stringElem).getFragments())
              .map(PsiElement::getTextRangeInParent)
              .toList();
          }
          // Any substitution means the final text is not fully known.
          if (!subsRanges.isEmpty()) {
            strict = false;
          }
          // Zero-width sentinel at the end so the loop below also emits the trailing literal chunk.
          final TextRange sentinel = TextRange.from(contentRange.getEndOffset(), 0);
          final List<TextRange> withSentinel = ContainerUtil.append(subsRanges, sentinel);
          int literalChunkStart = contentStartOffset;
          int literalChunkEnd;
          for (int i = 0; i < withSentinel.size(); i++) {
            final TextRange subRange = withSentinel.get(i);
            literalChunkEnd = subRange.getStartOffset();
            if (literalChunkEnd > literalChunkStart) {
              final String chunkPrefix;
              if (i == 0) {
                chunkPrefix = prefix;
              }
              else if (i == 1 && withSentinel.get(0).getStartOffset() == contentStartOffset) {
                // First chunk follows a leading substitution: stand in for its value.
                chunkPrefix = missingValue;
              }
              else {
                chunkPrefix = "";
              }
              final String chunkSuffix;
              if (i < withSentinel.size() - 1) {
                chunkSuffix = missingValue;
              }
              else if (i == withSentinel.size() - 1) {
                chunkSuffix = suffix;
              }
              else {
                // NOTE(review): unreachable — the two branches above cover every index.
                chunkSuffix = "";
              }
              final TextRange chunkRange = TextRange.create(literalChunkStart, literalChunkEnd);
              registrar.addPlace(chunkPrefix, chunkSuffix, expr, chunkRange.shiftRight(nodeOffsetInParent));
              injected = true;
            }
            literalChunkStart = subRange.getEndOffset();
          }
        }
        else {
          // Plain literal: inject its whole content as a single place.
          registrar.addPlace(prefix, suffix, expr, contentRange.shiftRight(nodeOffsetInParent));
          injected = true;
        }
      }
      return new InjectionResult(injected, strict);
    }
    else if (element instanceof PyParenthesizedExpression) {
      final PyExpression contained = ((PyParenthesizedExpression)element).getContainedExpression();
      if (contained != null) {
        return processStringLiteral(contained, registrar, prefix, suffix, formatting);
      }
    }
    else if (element instanceof PyBinaryExpression) {
      final PyBinaryExpression expr = (PyBinaryExpression)element;
      final PyExpression left = expr.getLeftExpression();
      final PyExpression right = expr.getRightExpression();
      final boolean isLeftString = isStringLiteralPart(left, null);
      if (expr.isOperator("+")) {
        final boolean isRightString = right != null && isStringLiteralPart(right, null);
        InjectionResult result = InjectionResult.EMPTY;
        // A non-string operand becomes a "missing_value" hole on that side.
        if (isLeftString) {
          result = result.append(processStringLiteral(left, registrar, prefix, isRightString ? "" : missingValue, formatting));
        }
        if (isRightString) {
          result = result.append(processStringLiteral(right, registrar, isLeftString ? "" : missingValue, suffix, formatting));
        }
        return result;
      }
      else if (expr.isOperator("%")) {
        return processStringLiteral(left, registrar, prefix, suffix, Formatting.PERCENT);
      }
    }
    else if (element instanceof PyCallExpression) {
      final PyExpression qualifier = getFormatCallQualifier((PyCallExpression)element);
      if (qualifier != null) {
        return processStringLiteral(qualifier, registrar, prefix, suffix, Formatting.NEW_STYLE);
      }
    }
    return InjectionResult.EMPTY;
  }

  /** Kind of string formatting in effect while processing a literal. */
  private enum Formatting {
    NONE,
    PERCENT,
    NEW_STYLE
  }
}
|
/***************************************************************************//**
* @file rail_ble.h
* @brief The BLE specific header file for the RAIL library.
* @copyright Copyright 2016 Silicon Laboratories, Inc. http://www.silabs.com
******************************************************************************/
#ifndef __RAIL_BLE_H__
#define __RAIL_BLE_H__
// Get the standard include types
#include <stdint.h>
#include <stdbool.h>
// Get the RAIL specific structures and types
#include "rail_types.h"
/**
* @addtogroup BLE
* @ingroup Protocol_Specific
* Accelerator routines for Bluetooth Low Energy (BLE).
*
* The APIs in this module help take care of configuring the radio for BLE
* operation and provide some additional helper routines necessary for
* normal BLE send/receive that aren't available directly in RAIL. To initialize
* the radio you will still have to call RAIL_Init(). However
* RAIL_ConfigChannels(), and RAIL_ConfigRadio() will be taken care of for you.
*
* To implement a standard BLE link layer you will also need to handle tight
* turnaround times and send packets at specific instants. This can all be
* managed through general RAIL functions like RAIL_ScheduleTx(),
* RAIL_ScheduleRx(), and RAIL_SetStateTiming(). See the full RAIL API for more
* useful functions.
*
* A simple example of how to setup your application to be in BLE mode is shown
* below. Note that this will put the radio on the first advertising channel
* with the advertising Access Address. In any full featured BLE application you
* will need to use the RAIL_BLE_ConfigChannelRadioParams() function to change
* the sync word and other parameters as needed based on your connection.
*
* @code{.c}
*
* // Put the radio into receive on the first BLE advertising channel
* int bleAdvertiseEnable(void)
* {
* // Call the BLE initialization function to load the right radio config
* RAIL_BLE_Init();
*
* // Configure us for the first advertising channel (Physical: 0, Logical: 37)
* // The CRC init value and Access Address come from the BLE specification.
* RAIL_BLE_ConfigChannelRadioParams(0x555555, 0x8E89BED6, 37, false);
*
* // Start receiving on this channel (Physical: 0, Logical: 37)
* RAIL_StartRx(0);
* }
* @endcode
*
* @{
*/
/**
 * @enum RAIL_BLE_Coding_t
 * @brief The variant of the BLE Coded PHY
 */
RAIL_ENUM(RAIL_BLE_Coding_t) {
  RAIL_BLE_Coding_125kbps = 0,     /**< 125 kbps coded variant. */
  RAIL_BLE_Coding_125kbps_DSA = 1, /**< 125 kbps coded variant using DSA. */
  RAIL_BLE_Coding_500kbps = 2,     /**< 500 kbps coded variant. */
  RAIL_BLE_Coding_500kbps_DSA = 3, /**< 500 kbps coded variant using DSA. */
};

/**
 * @struct RAIL_BLE_State_t
 * @brief State structure for BLE.
 *
 * This structure must be allocated in application global read-write memory
 * that persists for the duration of BLE usage. It cannot be allocated
 * in read-only memory or on the call stack.
 */
typedef struct RAIL_BLE_State {
  uint32_t crcInit;      /**< The value used for CRC initialization. */
  uint32_t accessAddress; /**< The access address used for the connection. */
  uint16_t channel;      /**< The logical channel used. */
  bool disableWhitening; /**< Whether the whitening engine should be off. */
} RAIL_BLE_State_t;

/**
 * Configure RAIL to run in BLE mode.
 *
 * @param[in] railHandle Handle for RAIL instance.
 * This function will change your radio and channel configuration and other
 * parameters to match what is needed for BLE. If you need to switch back to a
 * default RAIL mode then you must call RAIL_BLE_Deinit() first. This function
 * will configure the protocol output on PTI to \ref RAIL_PTI_PROTOCOL_BLE.
 */
void RAIL_BLE_Init(RAIL_Handle_t railHandle);

/**
 * Take RAIL out of BLE mode.
 *
 * @param[in] railHandle Handle for RAIL instance.
 * This function will undo some of the configuration that happens when you call
 * RAIL_BLE_Init(). After this you can safely run your normal radio
 * initialization code to use a non-BLE configuration. This function will \b
 * not change back your radio or channel configurations so you must do this by
 * manually reinitializing. This also resets the protocol output on PTI to \ref
 * RAIL_PTI_PROTOCOL_CUSTOM.
 */
void RAIL_BLE_Deinit(RAIL_Handle_t railHandle);

/**
 * Determine whether BLE mode is enabled or not.
 *
 * @param[in] railHandle Handle for RAIL instance.
 * @return True if BLE mode is enabled and false otherwise.
 * This function returns the current status of RAIL's BLE mode. It is enabled by
 * a call to RAIL_BLE_Init() and disabled by a call to RAIL_BLE_Deinit().
 */
bool RAIL_BLE_IsEnabled(RAIL_Handle_t railHandle);

/**
 * Switch the Viterbi 1Mbps BLE PHY.
 *
 * @param[in] railHandle Handle for RAIL instance.
 * @return Status code indicating success of the function call.
 *
 * You can use this function to switch back to the default BLE 1Mbps PHY if you
 * have switched to the 2Mbps or another configuration. You may only call this
 * function after initializing BLE and while the radio is idle.
 */
RAIL_Status_t RAIL_BLE_ConfigPhy1MbpsViterbi(RAIL_Handle_t railHandle);

/**
 * Switch the legacy non-Viterbi 1Mbps BLE PHY.
 *
 * @param[in] railHandle Handle for RAIL instance.
 * @return Status code indicating success of the function call.
 *
 * You can use this function to switch back to the legacy BLE 1Mbps PHY if you
 * have switched to the 2Mbps or another configuration. You may only call this
 * function after initializing BLE and while the radio is idle.
 */
RAIL_Status_t RAIL_BLE_ConfigPhy1Mbps(RAIL_Handle_t railHandle);

/**
 * Switch the Viterbi 2Mbps BLE PHY.
 *
 * @param[in] railHandle Handle for RAIL instance.
 * @return Status code indicating success of the function call.
 *
 * You can use this function to switch back to the BLE 2Mbps PHY from the
 * default 1Mbps option. You may only call this function after initializing BLE
 * and while the radio is idle.
 *
 * @note Not all chips support the 2Mbps PHY. Consult your part's reference
 * manual to be sure that it does before trying this.
 */
RAIL_Status_t RAIL_BLE_ConfigPhy2MbpsViterbi(RAIL_Handle_t railHandle);

/**
 * Switch the legacy non-Viterbi 2Mbps BLE PHY.
 *
 * @param[in] railHandle Handle for RAIL instance.
 * @return Status code indicating success of the function call.
 *
 * You can use this function to switch back to legacy BLE 2Mbps PHY from the
 * default 1Mbps option. You may only call this function after initializing BLE
 * and while the radio is idle.
 *
 * @note Not all chips support the 2Mbps PHY. Consult your part's reference
 * manual to be sure that it does before trying this.
 */
RAIL_Status_t RAIL_BLE_ConfigPhy2Mbps(RAIL_Handle_t railHandle);

/**
 * Switch to the BLE Coded PHY.
 *
 * @param[in] railHandle Handle for RAIL instance.
 * @param[in] ble_coding The RAIL_BLE_Coding_t to use
 * @return Status code indicating success of the function call.
 *
 * You can use this function to switch back to BLE Coded PHY from the default
 * 1Mbps option. You may only call this function after initializing BLE and
 * while the radio is idle. When using a BLE Coded PHY, the subPhy in
 * RAIL_AppendedInfo_t marks the coding of the received packet. A subPhy of 0
 * marks a 500kbps packet, and a subPhy of 1 marks a 125kbps packet.
 *
 * @note Not all chips support the BLE Coded PHY. Consult your part's reference
 * manual to be sure that it does before trying this.
 */
RAIL_Status_t RAIL_BLE_ConfigPhyCoded(RAIL_Handle_t railHandle,
                                      RAIL_BLE_Coding_t ble_coding);

/**
 * Helper function to change BLE radio parameters.
 *
 * @param[in] railHandle Handle for RAIL instance.
 * @param[in] crcInit The value to use for CRC initialization.
 * @param[in] accessAddress The access address to use for the connection.
 * @param[in] channel The logical channel that you're changing to. This is used to
 * initialize the whitener if you're using whitening.
 * @param[in] disableWhitening This can turn off the whitening engine and is useful
 * for sending BLE test mode packets that don't have this turned on.
 * @return Status code indicating success of the function call.
 *
 * This function can be used to switch radio parameters on every connection
 * and/or channel change. It is BLE-aware and will set the access address,
 * preamble, CRC initialization value, and whitening configuration without
 * requiring you to load a new radio config.
 */
RAIL_Status_t RAIL_BLE_ConfigChannelRadioParams(RAIL_Handle_t railHandle,
                                                uint32_t crcInit,
                                                uint32_t accessAddress,
                                                uint16_t channel,
                                                bool disableWhitening);
/** @} */ // end of BLE
#endif // __RAIL_BLE_H__
|
package main;
import com.intellij.util.concurrency.annotations.fake.RequiresEdt;
/**
 * Appears to be inspection test data (note the "fake" annotation package in the import):
 * a method that carries another annotation ({@code @Deprecated}) BEFORE the
 * {@code @RequiresEdt} annotation under test. The annotation order itself is the
 * point of this fixture — do not reorder or "clean it up".
 */
public class MethodHasOtherAnnotationBefore {
  @Deprecated
  @RequiresEdt
  public Object test() {
    return null;
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.codehaus.groovy.transform.trait;
import groovy.transform.CompileStatic;
import org.codehaus.groovy.ast.AnnotationNode;
import org.codehaus.groovy.ast.ClassHelper;
import org.codehaus.groovy.ast.ClassNode;
import org.codehaus.groovy.ast.FieldNode;
import org.codehaus.groovy.ast.GenericsType;
import org.codehaus.groovy.ast.MethodNode;
import org.codehaus.groovy.ast.Parameter;
import org.codehaus.groovy.ast.PropertyNode;
import org.codehaus.groovy.ast.expr.ArgumentListExpression;
import org.codehaus.groovy.ast.expr.ArrayExpression;
import org.codehaus.groovy.ast.expr.BinaryExpression;
import org.codehaus.groovy.ast.expr.BooleanExpression;
import org.codehaus.groovy.ast.expr.CastExpression;
import org.codehaus.groovy.ast.expr.ClassExpression;
import org.codehaus.groovy.ast.expr.ConstantExpression;
import org.codehaus.groovy.ast.expr.Expression;
import org.codehaus.groovy.ast.expr.MethodCallExpression;
import org.codehaus.groovy.ast.expr.StaticMethodCallExpression;
import org.codehaus.groovy.ast.expr.VariableExpression;
import org.codehaus.groovy.ast.stmt.BlockStatement;
import org.codehaus.groovy.ast.stmt.EmptyStatement;
import org.codehaus.groovy.ast.stmt.ExpressionStatement;
import org.codehaus.groovy.ast.stmt.IfStatement;
import org.codehaus.groovy.ast.stmt.ReturnStatement;
import org.codehaus.groovy.ast.stmt.Statement;
import org.codehaus.groovy.ast.tools.GeneralUtils;
import org.codehaus.groovy.ast.tools.GenericsUtils;
import org.codehaus.groovy.classgen.asm.BytecodeHelper;
import org.codehaus.groovy.control.CompilationUnit;
import org.codehaus.groovy.control.SourceUnit;
import org.codehaus.groovy.runtime.InvokerHelper;
import org.codehaus.groovy.runtime.MetaClassHelper;
import org.codehaus.groovy.syntax.SyntaxException;
import org.codehaus.groovy.syntax.Token;
import org.codehaus.groovy.syntax.Types;
import org.codehaus.groovy.transform.ASTTransformationCollectorCodeVisitor;
import org.codehaus.groovy.transform.sc.StaticCompileTransformation;
import org.objectweb.asm.Opcodes;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import static org.codehaus.groovy.ast.tools.GenericsUtils.correctToGenericsSpecRecurse;
/**
* This class contains a static utility method {@link #doExtendTraits(org.codehaus.groovy.ast.ClassNode, org.codehaus.groovy.control.SourceUnit, org.codehaus.groovy.control.CompilationUnit)}
* aimed at generating code for a classnode implementing a trait.
*
* @author Cédric Champeau
* @since 2.3.0
*/
public abstract class TraitComposer {
/**
 * This comparator is used to make sure that generated direct getters appear first in the list of method
 * nodes.
 *
 * <p>Note: the previous implementation decided solely from {@code o1} (returning -1 or 1, never 0),
 * which violated the {@link Comparator} contract — when both names end with the getter suffix,
 * {@code compare(a, b)} and {@code compare(b, a)} both returned -1 (not antisymmetric), which can make
 * {@code Collections.sort} throw "Comparison method violates its general contract!". This version
 * compares both arguments and treats two getters (or two non-getters) as equal, preserving the
 * getters-first ordering while satisfying the contract.
 */
private static final Comparator<MethodNode> GETTER_FIRST_COMPARATOR = new Comparator<MethodNode>() {
    public int compare(final MethodNode o1, final MethodNode o2) {
        boolean firstIsGetter = o1.getName().endsWith(Traits.DIRECT_GETTER_SUFFIX);
        boolean secondIsGetter = o2.getName().endsWith(Traits.DIRECT_GETTER_SUFFIX);
        if (firstIsGetter == secondIsGetter) return 0;
        return firstIsGetter ? -1 : 1;
    }
};

/** ClassNode for {@link CompileStatic}, applied to the accessor methods generated for trait fields. */
public static final ClassNode COMPILESTATIC_CLASSNODE = ClassHelper.make(CompileStatic.class);
/**
 * Given a class node, if this class node implements a trait, then generate all the appropriate
 * code which delegates calls to the trait. It is safe to call this method on a class node which
 * does not implement a trait.
 * @param cNode a class node
 * @param unit the source unit
 * @param cu the compilation unit, used to obtain the transform loader for AST transformation collection
 */
public static void doExtendTraits(final ClassNode cNode, final SourceUnit unit, final CompilationUnit cu) {
    if (cNode.isInterface()) return;
    boolean isItselfTrait = Traits.isTrait(cNode);
    SuperCallTraitTransformer superCallTransformer = new SuperCallTraitTransformer(unit);
    if (isItselfTrait) {
        // A trait itself gets no delegation code; only validate its superclass.
        checkTraitAllowed(cNode, unit);
        return;
    }
    // Skip generated helper classes (their names end with the trait-helper suffix).
    if (!cNode.getNameWithoutPackage().endsWith(Traits.TRAIT_HELPER)) {
        List<ClassNode> traits = findTraits(cNode);
        for (ClassNode trait : traits) {
            TraitHelpersTuple helpers = Traits.findHelpers(trait);
            applyTrait(trait, cNode, helpers);
            superCallTransformer.visitClass(cNode);
            if (unit!=null) {
                // Re-collect AST transformations for the code just woven into cNode.
                ASTTransformationCollectorCodeVisitor collector = new ASTTransformationCollectorCodeVisitor(unit, cu.getTransformLoader());
                collector.visitClass(cNode);
            }
        }
    }
}
/**
 * Collects every interface of the given class node (in reverse resolution order) that is
 * annotated as a trait.
 *
 * @param cNode the class node whose interfaces are inspected
 * @return the trait interfaces implemented by {@code cNode}, possibly empty
 */
private static List<ClassNode> findTraits(ClassNode cNode) {
    LinkedHashSet<ClassNode> seenInterfaces = new LinkedHashSet<ClassNode>();
    Traits.collectAllInterfacesReverseOrder(cNode, seenInterfaces);
    List<ClassNode> foundTraits = new LinkedList<ClassNode>();
    for (Iterator<ClassNode> it = seenInterfaces.iterator(); it.hasNext(); ) {
        ClassNode candidate = it.next();
        if (Traits.isAnnotatedWithTrait(candidate)) {
            foundTraits.add(candidate);
        }
    }
    return foundTraits;
}
/**
 * Verifies that a trait, if it extends anything beyond Object, extends another trait;
 * otherwise reports a syntax error on the source unit.
 *
 * @param bottomTrait the trait class node being validated
 * @param unit the source unit used for error reporting
 */
private static void checkTraitAllowed(final ClassNode bottomTrait, final SourceUnit unit) {
    final ClassNode parent = bottomTrait.getSuperClass();
    final boolean extendsSomething = parent != null && !ClassHelper.OBJECT_TYPE.equals(parent);
    if (extendsSomething && !Traits.isTrait(parent)) {
        unit.addError(new SyntaxException(
                "A trait can only inherit from another trait",
                parent.getLineNumber(),
                parent.getColumnNumber()));
    }
}
/**
 * Weaves a single trait into the target class: creates forwarder methods that delegate to the
 * trait's static helper class, wires instance/static initializer calls, and implements the
 * field-helper interface by generating backing fields and accessor methods.
 *
 * @param trait the trait being applied
 * @param cNode the class node receiving the trait's behavior
 * @param helpers the generated helper/field-helper class pair for the trait
 */
private static void applyTrait(final ClassNode trait, final ClassNode cNode, final TraitHelpersTuple helpers) {
    ClassNode helperClassNode = helpers.getHelper();
    ClassNode fieldHelperClassNode = helpers.getFieldHelper();
    Map<String,ClassNode> genericsSpec = GenericsUtils.createGenericsSpec(cNode);
    genericsSpec = GenericsUtils.createGenericsSpec(trait, genericsSpec);
    for (MethodNode methodNode : helperClassNode.getAllDeclaredMethods()) {
        String name = methodNode.getName();
        Parameter[] helperMethodParams = methodNode.getParameters();
        boolean isAbstract = methodNode.isAbstract();
        // Only forward concrete static helper methods whose first parameter is the receiver,
        // skipping synthetic members (names containing '$').
        if (!isAbstract && helperMethodParams.length > 0 && ((methodNode.getModifiers() & Opcodes.ACC_STATIC) == Opcodes.ACC_STATIC) && !name.contains("$")) {
            ArgumentListExpression argList = new ArgumentListExpression();
            // The helper's first argument is the instance itself.
            argList.addExpression(new VariableExpression("this"));
            Parameter[] origParams = new Parameter[helperMethodParams.length - 1];
            Parameter[] params = new Parameter[helperMethodParams.length - 1];
            System.arraycopy(methodNode.getParameters(), 1, params, 0, params.length);
            Map<String,ClassNode> methodGenericsSpec = new LinkedHashMap<String, ClassNode>(genericsSpec);
            MethodNode originalMethod = trait.getMethod(name, params);
            // Original method may be null in case of a private method
            if (originalMethod!=null) {
                methodGenericsSpec = GenericsUtils.addMethodGenerics(originalMethod, methodGenericsSpec);
            }
            // Rebuild the forwarder's parameter list (dropping the receiver parameter),
            // resolving generics against the target class and copying parameter annotations.
            for (int i = 1; i < helperMethodParams.length; i++) {
                Parameter parameter = helperMethodParams[i];
                ClassNode originType = parameter.getOriginType();
                ClassNode fixedType = correctToGenericsSpecRecurse(methodGenericsSpec, originType);
                Parameter newParam = new Parameter(fixedType, "arg" + i);
                List<AnnotationNode> copied = new LinkedList<AnnotationNode>();
                List<AnnotationNode> notCopied = new LinkedList<AnnotationNode>();
                GeneralUtils.copyAnnotatedNodeAnnotations(parameter, copied, notCopied);
                newParam.addAnnotations(copied);
                params[i - 1] = newParam;
                origParams[i-1] = parameter;
                argList.addExpression(new VariableExpression(params[i - 1]));
            }
            createForwarderMethod(trait, cNode, methodNode, originalMethod, helperClassNode, methodGenericsSpec, helperMethodParams, origParams, params, argList);
        }
    }
    // Run the trait's instance initializer (Helper.$init$(this)) when an object is constructed.
    cNode.addObjectInitializerStatements(new ExpressionStatement(
            new MethodCallExpression(
                    new ClassExpression(helperClassNode),
                    Traits.INIT_METHOD,
                    new ArgumentListExpression(new VariableExpression("this")))
    ));
    // Run the trait's static initializer once for the class itself.
    MethodCallExpression staticInitCall = new MethodCallExpression(
            new ClassExpression(helperClassNode),
            Traits.STATIC_INIT_METHOD,
            new ArgumentListExpression(new ClassExpression(cNode)));
    MethodNode staticInitMethod = new MethodNode(
            Traits.STATIC_INIT_METHOD, Opcodes.ACC_STATIC | Opcodes.ACC_PUBLIC, ClassHelper.VOID_TYPE,
            new Parameter[] {new Parameter(ClassHelper.CLASS_Type,"clazz")}, ClassNode.EMPTY_ARRAY, EmptyStatement.INSTANCE);
    staticInitMethod.setDeclaringClass(helperClassNode);
    staticInitCall.setMethodTarget(staticInitMethod);
    cNode.addStaticInitializerStatements(Collections.<Statement>singletonList(new ExpressionStatement(
            staticInitCall
    )), false);
    if (fieldHelperClassNode != null && !cNode.declaresInterface(fieldHelperClassNode)) {
        // we should implement the field helper interface too
        cNode.addInterface(fieldHelperClassNode);
        // implementation of methods
        List<MethodNode> declaredMethods = fieldHelperClassNode.getAllDeclaredMethods();
        // Getters are sorted first so the backing field exists before its setter is generated.
        Collections.sort(declaredMethods, GETTER_FIRST_COMPARATOR);
        for (MethodNode methodNode : declaredMethods) {
            String fieldName = methodNode.getName();
            if (fieldName.endsWith(Traits.DIRECT_GETTER_SUFFIX) || fieldName.endsWith(Traits.DIRECT_SETTER_SUFFIX)) {
                // Field-accessor names have the form <fieldName>$<get|set>; split them apart.
                int suffixIdx = fieldName.lastIndexOf("$");
                fieldName = fieldName.substring(0, suffixIdx);
                String operation = methodNode.getName().substring(suffixIdx + 1);
                boolean getter = "get".equals(operation);
                ClassNode returnType = correctToGenericsSpecRecurse(genericsSpec, methodNode.getReturnType());
                int isStatic = 0;
                boolean publicField = true;
                // Probe the field helper for the backing field, trying
                // public/private, then static public/static private prefixes.
                FieldNode helperField = fieldHelperClassNode.getField(Traits.FIELD_PREFIX + Traits.PUBLIC_FIELD_PREFIX + fieldName);
                if (helperField==null) {
                    publicField = false;
                    helperField = fieldHelperClassNode.getField(Traits.FIELD_PREFIX + Traits.PRIVATE_FIELD_PREFIX + fieldName);
                }
                if (helperField==null) {
                    publicField = true;
                    // try to find a static one
                    helperField = fieldHelperClassNode.getField(Traits.STATIC_FIELD_PREFIX+Traits.PUBLIC_FIELD_PREFIX+fieldName);
                    if (helperField==null) {
                        publicField = false;
                        helperField = fieldHelperClassNode.getField(Traits.STATIC_FIELD_PREFIX+Traits.PRIVATE_FIELD_PREFIX +fieldName);
                    }
                    isStatic = Opcodes.ACC_STATIC;
                }
                if (getter) {
                    // add field
                    if (helperField!=null) {
                        List<AnnotationNode> copied = new LinkedList<AnnotationNode>();
                        List<AnnotationNode> notCopied = new LinkedList<AnnotationNode>();
                        GeneralUtils.copyAnnotatedNodeAnnotations(helperField, copied, notCopied);
                        FieldNode fieldNode = cNode.addField(fieldName, (publicField?Opcodes.ACC_PUBLIC:Opcodes.ACC_PRIVATE) | isStatic, returnType, null);
                        fieldNode.addAnnotations(copied);
                    }
                }
                Parameter[] newParams;
                if (getter) {
                    newParams = Parameter.EMPTY_ARRAY;
                } else {
                    // Setter takes a single "val" parameter; unresolved generic placeholders fall back to Object.
                    ClassNode originType = methodNode.getParameters()[0].getOriginType();
                    ClassNode fixedType = originType.isGenericsPlaceHolder()?ClassHelper.OBJECT_TYPE:correctToGenericsSpecRecurse(genericsSpec, originType);
                    newParams = new Parameter[]{new Parameter(fixedType, "val")};
                }
                // Accessor body: either "return field;" or "field = val;".
                Expression fieldExpr = new VariableExpression(cNode.getField(fieldName));
                Statement body =
                        getter ? new ReturnStatement(fieldExpr) :
                                new ExpressionStatement(
                                        new BinaryExpression(
                                                fieldExpr,
                                                Token.newSymbol(Types.EQUAL, 0, 0),
                                                new VariableExpression(newParams[0])
                                        )
                                );
                MethodNode impl = new MethodNode(
                        methodNode.getName(),
                        Opcodes.ACC_PUBLIC | isStatic,
                        returnType,
                        newParams,
                        ClassNode.EMPTY_ARRAY,
                        body
                );
                // Generated accessors are statically compiled.
                AnnotationNode an = new AnnotationNode(COMPILESTATIC_CLASSNODE);
                impl.addAnnotation(an);
                cNode.addTransform(StaticCompileTransformation.class, an);
                cNode.addMethod(impl);
            }
        }
    }
}
/**
 * Creates, on the target class, a forwarder method which delegates to the static
 * implementation generated in the trait helper class
 * ({@code TraitName$Trait$Helper.method(...)}).
 * Private instance helper methods get no forwarder (see GROOVY-7213).
 */
private static void createForwarderMethod(
        ClassNode trait,
        ClassNode targetNode,
        MethodNode helperMethod,
        MethodNode originalMethod,
        ClassNode helperClassNode,
        Map<String,ClassNode> genericsSpec,
        Parameter[] helperMethodParams,
        Parameter[] traitMethodParams,
        Parameter[] forwarderParams,
        ArgumentListExpression helperMethodArgList) {
    // Build the delegating call: HelperClass.method(args)
    MethodCallExpression mce = new MethodCallExpression(
            new ClassExpression(helperClassNode),
            helperMethod.getName(),
            helperMethodArgList
    );
    mce.setImplicitThis(false);
    // Fold the helper method's own generics into the resolution context before
    // fixing up exception and return types.
    genericsSpec = GenericsUtils.addMethodGenerics(helperMethod,genericsSpec);
    ClassNode[] exceptionNodes = correctToGenericsSpecRecurse(genericsSpec, copyExceptions(helperMethod.getExceptions()));
    ClassNode fixedReturnType = correctToGenericsSpecRecurse(genericsSpec, helperMethod.getReturnType());
    // When generics are involved, cast the helper call so the expression type
    // matches the resolved return type of the forwarder.
    Expression forwardExpression = genericsSpec.isEmpty()?mce:new CastExpression(fixedReturnType,mce);
    int access = helperMethod.getModifiers();
    // we could rely on the first parameter name ($static$self) but that information is not
    // guaranteed to be always present
    boolean isHelperForStaticMethod = helperMethodParams[0].getOriginType().equals(ClassHelper.CLASS_Type);
    if (Modifier.isPrivate(access) && !isHelperForStaticMethod) {
        // do not create forwarder for private methods
        // see GROOVY-7213
        return;
    }
    if (!isHelperForStaticMethod) {
        // Helper implementations are static; drop the flag for instance forwarders.
        // NOTE(review): XOR assumes ACC_STATIC is always set here — it would
        // otherwise *add* the flag; confirm helper methods are always static.
        access = access ^ Opcodes.ACC_STATIC;
    }
    MethodNode forwarder = new MethodNode(
            helperMethod.getName(),
            access,
            fixedReturnType,
            forwarderParams,
            exceptionNodes,
            new ExpressionStatement(forwardExpression)
    );
    // Carry the helper method's annotations over to the forwarder.
    List<AnnotationNode> copied = new LinkedList<AnnotationNode>();
    List<AnnotationNode> notCopied = Collections.emptyList(); // at this point, should *always* stay empty
    GeneralUtils.copyAnnotatedNodeAnnotations(helperMethod, copied, notCopied);
    if (!copied.isEmpty()) {
        forwarder.addAnnotations(copied);
    }
    if (originalMethod!=null) {
        // Keep only the generic placeholders that survive the current generics context.
        GenericsType[] newGt = GenericsUtils.applyGenericsContextToPlaceHolders(genericsSpec, originalMethod.getGenericsTypes());
        newGt = removeNonPlaceHolders(newGt);
        forwarder.setGenericsTypes(newGt);
    }
    // add a helper annotation indicating that it is a bridge method
    AnnotationNode bridgeAnnotation = new AnnotationNode(Traits.TRAITBRIDGE_CLASSNODE);
    bridgeAnnotation.addMember("traitClass", new ClassExpression(trait));
    bridgeAnnotation.addMember("desc", new ConstantExpression(BytecodeHelper.getMethodDescriptor(helperMethod.getReturnType(), traitMethodParams)));
    forwarder.addAnnotation(
            bridgeAnnotation
    );
    // Do not shadow a method or property the weaved class already declares.
    if (!shouldSkipMethod(targetNode, forwarder.getName(), forwarderParams)) {
        targetNode.addMethod(forwarder);
    }
    createSuperForwarder(targetNode, forwarder, genericsSpec);
}
/**
 * Filters an array of generics types down to the placeholder entries only.
 *
 * @param oldTypes the original generics types (may be null or empty)
 * @return the original array when nothing was removed, {@code null} when every
 *         entry was a non-placeholder, otherwise a new array of the placeholders
 */
private static GenericsType[] removeNonPlaceHolders(GenericsType[] oldTypes) {
    if (oldTypes == null || oldTypes.length == 0) {
        return oldTypes;
    }
    List<GenericsType> placeholders = new ArrayList<GenericsType>(oldTypes.length);
    for (GenericsType type : oldTypes) {
        if (type.isPlaceholder()) {
            placeholders.add(type);
        }
    }
    if (placeholders.size() == oldTypes.length) {
        // nothing filtered out; reuse the original array
        return oldTypes;
    }
    if (placeholders.isEmpty()) {
        return null;
    }
    return placeholders.toArray(new GenericsType[placeholders.size()]);
}
/**
 * Creates, if necessary, a super forwarder method, for stackable traits.
 *
 * @param targetNode the class being weaved
 * @param forwarder a forwarder method
 * @param genericsSpec the current generics resolution context
 */
private static void createSuperForwarder(ClassNode targetNode, MethodNode forwarder, final Map<String,ClassNode> genericsSpec) {
    // Walk all implemented interfaces in reverse order so stacked traits are
    // visited in their linearization order.
    List<ClassNode> interfaces = new ArrayList<ClassNode>(Traits.collectAllInterfacesReverseOrder(targetNode, new LinkedHashSet<ClassNode>()));
    String name = forwarder.getName();
    Parameter[] forwarderParameters = forwarder.getParameters();
    LinkedHashSet<ClassNode> traits = new LinkedHashSet<ClassNode>();
    List<MethodNode> superForwarders = new LinkedList<MethodNode>();
    for (ClassNode node : interfaces) {
        if (Traits.isTrait(node)) {
            MethodNode method = node.getDeclaredMethod(name, forwarderParameters);
            if (method!=null) {
                // a similar method exists, we need a super bridge
                // trait$super$foo(Class currentTrait, ...)
                traits.add(node);
                superForwarders.add(method);
            }
        }
    }
    // Generate one super bridge per trait that declares a competing method.
    for (MethodNode superForwarder : superForwarders) {
        doCreateSuperForwarder(targetNode, superForwarder, traits.toArray(new ClassNode[traits.size()]), genericsSpec);
    }
}
/**
 * Creates a method to dispatch to "super traits" in a "stackable" fashion. The generated method looks like this:
 * <p>
 * <code>ReturnType trait$super$method(Class clazz, Arg1 arg1, Arg2 arg2, ...) {
 *   if (SomeTrait.is(A) { return SomeOtherTrait$Trait$Helper.method(this, arg1, arg2) }
 *   super.method(arg1,arg2)
 * }</code>
 * </p>
 * @param targetNode the class being weaved
 * @param forwarderMethod the forwarder to generate a super bridge for
 * @param interfacesToGenerateForwarderFor the stacked traits, in dispatch order
 * @param genericsSpec the current generics resolution context
 */
private static void doCreateSuperForwarder(ClassNode targetNode, MethodNode forwarderMethod, ClassNode[] interfacesToGenerateForwarderFor, Map<String,ClassNode> genericsSpec) {
    // Resolve the parameter types against the current generics context.
    Parameter[] parameters = forwarderMethod.getParameters();
    Parameter[] superForwarderParams = new Parameter[parameters.length];
    for (int i = 0; i < parameters.length; i++) {
        Parameter parameter = parameters[i];
        ClassNode originType = parameter.getOriginType();
        superForwarderParams[i] = new Parameter(correctToGenericsSpecRecurse(genericsSpec, originType), parameter.getName());
    }
    for (int i = 0; i < interfacesToGenerateForwarderFor.length; i++) {
        final ClassNode current = interfacesToGenerateForwarderFor[i];
        // "next" is the trait that follows in the stack; the last trait in the
        // chain falls back to the regular super call instead.
        final ClassNode next = i < interfacesToGenerateForwarderFor.length - 1 ? interfacesToGenerateForwarderFor[i + 1] : null;
        String forwarderName = Traits.getSuperTraitMethodName(current, forwarderMethod.getName());
        // Only generate the bridge when the class does not already declare it.
        if (targetNode.getDeclaredMethod(forwarderName, superForwarderParams) == null) {
            ClassNode returnType = correctToGenericsSpecRecurse(genericsSpec, forwarderMethod.getReturnType());
            Statement delegate = next == null ? createSuperFallback(forwarderMethod, returnType) : createDelegatingForwarder(forwarderMethod, next);
            MethodNode methodNode = targetNode.addMethod(forwarderName, Opcodes.ACC_PUBLIC | Opcodes.ACC_SYNTHETIC, returnType, superForwarderParams, ClassNode.EMPTY_ARRAY, delegate);
            methodNode.setGenericsTypes(forwarderMethod.getGenericsTypes());
        }
    }
}
/**
 * Builds the fallback statement used by the last trait in a stack: when the
 * receiver is a generated proxy, dispatch dynamically on the proxy target;
 * otherwise perform a plain {@code super} call.
 *
 * @param forwarderMethod the method the fallback delegates to
 * @param returnType the resolved return type, used to cast the dynamic call
 * @return an if/else statement implementing the dispatch
 */
private static Statement createSuperFallback(MethodNode forwarderMethod, ClassNode returnType) {
    // Reuse the forwarder's parameters as call arguments.
    ArgumentListExpression args = new ArgumentListExpression();
    Parameter[] forwarderMethodParameters = forwarderMethod.getParameters();
    for (final Parameter forwarderMethodParameter : forwarderMethodParameters) {
        args.addExpression(new VariableExpression(forwarderMethodParameter));
    }
    // this instanceof GeneratedGroovyProxy
    BinaryExpression instanceOfExpr = new BinaryExpression(new VariableExpression("this"), Token.newSymbol(Types.KEYWORD_INSTANCEOF, -1, -1), new ClassExpression(Traits.GENERATED_PROXY_CLASSNODE));
    // else branch: super.method(args)
    MethodCallExpression superCall = new MethodCallExpression(
            new VariableExpression("super"),
            forwarderMethod.getName(),
            args
    );
    superCall.setImplicitThis(false);
    // then branch: InvokerHelper.invokeMethod(((GeneratedGroovyProxy) this).getProxyTarget(), name, argsArray)
    CastExpression proxyReceiver = new CastExpression(Traits.GENERATED_PROXY_CLASSNODE, new VariableExpression("this"));
    MethodCallExpression getProxy = new MethodCallExpression(proxyReceiver, "getProxyTarget", ArgumentListExpression.EMPTY_ARGUMENTS);
    getProxy.setImplicitThis(true);
    StaticMethodCallExpression proxyCall = new StaticMethodCallExpression(
            ClassHelper.make(InvokerHelper.class),
            "invokeMethod",
            new ArgumentListExpression(getProxy, new ConstantExpression(forwarderMethod.getName()), new ArrayExpression(ClassHelper.OBJECT_TYPE, args.getExpressions()))
    );
    IfStatement stmt = new IfStatement(
            new BooleanExpression(instanceOfExpr),
            new ExpressionStatement(new CastExpression(returnType,proxyCall)),
            new ExpressionStatement(superCall)
    );
    return stmt;
}
/**
 * Builds the statement used by a non-terminal trait in a stack: delegate to the
 * next trait's helper class.
 *
 * @param forwarderMethod the method being bridged
 * @param next the next trait in the stack to delegate to
 * @return the delegating statement
 */
private static Statement createDelegatingForwarder(final MethodNode forwarderMethod, final ClassNode next) {
    // generates --> next$Trait$Helper.method(this, arg1, arg2)
    TraitHelpersTuple helpers = Traits.findHelpers(next);
    ArgumentListExpression args = new ArgumentListExpression();
    args.addExpression(new VariableExpression("this"));
    Parameter[] forwarderMethodParameters = forwarderMethod.getParameters();
    for (final Parameter forwarderMethodParameter : forwarderMethodParameters) {
        args.addExpression(new VariableExpression(forwarderMethodParameter));
    }
    StaticMethodCallExpression delegateCall = new StaticMethodCallExpression(
            helpers.getHelper(),
            forwarderMethod.getName(),
            args
    );
    Statement result;
    if (ClassHelper.VOID_TYPE.equals(forwarderMethod.getReturnType())) {
        // For void methods, still emit an explicit "return null" after the call.
        BlockStatement stmt = new BlockStatement();
        stmt.addStatement(new ExpressionStatement(delegateCall));
        stmt.addStatement(new ReturnStatement(new ConstantExpression(null)));
        result = stmt;
    } else {
        result = new ReturnStatement(delegateCall);
    }
    return result;
}
/**
 * Returns a fresh copy of the given exception array so callers can mutate the
 * result without affecting the source.
 *
 * <p>Fix: the original computed a zero length for a {@code null} input but then
 * still passed the {@code null} source to {@link System#arraycopy}, which throws
 * {@link NullPointerException} even when the copy length is 0.</p>
 *
 * @param sourceExceptions the exceptions declared by a method; may be null
 * @return a new array with the same elements; empty when the input is null or empty
 */
private static ClassNode[] copyExceptions(final ClassNode[] sourceExceptions) {
    if (sourceExceptions == null || sourceExceptions.length == 0) {
        // always return a fresh array, matching the original contract
        return new ClassNode[0];
    }
    ClassNode[] exceptionNodes = new ClassNode[sourceExceptions.length];
    System.arraycopy(sourceExceptions, 0, exceptionNodes, 0, exceptionNodes.length);
    return exceptionNodes;
}
/**
 * Tells whether a forwarder must not be added because the weaved class already
 * provides its own implementation, either as a property accessor or as an
 * explicitly declared method.
 */
private static boolean shouldSkipMethod(final ClassNode cNode, final String name, final Parameter[] params) {
    return isExistingProperty(name, cNode, params)
            || cNode.getDeclaredMethod(name, params) != null;
}
/**
 * An utility method which tries to find a method with default implementation (in the Java 8 semantics).
 * Searches the given class node itself (when it is an interface), then its
 * implemented interfaces depth-first, and finally recurses into the superclass.
 *
 * @param cNode a class node
 * @param name the name of the method
 * @param params the parameters of the method
 * @return a method node corresponding to a default method if it exists
 */
private static MethodNode findDefaultMethodFromInterface(final ClassNode cNode, final String name, final Parameter[] params) {
    if (cNode == null) {
        return null;
    }
    if (cNode.isInterface()) {
        // A non-abstract method declared on an interface is a default method.
        MethodNode method = cNode.getMethod(name, params);
        if (method!=null && !method.isAbstract()) {
            // this is a Java 8 only behavior!
            return method;
        }
    }
    // Depth-first search through the implemented interfaces.
    ClassNode[] interfaces = cNode.getInterfaces();
    for (ClassNode anInterface : interfaces) {
        MethodNode res = findDefaultMethodFromInterface(anInterface, name, params);
        if (res!=null) {
            return res;
        }
    }
    // Finally walk up the superclass chain.
    return findDefaultMethodFromInterface(cNode.getSuperClass(), name, params);
}
/**
 * Tells whether the given method name and signature correspond to an accessor
 * (getter/setter) of a property already declared on the class.
 *
 * @param methodName an accessor-shaped method name (getX / isX / setX)
 * @param cNode the class to look the property up on
 * @param params the accessor's parameters (getters: none; setters: exactly one)
 * @return true when a matching property exists on {@code cNode}
 */
private static boolean isExistingProperty(final String methodName, final ClassNode cNode, final Parameter[] params) {
    final String stripped;
    final boolean getter;
    if (methodName.startsWith("get")) {
        stripped = methodName.substring(3);
        getter = true;
    } else if (methodName.startsWith("is")) {
        stripped = methodName.substring(2);
        getter = true;
    } else if (methodName.startsWith("set")) {
        stripped = methodName.substring(3);
        getter = false;
    } else {
        // not an accessor-shaped name at all
        return false;
    }
    // Getters take no arguments; setters take exactly one.
    if (getter ? params.length > 0 : params.length != 1) {
        return false;
    }
    if (stripped.length() == 0) {
        return false;
    }
    String propertyName = MetaClassHelper.convertPropertyName(stripped);
    return cNode.getProperty(propertyName) != null;
}
}
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
#nullable disable
using System;
using System.Collections.Generic;
using System.Collections.Immutable;
using System.Linq;
using System.Threading;
using Microsoft.CodeAnalysis.Editor.Shared.Tagging;
using Microsoft.CodeAnalysis.Editor.Shared.Threading;
using Microsoft.CodeAnalysis.Editor.Shared.Utilities;
using Microsoft.CodeAnalysis.Options;
using Microsoft.CodeAnalysis.Shared.TestHooks;
using Microsoft.CodeAnalysis.Text;
using Microsoft.VisualStudio.Text;
using Microsoft.VisualStudio.Text.Editor;
using Microsoft.VisualStudio.Text.Tagging;
using Roslyn.Utilities;
namespace Microsoft.CodeAnalysis.Editor.Tagging
{
internal partial class AbstractAsynchronousTaggerProvider<TTag>
{
private sealed partial class TagSource : ForegroundThreadAffinitizedObject
{
#region Fields that can be accessed from either thread

/// <summary>
/// The async worker we defer to handle foreground/background thread management for this
/// tagger. Note: some operations we perform on this must be uncancellable. Specifically,
/// once we've updated our internal state we need to *ensure* that the UI eventually gets in
/// sync with it. As such, we allow cancellation of our tasks *until* we update our state.
/// From that point on, we must proceed and execute the tasks.
/// </summary>
private readonly AsynchronousSerialWorkQueue _workQueue;

/// <summary>The provider that created this tag source; supplies options, delays and behaviors.</summary>
private readonly AbstractAsynchronousTaggerProvider<TTag> _dataSource;

/// <summary>Compares tag spans when diffing old vs. new tags.</summary>
private readonly IEqualityComparer<ITagSpan<TTag>> _tagSpanComparer;

/// <summary>
/// async operation notifier
/// </summary>
private readonly IAsynchronousOperationListener _asyncListener;

/// <summary>
/// foreground notification service
/// </summary>
private readonly IForegroundNotificationService _notificationService;

#endregion

#region Fields that can only be accessed from the foreground thread

// The view this tagger is attached to; null for buffer-only taggers.
private readonly ITextView _textViewOpt;
private readonly ITextBuffer _subjectBuffer;

/// <summary>
/// Our tagger event source that lets us know when we should call into the tag producer for
/// new tags.
/// </summary>
private readonly ITaggerEventSource _eventSource;

/// <summary>
/// During the time that we are paused from updating the UI, we will use these tags instead.
/// </summary>
private ImmutableDictionary<ITextBuffer, TagSpanIntervalTree<TTag>> _previousCachedTagTrees;

/// <summary>
/// accumulated text changes since last tag calculation
/// </summary>
private TextChangeRange? _accumulatedTextChanges_doNotAccessDirectly;
private ImmutableDictionary<ITextBuffer, TagSpanIntervalTree<TTag>> _cachedTagTrees_doNotAccessDirectly;
// NOTE(review): field name has a typo ("Directy"); renaming would also touch
// the State property that wraps it.
private object _state_doNotAccessDirecty;
private bool _upToDate_doNotAccessDirectly = false;

#endregion

// Raised when tags changed for one or more buffers; the bool indicates the initial tag computation.
public event Action<ICollection<KeyValuePair<ITextBuffer, DiffResult>>, bool> TagsChangedForBuffer;

public event EventHandler Paused;
public event EventHandler Resumed;

/// <summary>
/// A cancellation source we use for the initial tagging computation. We only cancel
/// if our ref count actually reaches 0. Otherwise, we always try to compute the initial
/// set of tags for our view/buffer.
/// </summary>
private readonly CancellationTokenSource _initialComputationCancellationTokenSource = new();

// Notification delays are delegated to the data source.
public TaggerDelay AddedTagNotificationDelay => _dataSource.AddedTagNotificationDelay;
public TaggerDelay RemovedTagNotificationDelay => _dataSource.RemovedTagNotificationDelay;
/// <summary>
/// Creates the tag source, wires up its event source, and immediately kicks off
/// the initial tag computation.
/// </summary>
/// <param name="textViewOpt">The view being tagged; null for buffer-only taggers.</param>
/// <param name="subjectBuffer">The buffer tags are produced for.</param>
/// <param name="dataSource">The owning provider; must not use <see cref="SpanTrackingMode.Custom"/>.</param>
/// <param name="asyncListener">Async operation listener for test tracking.</param>
/// <param name="notificationService">Service used to post work back to the foreground.</param>
public TagSource(
    ITextView textViewOpt,
    ITextBuffer subjectBuffer,
    AbstractAsynchronousTaggerProvider<TTag> dataSource,
    IAsynchronousOperationListener asyncListener,
    IForegroundNotificationService notificationService)
    : base(dataSource.ThreadingContext)
{
    if (dataSource.SpanTrackingMode == SpanTrackingMode.Custom)
    {
        // Fix: the original passed the literal "spanTrackingMode", which is not a
        // parameter of this constructor; the offending value comes from dataSource.
        throw new ArgumentException("SpanTrackingMode.Custom not allowed.", nameof(dataSource));
    }

    _subjectBuffer = subjectBuffer;
    _textViewOpt = textViewOpt;
    _dataSource = dataSource;
    _asyncListener = asyncListener;
    _notificationService = notificationService;
    _tagSpanComparer = new TagSpanComparer(_dataSource.TagComparer);

    DebugRecordInitialStackTrace();

    _workQueue = new AsynchronousSerialWorkQueue(ThreadingContext, asyncListener);
    this.CachedTagTrees = ImmutableDictionary.Create<ITextBuffer, TagSpanIntervalTree<TTag>>();

    _eventSource = CreateEventSource();

    Connect();

    // Start computing the initial set of tags immediately. We want to get the UI
    // to a complete state as soon as possible.
    ComputeInitialTags();
}
/// <summary>
/// Schedules the first full tag computation on the foreground notification queue.
/// </summary>
private void ComputeInitialTags()
{
    // Note: we always kick this off to the new UI pump instead of computing tags right
    // on this thread. The reason for that is that we may be getting created at a time
    // when the view itself is initializing. As such the view is not in a state where
    // we want code touching it.
    RegisterNotification(
        () => RecomputeTagsForeground(initialTags: true),
        delay: 0,
        cancellationToken: GetCancellationToken(initialTags: true));
}
/// <summary>
/// Builds the event source that tells us when tags must be recomputed. When the
/// data source exposes options, option-change notifications are composed in as well.
/// </summary>
private ITaggerEventSource CreateEventSource()
{
    var baseSource = _dataSource.CreateEventSource(_textViewOpt, _subjectBuffer);

    // If there are any options specified for this tagger, then also hook up event
    // notifications for when those options change.
    var sources = _dataSource.Options.Concat<IOption>(_dataSource.PerLanguageOptions)
        .Select(o => TaggerEventSources.OnOptionChanged(_subjectBuffer, o, TaggerDelay.NearImmediate))
        .ToList();

    if (sources.Count == 0)
    {
        // No options specified for this tagger. So just keep the event source as is.
        return baseSource;
    }

    sources.Add(baseSource);
    return TaggerEventSources.Compose(sources);
}
// The four properties below wrap their "_doNotAccessDirectly" backing fields and
// enforce foreground-thread access via an assert on every read and write.

/// <summary>Text changes accumulated since the last tag computation.</summary>
private TextChangeRange? AccumulatedTextChanges
{
    get
    {
        _workQueue.AssertIsForeground();
        return _accumulatedTextChanges_doNotAccessDirectly;
    }

    set
    {
        _workQueue.AssertIsForeground();
        _accumulatedTextChanges_doNotAccessDirectly = value;
    }
}

/// <summary>The most recently computed tag trees, per buffer.</summary>
private ImmutableDictionary<ITextBuffer, TagSpanIntervalTree<TTag>> CachedTagTrees
{
    get
    {
        _workQueue.AssertIsForeground();
        return _cachedTagTrees_doNotAccessDirectly;
    }

    set
    {
        _workQueue.AssertIsForeground();
        _cachedTagTrees_doNotAccessDirectly = value;
    }
}

/// <summary>Opaque per-tagger state handed back to the data source on each computation.</summary>
private object State
{
    get
    {
        _workQueue.AssertIsForeground();
        return _state_doNotAccessDirecty;
    }

    set
    {
        _workQueue.AssertIsForeground();
        _state_doNotAccessDirecty = value;
    }
}

/// <summary>Whether the cached tags are current with respect to the buffer.</summary>
private bool UpToDate
{
    get
    {
        _workQueue.AssertIsForeground();
        return _upToDate_doNotAccessDirectly;
    }

    set
    {
        _workQueue.AssertIsForeground();
        _upToDate_doNotAccessDirectly = value;
    }
}
/// <summary>
/// Schedules <paramref name="action"/> on the foreground notification service,
/// pairing it with an async-operation token so test harnesses can await completion.
/// </summary>
public void RegisterNotification(Action action, int delay, CancellationToken cancellationToken)
    => _notificationService.RegisterNotification(action, delay, _asyncListener.BeginAsyncOperation(typeof(TTag).Name), cancellationToken);
/// <summary>
/// Hooks up all event handlers this tag source listens to and starts the event
/// source. Must run on the foreground thread; Disconnect unhooks in reverse order.
/// </summary>
private void Connect()
{
    _workQueue.AssertIsForeground();

    _eventSource.Changed += OnEventSourceChanged;
    _eventSource.UIUpdatesResumed += OnUIUpdatesResumed;
    _eventSource.UIUpdatesPaused += OnUIUpdatesPaused;

    // Only track raw buffer changes when the data source asked for them.
    if (_dataSource.TextChangeBehavior.HasFlag(TaggerTextChangeBehavior.TrackTextChanges))
    {
        _subjectBuffer.Changed += OnSubjectBufferChanged;
    }

    if (_dataSource.CaretChangeBehavior.HasFlag(TaggerCaretChangeBehavior.RemoveAllTagsOnCaretMoveOutsideOfTag))
    {
        // Caret tracking requires a view; a buffer-only tagger cannot honor this behavior.
        if (_textViewOpt == null)
        {
            throw new ArgumentException(
                nameof(_dataSource.CaretChangeBehavior) + " can only be specified for an " + nameof(IViewTaggerProvider));
        }

        _textViewOpt.Caret.PositionChanged += OnCaretPositionChanged;
    }

    // Tell the interaction object to start issuing events.
    _eventSource.Connect();
}
/// <summary>
/// Cancels outstanding work and unhooks every handler Connect registered,
/// in reverse registration order. Must run on the foreground thread.
/// </summary>
public void Disconnect()
{
    _workQueue.AssertIsForeground();
    _workQueue.CancelCurrentWork(remainCancelled: true);

    // Tell the interaction object to stop issuing events.
    _eventSource.Disconnect();

    if (_dataSource.CaretChangeBehavior.HasFlag(TaggerCaretChangeBehavior.RemoveAllTagsOnCaretMoveOutsideOfTag))
    {
        _textViewOpt.Caret.PositionChanged -= OnCaretPositionChanged;
    }

    if (_dataSource.TextChangeBehavior.HasFlag(TaggerTextChangeBehavior.TrackTextChanges))
    {
        _subjectBuffer.Changed -= OnSubjectBufferChanged;
    }

    _eventSource.UIUpdatesPaused -= OnUIUpdatesPaused;
    _eventSource.UIUpdatesResumed -= OnUIUpdatesResumed;
    _eventSource.Changed -= OnEventSourceChanged;
}
/// <summary>Raises the tags-changed event for a single buffer, skipping empty diffs.</summary>
private void RaiseTagsChanged(ITextBuffer buffer, DiffResult difference)
{
    this.AssertIsForeground();

    // An empty diff means nothing visible changed; do not wake listeners.
    if (difference.Count == 0)
    {
        return;
    }

    var singleton = SpecializedCollections.SingletonCollection(
        new KeyValuePair<ITextBuffer, DiffResult>(buffer, difference));
    RaiseTagsChanged(singleton, initialTags: false);
}
/// <summary>Raises <see cref="TagsChangedForBuffer"/> for the given buffer diffs.</summary>
private void RaiseTagsChanged(
    ICollection<KeyValuePair<ITextBuffer, DiffResult>> collection, bool initialTags)
    => TagsChangedForBuffer?.Invoke(collection, initialTags);
/// <summary>Raises the <see cref="Paused"/> event.</summary>
private void RaisePaused()
{
    this.Paused?.Invoke(this, EventArgs.Empty);
}
/// <summary>Raises the <see cref="Resumed"/> event.</summary>
private void RaiseResumed()
{
    this.Resumed?.Invoke(this, EventArgs.Empty);
}
/// <summary>
/// Advances the enumerator and returns its current element, or
/// <c>default(T)</c> when the sequence is exhausted.
/// </summary>
private static T NextOrDefault<T>(IEnumerator<T> enumerator)
{
    if (!enumerator.MoveNext())
    {
        return default;
    }

    return enumerator.Current;
}
/// <summary>
/// Return all the spans that appear in only one of "latestSpans" or "previousSpans".
/// The merge walk below relies on both sequences being ordered by span start —
/// NOTE(review): that ordering is assumed, not asserted here; confirm at callers.
/// </summary>
private static DiffResult Difference<T>(IEnumerable<ITagSpan<T>> latestSpans, IEnumerable<ITagSpan<T>> previousSpans, IEqualityComparer<T> comparer)
    where T : ITag
{
    using var addedPool = SharedPools.Default<List<SnapshotSpan>>().GetPooledObject();
    using var removedPool = SharedPools.Default<List<SnapshotSpan>>().GetPooledObject();
    using var latestEnumerator = latestSpans.GetEnumerator();
    using var previousEnumerator = previousSpans.GetEnumerator();

    var added = addedPool.Object;
    var removed = removedPool.Object;

    var latest = NextOrDefault(latestEnumerator);
    var previous = NextOrDefault(previousEnumerator);

    // Sorted-merge walk: advance whichever side is behind.
    while (latest != null && previous != null)
    {
        var latestSpan = latest.Span;
        var previousSpan = previous.Span;

        if (latestSpan.Start < previousSpan.Start)
        {
            // Only in the latest snapshot -> added.
            added.Add(latestSpan);
            latest = NextOrDefault(latestEnumerator);
        }
        else if (previousSpan.Start < latestSpan.Start)
        {
            // Only in the previous snapshot -> removed.
            removed.Add(previousSpan);
            previous = NextOrDefault(previousEnumerator);
        }
        else
        {
            // If the starts are the same, but the ends are different, report the larger
            // region to be conservative.
            if (previousSpan.End > latestSpan.End)
            {
                removed.Add(previousSpan);
                latest = NextOrDefault(latestEnumerator);
            }
            else if (latestSpan.End > previousSpan.End)
            {
                added.Add(latestSpan);
                previous = NextOrDefault(previousEnumerator);
            }
            else
            {
                // Identical spans: only report a change when the tag payloads differ.
                if (!comparer.Equals(latest.Tag, previous.Tag))
                {
                    added.Add(latestSpan);
                }

                latest = NextOrDefault(latestEnumerator);
                previous = NextOrDefault(previousEnumerator);
            }
        }
    }

    // Drain whichever side still has elements.
    while (latest != null)
    {
        added.Add(latest.Span);
        latest = NextOrDefault(latestEnumerator);
    }

    while (previous != null)
    {
        removed.Add(previous.Span);
        previous = NextOrDefault(previousEnumerator);
    }

    return new DiffResult(added, removed);
}
}
}
}
|
package org.zstack.header.volume;
import org.zstack.header.search.APISearchReply;
public class APISearchVolumeReply extends APISearchReply {

    /** Example payload used by the API documentation/template generator. */
    public static APISearchVolumeReply __example__() {
        return new APISearchVolumeReply();
    }
}
|
# Homebrew formula that repackages the zsh-lovers man page from the Debian package.
class ZshLovers < Formula
  desc "Tips, tricks, and examples for zsh"
  homepage "https://grml.org/zsh/#zshlovers"
  url "https://deb.grml.org/pool/main/z/zsh-lovers/zsh-lovers_0.9.1_all.deb"
  sha256 "011b7931a555c77e98aa9cdd16b3c4670c0e0e3b5355e5fd60188885a6678de8"

  livecheck do
    url "https://deb.grml.org/pool/main/z/zsh-lovers/"
    regex(/href=.*?zsh-lovers[._-]v?(\d+(?:\.\d+)+)[._-]all/i)
  end

  bottle do
    sha256 cellar: :any_skip_relocation, all: "a9a640ed5452e086874d853453e15cbd2e347a5a86d867db12a5245980f6aa54"
  end

  def install
    # Unpack the .deb's outer archive, then its data payload.
    system "tar", "xf", "zsh-lovers_#{version}_all.deb"
    system "tar", "xf", "data.tar.xz"
    # The payload ships gzipped man pages; install them uncompressed.
    system "gunzip", *Dir["usr/**/*.gz"]
    prefix.install_metafiles "usr/share/doc/zsh-lovers"
    prefix.install "usr/share"
  end

  test do
    system "man", "zsh-lovers"
  end
end
|
#ifndef STD_HASH_HPP
#define STD_HASH_HPP
#include <functional>
// this is largely inspired by boost's hash combine as can be found in
// "The C++ Standard Library" 2nd Edition. Nicolai M. Josuttis. 2012.
// Mixes the hash of `val` into `seed` (boost-style hash_combine; the magic
// constant is the golden-ratio derived 0x9e3779b9).
template <typename T> void hash_combine(std::size_t &seed, const T &val)
{
    seed ^= std::hash<T>()(val) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
}

// Base case of the variadic fold: combine a single value into the seed.
template <typename T> void hash_val(std::size_t &seed, const T &val) { hash_combine(seed, val); }

// Recursive case: combine the first value, then recurse on the rest.
template <typename T, typename... Types>
void hash_val(std::size_t &seed, const T &val, const Types &... args)
{
    hash_combine(seed, val);
    hash_val(seed, args...);
}

// Entry point: hashes all arguments into a single value starting from seed 0.
template <typename... Types> std::size_t hash_val(const Types &... args)
{
    std::size_t seed = 0;
    hash_val(seed, args...);
    return seed;
}
namespace std
{
// Makes std::pair usable as a key in unordered containers.
// NOTE(review): specializing std::hash without involving a program-defined type
// is not sanctioned by the standard, though it works on common implementations.
template <typename T1, typename T2> struct hash<std::pair<T1, T2>>
{
    size_t operator()(const std::pair<T1, T2> &pair) const
    {
        std::size_t seed = 0;
        hash_val(seed, pair.first, pair.second);
        return seed;
    }
};
}
#endif // STD_HASH_HPP
|
/***************************************************************************
Copyright (c) 2014, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#define HAVE_KERNEL_4x4 1

/*
 * AVX micro kernel for DSYMV: processes 4 columns (a0..a3) at a time over n rows,
 * updating y += temp1[j]*a_j and accumulating temp2[j] += dot(a_j, x).
 * n is assumed to be a multiple of 4 (the loop steps by 4 with no remainder path).
 */
static void dsymv_kernel_4x4(BLASLONG n, FLOAT *a0, FLOAT *a1, FLOAT *a2, FLOAT *a3, FLOAT *x, FLOAT *y, FLOAT *temp1, FLOAT *temp2) __attribute__ ((noinline));

static void dsymv_kernel_4x4(BLASLONG n, FLOAT *a0, FLOAT *a1, FLOAT *a2, FLOAT *a3, FLOAT *x, FLOAT *y, FLOAT *temp1, FLOAT *temp2)
{

	BLASLONG register i = 0;

	__asm__  __volatile__
	(
	"vzeroupper				 \n\t"
	"vxorpd		%%ymm0 , %%ymm0 , %%ymm0 \n\t"	// temp2[0]
	"vxorpd		%%ymm1 , %%ymm1 , %%ymm1 \n\t"	// temp2[1]
	"vxorpd		%%ymm2 , %%ymm2 , %%ymm2 \n\t"	// temp2[2]
	"vxorpd		%%ymm3 , %%ymm3 , %%ymm3 \n\t"	// temp2[3]
	"vbroadcastsd   (%8),    %%ymm4		 \n\t"	// temp1[0]
	"vbroadcastsd  8(%8),    %%ymm5		 \n\t"	// temp1[1]
	"vbroadcastsd 16(%8),    %%ymm6		 \n\t"	// temp1[2]
	"vbroadcastsd 24(%8),    %%ymm7		 \n\t"	// temp1[3]
	"xorq		%0,%0			 \n\t"

	".align 16				 \n\t"
	"1:				 \n\t"

	"vmovups	(%3,%0,8), %%ymm9	 \n\t"	// 2 * y
	"vmovups	(%2,%0,8), %%ymm8	 \n\t"	// 2 * x

	"vmovups	(%4,%0,8), %%ymm12	 \n\t"	// 2 * a
	"vmovups	(%5,%0,8), %%ymm13	 \n\t"	// 2 * a
	"vmovups	(%6,%0,8), %%ymm14	 \n\t"	// 2 * a
	"vmovups	(%7,%0,8), %%ymm15	 \n\t"	// 2 * a

	"vmulpd	%%ymm4, %%ymm12, %%ymm10  \n\t"
	"vaddpd	%%ymm9, %%ymm10, %%ymm9   \n\t"
	"vmulpd	%%ymm8, %%ymm12, %%ymm11  \n\t"
	"vaddpd	%%ymm0, %%ymm11, %%ymm0   \n\t"

	"vmulpd	%%ymm5, %%ymm13, %%ymm10  \n\t"
	"vaddpd	%%ymm9, %%ymm10, %%ymm9   \n\t"
	"vmulpd	%%ymm8, %%ymm13, %%ymm11  \n\t"
	"vaddpd	%%ymm1, %%ymm11, %%ymm1   \n\t"

	"vmulpd	%%ymm6, %%ymm14, %%ymm10  \n\t"
	"vaddpd	%%ymm9, %%ymm10, %%ymm9   \n\t"
	"vmulpd	%%ymm8, %%ymm14, %%ymm11  \n\t"
	"vaddpd	%%ymm2, %%ymm11, %%ymm2   \n\t"

	"vmulpd	%%ymm7, %%ymm15, %%ymm10  \n\t"
	"vaddpd	%%ymm9, %%ymm10, %%ymm9   \n\t"
	"vmulpd	%%ymm8, %%ymm15, %%ymm11  \n\t"
	"vaddpd	%%ymm3, %%ymm11, %%ymm3   \n\t"

	"addq		$4 , %0	  	 	      \n\t"
	"subq	        $4 , %1			      \n\t"
	// Store y back at the pre-increment address (hence the -32 byte offset).
	"vmovups	%%ymm9 , -32(%3,%0,8)	      \n\t"

	"jnz		1b		      \n\t"

	"vmovsd         (%9), %%xmm4		      \n\t"
	"vmovsd        8(%9), %%xmm5		      \n\t"
	"vmovsd       16(%9), %%xmm6		      \n\t"
	"vmovsd       24(%9), %%xmm7		      \n\t"

	// Horizontal reduction of the four 256-bit accumulators into scalars.
	"vextractf128 $0x01, %%ymm0 , %%xmm12	      \n\t"
	"vextractf128 $0x01, %%ymm1 , %%xmm13	      \n\t"
	"vextractf128 $0x01, %%ymm2 , %%xmm14	      \n\t"
	"vextractf128 $0x01, %%ymm3 , %%xmm15	      \n\t"

	"vaddpd	        %%xmm0, %%xmm12, %%xmm0	      \n\t"
	"vaddpd	        %%xmm1, %%xmm13, %%xmm1	      \n\t"
	"vaddpd	        %%xmm2, %%xmm14, %%xmm2	      \n\t"
	"vaddpd	        %%xmm3, %%xmm15, %%xmm3	      \n\t"

	"vhaddpd        %%xmm0, %%xmm0, %%xmm0  \n\t"
	"vhaddpd        %%xmm1, %%xmm1, %%xmm1  \n\t"
	"vhaddpd        %%xmm2, %%xmm2, %%xmm2  \n\t"
	"vhaddpd        %%xmm3, %%xmm3, %%xmm3  \n\t"

	"vaddsd	        %%xmm4, %%xmm0, %%xmm0  \n\t"
	"vaddsd	        %%xmm5, %%xmm1, %%xmm1  \n\t"
	"vaddsd	        %%xmm6, %%xmm2, %%xmm2  \n\t"
	"vaddsd	        %%xmm7, %%xmm3, %%xmm3  \n\t"

	"vmovsd         %%xmm0 ,  (%9)		\n\t"	// save temp2
	"vmovsd         %%xmm1 , 8(%9)		\n\t"	// save temp2
	"vmovsd         %%xmm2 ,16(%9)		\n\t"	// save temp2
	"vmovsd         %%xmm3 ,24(%9)		\n\t"	// save temp2
	"vzeroupper				 \n\t"

	:
	:
	// NOTE(review): operands 0 (i) and 1 (n) are declared as inputs but are
	// modified by the asm (xorq/addq on %0, subq on %1) — GCC requires modified
	// operands to be outputs ("+r"); confirm and fix upstream.
        "r" (i),	// 0
	"r" (n),  	// 1
        "r" (x),        // 2
        "r" (y),        // 3
        "r" (a0),       // 4
        "r" (a1),       // 5
        "r" (a2),       // 6
        "r" (a3),       // 7
        "r" (temp1),    // 8
        "r" (temp2)     // 9
	: "cc",
	  "%xmm0", "%xmm1", "%xmm2", "%xmm3",
	  "%xmm4", "%xmm5", "%xmm6", "%xmm7",
	  "%xmm8", "%xmm9", "%xmm10", "%xmm11",
	  "%xmm12", "%xmm13", "%xmm14", "%xmm15",
	  "memory"
	);

}
|
// ==========================================================================
// SeqAn - The Library for Sequence Analysis
// ==========================================================================
// Copyright (c) 2006-2015, Knut Reinert, FU Berlin
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of Knut Reinert or the FU Berlin nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
//
// ==========================================================================
// Author: Jonathan Goeke <goeke@molgen.mpg.de>
// ==========================================================================
// Tests for SeqAn's module statistics (markov model)
// ==========================================================================
#include <seqan/basic.h>
#include <seqan/file.h>
#include <seqan/statistics.h> // The module under test.
#include "test_statistics_markov_model.h"
#include "test_statistics_base.h"
// Test-suite driver: registers and runs all tests for the statistics module.
// SEQAN_BEGIN_TESTSUITE/SEQAN_END_TESTSUITE expand to the program entry point
// and the pass/fail summary reporting.
SEQAN_BEGIN_TESTSUITE(test_statistics)
{
// Call Tests.
// Tests are declared in the two headers included above; each SEQAN_CALL_TEST
// runs one SEQAN_DEFINE_TEST body and records its result.
SEQAN_CALL_TEST(test_statistics_markov_model);
SEQAN_CALL_TEST(test_statistics_statistics);
}
SEQAN_END_TESTSUITE
|
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/chromeos/file_system_provider/operations/execute_action.h"
#include <algorithm>
#include <string>
#include "chrome/common/extensions/api/file_system_provider.h"
#include "chrome/common/extensions/api/file_system_provider_internal.h"
namespace chromeos {
namespace file_system_provider {
namespace operations {
// Constructs an operation that asks the providing extension to execute
// |action_id| on the entry at |entry_path| within |file_system_info|'s file
// system. |callback| receives the final status once the extension responds
// (via OnSuccess or OnError). The callback is copied and held for the
// lifetime of the operation.
ExecuteAction::ExecuteAction(
extensions::EventRouter* event_router,
const ProvidedFileSystemInfo& file_system_info,
const base::FilePath& entry_path,
const std::string& action_id,
const storage::AsyncFileUtil::StatusCallback& callback)
: Operation(event_router, file_system_info),
entry_path_(entry_path),
action_id_(action_id),
callback_(callback) {
}
// Out-of-line default destructor; kept in the .cc so the class's member
// types need only be complete here.
ExecuteAction::~ExecuteAction() = default;
bool ExecuteAction::Execute(int request_id) {
using extensions::api::file_system_provider::ExecuteActionRequestedOptions;
ExecuteActionRequestedOptions options;
options.file_system_id = file_system_info_.file_system_id();
options.request_id = request_id;
options.entry_path = entry_path_.AsUTF8Unsafe();
options.action_id = action_id_;
return SendEvent(
request_id,
extensions::events::FILE_SYSTEM_PROVIDER_ON_EXECUTE_ACTION_REQUESTED,
extensions::api::file_system_provider::OnExecuteActionRequested::
kEventName,
extensions::api::file_system_provider::OnExecuteActionRequested::Create(
options));
}
// Called when the providing extension reports success. The returned value
// and |has_more| are intentionally ignored: executing an action yields no
// payload, only a status. Completes the operation with FILE_OK.
void ExecuteAction::OnSuccess(int /* request_id */,
scoped_ptr<RequestValue> result,
bool has_more) {
callback_.Run(base::File::FILE_OK);
}
// Called when the providing extension reports a failure. Completes the
// operation by forwarding the extension-supplied error code to the caller.
void ExecuteAction::OnError(int /* request_id */,
scoped_ptr<RequestValue> /* result */,
base::File::Error error) {
callback_.Run(error);
}
} // namespace operations
} // namespace file_system_provider
} // namespace chromeos
|
<!--
Copyright (c) 2016, the Dart project authors. Please see the AUTHORS file
for details. All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
-->
<div class="spinner">
<div class="circle left"></div>
<div class="circle right"></div>
<div class="circle gap"></div>
</div>
|
<!DOCTYPE html><!-- DO NOT EDIT. This file auto-generated by generate_closure_unit_tests.js --><!--
Copyright 2017 The Closure Library Authors. All Rights Reserved.
Use of this source code is governed by the Apache License, Version 2.0.
See the COPYING file for details.
--><html><head><meta charset="UTF-8">
<script src="../base.js"></script>
<script>goog.require('goog.math.Vec2Test');</script>
<title>Closure Unit Tests - goog.math.Vec2Test</title></head><body></body></html> |
// Filename: bioStream.h
// Created by: drose (25Sep02)
//
////////////////////////////////////////////////////////////////////
//
// PANDA 3D SOFTWARE
// Copyright (c) Carnegie Mellon University. All rights reserved.
//
// All use of this software is subject to the terms of the revised BSD
// license. You should have received a copy of this license along
// with this source code in a file named "LICENSE."
//
////////////////////////////////////////////////////////////////////
#ifndef BIOSTREAM_H
#define BIOSTREAM_H
#include "pandabase.h"
// This module is not compiled if OpenSSL is not available.
#ifdef HAVE_OPENSSL
#include "socketStream.h"
#include "bioStreamBuf.h"
////////////////////////////////////////////////////////////////////
// Class : IBioStream
// Description : An input stream object that reads data from an
// OpenSSL BIO object. This is used by the HTTPClient
// and HTTPChannel classes to provide a C++ interface
// to OpenSSL.
//
// Seeking is not supported.
////////////////////////////////////////////////////////////////////
class EXPCL_PANDAEXPRESS IBioStream : public ISocketStream {
public:
// Default-constructs an unopened stream; open() must be called before use.
INLINE IBioStream();
// Constructs and immediately opens on |source|.
INLINE IBioStream(BioPtr *source);
// (Re)binds the stream to |source|; returns *this for chaining.
INLINE IBioStream &open(BioPtr *source);
// ISocketStream interface: connection status, teardown, and read state.
virtual bool is_closed();
virtual void close();
virtual ReadState get_read_state();
private:
// Underlying streambuf that performs the actual BIO reads.
BioStreamBuf _buf;
};
////////////////////////////////////////////////////////////////////
// Class : OBioStream
// Description : An output stream object that writes data to an
// OpenSSL BIO object. This is used by the HTTPClient
// and HTTPChannel classes to provide a C++ interface
// to OpenSSL.
//
// Seeking is not supported.
////////////////////////////////////////////////////////////////////
class EXPCL_PANDAEXPRESS OBioStream : public OSocketStream {
public:
// Default-constructs an unopened stream; open() must be called before use.
INLINE OBioStream();
// Constructs and immediately opens on |source|.
INLINE OBioStream(BioPtr *source);
// (Re)binds the stream to |source|; returns *this for chaining.
INLINE OBioStream &open(BioPtr *source);
// OSocketStream interface: connection status and teardown.
virtual bool is_closed();
virtual void close();
private:
// Underlying streambuf that performs the actual BIO writes.
BioStreamBuf _buf;
};
////////////////////////////////////////////////////////////////////
// Class : BioStream
// Description : A bi-directional stream object that reads and writes
// data to an OpenSSL BIO object.
////////////////////////////////////////////////////////////////////
class EXPCL_PANDAEXPRESS BioStream : public SocketStream {
public:
// Default-constructs an unopened stream; open() must be called before use.
INLINE BioStream();
// Constructs and immediately opens on |source|.
INLINE BioStream(BioPtr *source);
// (Re)binds the stream to |source|; returns *this for chaining.
INLINE BioStream &open(BioPtr *source);
// SocketStream interface: connection status and teardown.
virtual bool is_closed();
virtual void close();
private:
// Shared streambuf handling both directions of BIO traffic.
BioStreamBuf _buf;
};
#include "bioStream.I"
#endif // HAVE_OPENSSL
#endif
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.