code stringlengths 3 1.01M | repo_name stringlengths 5 116 | path stringlengths 3 311 | language stringclasses 30
values | license stringclasses 15
values | size int64 3 1.01M |
|---|---|---|---|---|---|
# -------------------------------------------------------------------------------------------------
# Copyright (c) 2015 zsh-syntax-highlighting contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted
# provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this list of conditions
# and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the zsh-syntax-highlighting contributors nor the names of its contributors
# may be used to endorse or promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -------------------------------------------------------------------------------------------------
# -*- mode: zsh; sh-indentation: 2; indent-tabs-mode: nil; sh-basic-offset: 2; -*-
# vim: ft=zsh sw=2 ts=2 et
# -------------------------------------------------------------------------------------------------
# Test input: a command word prefixed with a backslash.
BUFFER='\sh'
# Expected highlighting: columns 1-3 should be styled as a command.
expected_region_highlight=(
  "1 3 command" # \sh (runs 'sh', bypassing aliases)
)
| codeprimate/arid | zsh/zsh-syntax-highlighting/highlighters/main/test-data/unbackslash.zsh | Shell | bsd-2-clause | 2,090 |
require 'shellwords'
require 'optparse'
require 'ostruct'

require 'rake/task_manager'
require 'rake/file_list'
require 'rake/thread_pool'
require 'rake/thread_history_display'
require 'rake/trace_output'
require 'rake/win32'
module Rake
CommandLineOptionError = Class.new(StandardError)
##
# Rake main application object. When invoking +rake+ from the
# command line, a Rake::Application object is created and run.
class Application
include TaskManager
include TraceOutput
# The name of the application (typically 'rake')
attr_reader :name
# The original directory where rake was invoked.
attr_reader :original_dir
# Name of the actual rakefile used.
attr_reader :rakefile
# Number of columns on the terminal
attr_accessor :terminal_columns
# List of the top level task names (task names from the command line).
attr_reader :top_level_tasks
DEFAULT_RAKEFILES = [
'rakefile',
'Rakefile',
'rakefile.rb',
'Rakefile.rb'
].freeze
# Initialize a Rake::Application object.
def initialize
  super
  @name = 'rake'
  @rakefiles = DEFAULT_RAKEFILES.dup  # candidate rakefile names, searched in order
  @rakefile = nil                     # resolved rakefile; set by raw_load_rakefile
  @pending_imports = []               # files queued via add_import
  @imported = []                      # files already processed by load_imports
  @loaders = {}                       # ".ext" => loader object
  @default_loader = Rake::DefaultLoader.new
  @original_dir = Dir.pwd
  @top_level_tasks = []
  # Ruby source, rakefile fragments and .rake files all use the default loader.
  add_loader('rb', DefaultLoader.new)
  add_loader('rf', DefaultLoader.new)
  add_loader('rake', DefaultLoader.new)
  @tty_output = STDOUT.tty?
  # RAKE_COLUMNS forces the output width; to_i of nil/"" is 0, which
  # means "auto-detect" (see terminal_width).
  @terminal_columns = ENV['RAKE_COLUMNS'].to_i
end
# Run the Rake application. The run method performs the following
# three steps:
#
# * Initialize the command line options (+init+).
# * Define the tasks (+load_rakefile+).
# * Run the top level tasks (+top_level+).
#
# If you wish to build a custom rake command, you should call
# +init+ on your application. Then define any tasks. Finally,
# call +top_level+ to run your top level tasks.
def run
  standard_exception_handling do
    init
    load_rakefile
    top_level
  end
end

# Initialize the command line parameters and app name.
def init(app_name='rake')
  standard_exception_handling do
    @name = app_name
    args = handle_options            # parse flags; returns leftover ARGV entries
    collect_command_line_tasks(args) # split leftovers into env vars and task names
  end
end

# Find the rakefile and then load it and any pending imports.
def load_rakefile
  standard_exception_handling do
    raw_load_rakefile
  end
end

# Run the top level tasks of a Rake application.
#
# Depending on the parsed options this either lists tasks (-T/-D),
# lists prerequisites (-P), or invokes each requested task in order.
def top_level
  run_with_threads do
    if options.show_tasks
      display_tasks_and_comments
    elsif options.show_prereqs
      display_prerequisites
    else
      top_level_tasks.each { |task_name| invoke_task(task_name) }
    end
  end
end
# Run the given block with the thread startup and shutdown.
def run_with_threads
  # Optionally record per-thread history before any tasks run.
  thread_pool.gather_history if options.job_stats == :history

  yield

  thread_pool.join  # wait for all pool threads to drain
  if options.job_stats
    stats = thread_pool.statistics
    puts "Maximum active threads: #{stats[:max_active_threads]} + main"
    puts "Total threads in play: #{stats[:total_threads_in_play]} + main"
  end
  ThreadHistoryDisplay.new(thread_pool.history).show if
    options.job_stats == :history
end
# Add a loader to handle imported files ending in the extension
# +ext+.
# Register +loader+ to handle imported files ending in extension +ext+.
# The extension may be given with or without its leading dot.
def add_loader(ext, loader)
  dotted = (ext =~ /^\./) ? ext : ".#{ext}"
  @loaders[dotted] = loader
end
# Application options from the command line
# Application options from the command line.
#
# Lazily builds an OpenStruct so option accessors can be read before
# handle_options has run (unset options simply return nil).
#
# Fix: OpenStruct is defined by the stdlib 'ostruct' library, which this
# file used without requiring (it relied on another file loading it);
# the require is now added explicitly at the top of the file.
def options
  @options ||= OpenStruct.new
end
# Return the thread pool used for multithreaded processing.
def thread_pool # :nodoc:
  # Pool size defaults to Rake.suggested_thread_count-1 because the
  # main thread also executes tasks alongside the pool threads.
  @thread_pool ||= ThreadPool.new(options.thread_pool_size || Rake.suggested_thread_count-1)
end
# internal ----------------------------------------------------------------
# Invokes a task with arguments that are extracted from +task_string+
def invoke_task(task_string) # :nodoc:
  # "name[arg1,arg2]" is split into the task name and its argument list.
  name, args = parse_task_string(task_string)
  t = self[name]  # task lookup provided by TaskManager#[]
  t.invoke(*args)
end
# Split a command-line task string of the form "name[arg1,arg2,...]"
# into the task name and an array of argument strings.  "\," escapes a
# literal comma inside an argument.  Returns [string, []] when there is
# no bracketed argument list at all.
def parse_task_string(string) # :nodoc:
  /^([^\[]+)(?:\[(.*)\])$/ =~ string.to_s
  name = $1
  rest = $2

  return string, [] unless name   # no "[...]" suffix present
  return name, [] if rest.empty?  # "name[]"

  parsed = []
  loop do
    # Consume one comma-separated argument from the front of +rest+.
    /((?:[^\\,]|\\.)*?)\s*(?:,\s*(.*))?$/ =~ rest
    rest = $2
    parsed << $1.gsub(/\\(.)/, '\1')  # un-escape "\x" -> "x"
    break unless rest
  end
  return name, parsed
end
# Provide standard exception handling for the given block.
def standard_exception_handling # :nodoc:
  yield
rescue SystemExit
  # Exit silently with current status
  raise
rescue OptionParser::InvalidOption => ex
  # Bad command-line flag: report briefly and exit with failure status.
  $stderr.puts ex.message
  exit(false)
rescue Exception => ex
  # Exit with error message.  Rescuing Exception (not just StandardError)
  # is deliberate: this is the application's outermost handler.
  display_error_message(ex)
  exit_because_of_exception(ex)
end

# Exit the program because of an unhandled exception.
# (may be overridden by subclasses)
def exit_because_of_exception(ex) # :nodoc:
  exit(false)
end
# Display the error message that caused the exception.
def display_error_message(ex) # :nodoc:
  trace "#{name} aborted!"
  display_exception_details(ex)
  trace "Tasks: #{ex.chain}" if has_chain?(ex)
  trace "(See full trace by running task with --trace)" unless
    options.backtrace
end

# Print the message and backtrace of +ex+, then recurse into its cause
# chain.  A per-thread "seen" list guards against cycles and duplicates.
def display_exception_details(ex) # :nodoc:
  seen = Thread.current[:rake_display_exception_details_seen] ||= []
  return if seen.include? ex
  seen << ex

  display_exception_message_details(ex)
  display_exception_backtrace(ex)
  display_exception_details(ex.cause) if has_cause?(ex)
end

# True if +ex+ carries a non-nil cause.  The respond_to? check keeps
# this safe on objects without Exception#cause.
def has_cause?(ex) # :nodoc:
  ex.respond_to?(:cause) && ex.cause
end
# Trace the exception message; plain RuntimeErrors are shown without the
# class-name prefix, every other class is shown as "Class: message".
def display_exception_message_details(ex) # :nodoc:
  text =
    ex.instance_of?(RuntimeError) ? ex.message : "#{ex.class.name}: #{ex.message}"
  trace text
end
def display_exception_backtrace(ex) # :nodoc:
  if options.backtrace
    # --trace/--backtrace: show every frame untouched.
    trace ex.backtrace.join("\n")
  else
    # Default: collapse noise frames via Rake's Backtrace helper.
    trace Backtrace.collapse(ex.backtrace).join("\n")
  end
end
# Warn about deprecated usage.
#
# Example:
# Rake.application.deprecate("import", "Rake.import", caller.first)
#
# Emit a deprecation warning on $stderr (suppressed by -X /
# options.ignore_deprecate).
def deprecate(old_usage, new_usage, call_site) # :nodoc:
  return if options.ignore_deprecate
  warning = "WARNING: '#{old_usage}' is deprecated. " +
            "Please use '#{new_usage}' instead.\n" +
            " at #{call_site}"
  $stderr.puts warning
end
# Does the exception have a task invocation chain?
def has_chain?(exception) # :nodoc:
  # Duck-typed: only Rake's own task exceptions carry a #chain.
  exception.respond_to?(:chain) && exception.chain
end
private :has_chain?
# True if one of the files in RAKEFILES is in the current directory.
# If a match is found, it is copied into @rakefile.
# Return the name of the first candidate rakefile present in the current
# directory (preferring the actual on-disk casing on case-insensitive
# filesystems), the empty string if -f '' was given, or nil when none of
# the candidates exist.
def have_rakefile # :nodoc:
  @rakefiles.each do |candidate|
    if File.exist?(candidate)
      matches = FileList.glob(candidate, File::FNM_CASEFOLD)
      return matches.size == 1 ? matches.first : candidate
    end
    # An explicit empty name (from -f '') means "run without a rakefile".
    return candidate if candidate == ''
  end
  nil
end
# True if we are outputting to TTY, false otherwise
def tty_output? # :nodoc:
  @tty_output
end

# Override the detected TTY output state (mostly for testing)
def tty_output=(tty_output_state) # :nodoc:
  @tty_output = tty_output_state
end

# We will truncate output if we are outputting to a TTY or if we've been
# given an explicit column width to honor
def truncate_output? # :nodoc:
  # nonzero? returns nil for 0, so an unset RAKE_COLUMNS means "no".
  tty_output? || @terminal_columns.nonzero?
end
# Display the tasks and comments.
def display_tasks_and_comments # :nodoc:
  # Only tasks carrying a description are listed unless -A/--all was
  # given; -T/-D/-W may further restrict by PATTERN.
  displayable_tasks = tasks.select { |t|
    (options.show_all_tasks || t.comment) &&
      t.name =~ options.show_task_pattern
  }
  case options.show_tasks
  when :tasks  # rake -T: one line per task, comment truncated to fit
    width = displayable_tasks.map { |t| t.name_with_args.length }.max || 10
    if truncate_output?
      # 7 accounts for the fixed "  # " style decoration around fields.
      max_column = terminal_width - name.size - width - 7
    else
      max_column = nil
    end
    displayable_tasks.each do |t|
      printf("#{name} %-#{width}s # %s\n",
             t.name_with_args,
             max_column ? truncate(t.comment, max_column) : t.comment)
    end
  when :describe  # rake -D: full multi-line description per task
    displayable_tasks.each do |t|
      puts "#{name} #{t.name_with_args}"
      comment = t.full_comment || ""
      comment.split("\n").each do |line|
        puts " #{line}"
      end
      puts
    end
  when :lines  # rake -W: where each task is defined
    displayable_tasks.each do |t|
      t.locations.each do |loc|
        printf "#{name} %-30s %s\n", t.name_with_args, loc
      end
    end
  else
    fail "Unknown show task mode: '#{options.show_tasks}'"
  end
end
# Width (in columns) to format task listings to.  An explicit
# RAKE_COLUMNS value wins; otherwise the terminal is probed on unix.
# Implausibly small values and any probe failure fall back to 80.
def terminal_width # :nodoc:
  width = @terminal_columns.nonzero? || (unix? ? dynamic_width : 80)
  width < 10 ? 80 : width
rescue
  80
end
# Calculate the dynamic width of the terminal.
def dynamic_width # :nodoc:
  # Try stty first; fall back to tput when stty reports nothing (0).
  @dynamic_width ||= (dynamic_width_stty.nonzero? || dynamic_width_tput)
end

# Width as reported by `stty size` ("rows cols"); 0 on failure.
def dynamic_width_stty # :nodoc:
  %x{stty size 2>/dev/null}.split[1].to_i
end

# Width as reported by `tput cols`; 0 on failure.
def dynamic_width_tput # :nodoc:
  %x{tput cols 2>/dev/null}.to_i
end

# True when the host OS looks unix-like (used for terminal probing).
def unix? # :nodoc:
  RbConfig::CONFIG['host_os'] =~
    /(aix|darwin|linux|(net|free|open)bsd|cygwin|solaris|irix|hpux)/i
end

# True when running on Windows (delegates to Rake::Win32).
def windows? # :nodoc:
  Win32.windows?
end
# Shorten +string+ to at most +width+ characters, replacing the excess
# with "...".  nil is rendered as the empty string.
def truncate(string, width) # :nodoc:
  return "" if string.nil?
  return string if string.length <= width
  head = string[0, width - 3] || ""
  "#{head}..."
end
# Display the tasks and prerequisites
def display_prerequisites # :nodoc:
  tasks.each do |t|
    puts "#{name} #{t.name}"
    t.prerequisites.each { |pre| puts " #{pre}" }
  end
end

# Append +strings+ to the trace output stream (defaults to $stderr).
def trace(*strings) # :nodoc:
  options.trace_output ||= $stderr
  trace_on(options.trace_output, *strings)
end
# Order option descriptors by their (downcased) flag strings so the
# --help listing comes out alphabetized.
def sort_options(options) # :nodoc:
  options.sort_by do |entry|
    flags = entry.select { |item| item =~ /^-/ }
    flags.map { |flag| flag.downcase }.sort.reverse
  end
end
private :sort_options
# A list of all the standard options used in rake, suitable for
# passing to OptionParser.
def standard_rake_options # :nodoc:
  # Each entry is an OptionParser#on argument list: flag form(s), a
  # description, and a handler lambda.  sort_options alphabetizes the
  # table for the --help output.
  sort_options(
    [
      ['--all', '-A',
       "Show all tasks, even uncommented ones (in combination with -T or -D)",
       lambda { |value|
         options.show_all_tasks = value
       }
      ],
      ['--backtrace=[OUT]',
       "Enable full backtrace. OUT can be stderr (default) or stdout.",
       lambda { |value|
         options.backtrace = true
         select_trace_output(options, 'backtrace', value)
       }
      ],
      ['--build-all', '-B',
       "Build all prerequisites, including those which are up-to-date.",
       lambda { |value|
         options.build_all = true
       }
      ],
      ['--comments',
       "Show commented tasks only",
       lambda { |value|
         options.show_all_tasks = !value
       }
      ],
      ['--describe', '-D [PATTERN]',
       "Describe the tasks (matching optional PATTERN), then exit.",
       lambda { |value|
         select_tasks_to_show(options, :describe, value)
       }
      ],
      ['--dry-run', '-n',
       "Do a dry run without executing actions.",
       lambda { |value|
         Rake.verbose(true)
         Rake.nowrite(true)
         options.dryrun = true
         options.trace = true
       }
      ],
      # NOTE: -e/-p/-E eval arbitrary Ruby from the command line by design.
      ['--execute', '-e CODE',
       "Execute some Ruby code and exit.",
       lambda { |value|
         eval(value)
         exit
       }
      ],
      ['--execute-print', '-p CODE',
       "Execute some Ruby code, print the result, then exit.",
       lambda { |value|
         puts eval(value)
         exit
       }
      ],
      ['--execute-continue', '-E CODE',
       "Execute some Ruby code, " +
       "then continue with normal task processing.",
       lambda { |value| eval(value) }
      ],
      ['--jobs', '-j [NUMBER]',
       "Specifies the maximum number of tasks to execute in parallel. " +
       "(default is number of CPU cores + 4)",
       lambda { |value|
         # No NUMBER means "unlimited"; non-numeric input falls back to
         # the suggested count.  The pool excludes the main thread.
         if value.nil? || value == ''
           value = FIXNUM_MAX
         elsif value =~ /^\d+$/
           value = value.to_i
         else
           value = Rake.suggested_thread_count
         end
         value = 1 if value < 1
         options.thread_pool_size = value - 1
       }
      ],
      ['--job-stats [LEVEL]',
       "Display job statistics. " +
       "LEVEL=history displays a complete job list",
       lambda { |value|
         if value =~ /^history/i
           options.job_stats = :history
         else
           options.job_stats = true
         end
       }
      ],
      ['--libdir', '-I LIBDIR',
       "Include LIBDIR in the search path for required modules.",
       lambda { |value| $:.push(value) }
      ],
      ['--multitask', '-m',
       "Treat all tasks as multitasks.",
       lambda { |value| options.always_multitask = true }
      ],
      ['--no-search', '--nosearch',
       '-N', "Do not search parent directories for the Rakefile.",
       lambda { |value| options.nosearch = true }
      ],
      ['--prereqs', '-P',
       "Display the tasks and dependencies, then exit.",
       lambda { |value| options.show_prereqs = true }
      ],
      ['--quiet', '-q',
       "Do not log messages to standard output.",
       lambda { |value| Rake.verbose(false) }
      ],
      ['--rakefile', '-f [FILENAME]',
       "Use FILENAME as the rakefile to search for.",
       lambda { |value|
         # -f with no FILENAME stores '' => "run without a rakefile".
         value ||= ''
         @rakefiles.clear
         @rakefiles << value
       }
      ],
      ['--rakelibdir', '--rakelib', '-R RAKELIBDIR',
       "Auto-import any .rake files in RAKELIBDIR. " +
       "(default is 'rakelib')",
       lambda { |value|
         options.rakelib = value.split(File::PATH_SEPARATOR)
       }
      ],
      ['--require', '-r MODULE',
       "Require MODULE before executing rakefile.",
       lambda { |value|
         begin
           require value
         rescue LoadError => ex
           # Fall back to .rake files on the load path; re-raise the
           # original error when that fails too.
           begin
             rake_require value
           rescue LoadError
             raise ex
           end
         end
       }
      ],
      ['--rules',
       "Trace the rules resolution.",
       lambda { |value| options.trace_rules = true }
      ],
      ['--silent', '-s',
       "Like --quiet, but also suppresses the " +
       "'in directory' announcement.",
       lambda { |value|
         Rake.verbose(false)
         options.silent = true
       }
      ],
      ['--suppress-backtrace PATTERN',
       "Suppress backtrace lines matching regexp PATTERN. " +
       "Ignored if --trace is on.",
       lambda { |value|
         options.suppress_backtrace_pattern = Regexp.new(value)
       }
      ],
      ['--system', '-g',
       "Using system wide (global) rakefiles " +
       "(usually '~/.rake/*.rake').",
       lambda { |value| options.load_system = true }
      ],
      ['--no-system', '--nosystem', '-G',
       "Use standard project Rakefile search paths, " +
       "ignore system wide rakefiles.",
       lambda { |value| options.ignore_system = true }
      ],
      ['--tasks', '-T [PATTERN]',
       "Display the tasks (matching optional PATTERN) " +
       "with descriptions, then exit.",
       lambda { |value|
         select_tasks_to_show(options, :tasks, value)
       }
      ],
      ['--trace=[OUT]', '-t',
       "Turn on invoke/execute tracing, enable full backtrace. " +
       "OUT can be stderr (default) or stdout.",
       lambda { |value|
         options.trace = true
         options.backtrace = true
         select_trace_output(options, 'trace', value)
         Rake.verbose(true)
       }
      ],
      ['--verbose', '-v',
       "Log message to standard output.",
       lambda { |value| Rake.verbose(true) }
      ],
      ['--version', '-V',
       "Display the program version.",
       lambda { |value|
         puts "rake, version #{RAKEVERSION}"
         exit
       }
      ],
      ['--where', '-W [PATTERN]',
       "Describe the tasks (matching optional PATTERN), then exit.",
       lambda { |value|
         select_tasks_to_show(options, :lines, value)
         options.show_all_tasks = true
       }
      ],
      ['--no-deprecation-warnings', '-X',
       "Disable the deprecation warnings.",
       lambda { |value|
         options.ignore_deprecate = true
       }
      ],
    ])
end
# Configure the "show tasks" mode (:tasks, :describe or :lines) and the
# optional name PATTERN used by -T/-D/-W.
def select_tasks_to_show(options, show_tasks, value) # :nodoc:
  options.show_tasks = show_tasks
  # nil PATTERN becomes //, which matches every task name.
  options.show_task_pattern = Regexp.new(value || '')
  # Descriptions are only recorded when this flag is turned on.
  Rake::TaskManager.record_task_metadata = true
end
private :select_tasks_to_show
# Route trace output to $stdout or $stderr (the default) based on the
# OUT argument of --trace/--backtrace; any other value is a usage error.
def select_trace_output(options, trace_option, value) # :nodoc:
  choice = value.nil? ? nil : value.strip
  if choice == 'stdout'
    options.trace_output = $stdout
  elsif choice.nil? || choice == 'stderr'
    options.trace_output = $stderr
  else
    fail CommandLineOptionError,
         "Unrecognized --#{trace_option} option '#{choice}'"
  end
end
private :select_trace_output
# Read and handle the command line options. Returns the command line
# arguments that we didn't understand, which should (in theory) be just
# task names and env vars.
def handle_options # :nodoc:
  options.rakelib = ['rakelib']
  options.trace_output = $stderr

  OptionParser.new do |opts|
    opts.banner = "#{Rake.application.name} [-f rakefile] {options} targets..."
    opts.separator ""
    opts.separator "Options are ..."

    opts.on_tail("-h", "--help", "-H", "Display this help message.") do
      puts opts
      exit
    end

    # Register every standard rake flag, then honor RAKEOPT from the
    # environment before parsing ARGV.
    standard_rake_options.each { |args| opts.on(*args) }
    opts.environment('RAKEOPT')
  end.parse(ARGV)
end
# Similar to the regular Ruby +require+ command, but will check
# for *.rake files in addition to *.rb files.
# Similar to Kernel#require, but loads "<file_name>.rake" from +paths+
# instead of .rb files.  Returns true when loaded, false when already
# loaded, and raises LoadError when the file cannot be found.
def rake_require(file_name, paths=$LOAD_PATH, loaded=$") # :nodoc:
  rake_file = file_name + ".rake"
  return false if loaded.include?(rake_file)
  paths.each do |dir|
    candidate = File.join(dir, rake_file)
    next unless File.exist?(candidate)
    Rake.load_rakefile(candidate)
    loaded << rake_file  # remember so a second require is a no-op
    return true
  end
  fail LoadError, "Can't find #{file_name}"
end
# Walk up the directory tree from the current directory looking for a
# rakefile.  Returns [filename, directory] on success, nil when the
# search reaches the filesystem root (or --nosearch is on).  Always
# restores the original working directory.
def find_rakefile_location # :nodoc:
  here = Dir.pwd
  until (fn = have_rakefile)
    Dir.chdir("..")
    # pwd unchanged after chdir("..") means we hit the root.
    return nil if Dir.pwd == here || options.nosearch
    here = Dir.pwd
  end
  [fn, here]
ensure
  Dir.chdir(Rake.original_dir)
end

# Announce the rakefile's directory unless silent or it is where rake
# was invoked from.
def print_rakefile_directory(location) # :nodoc:
  $stderr.puts "(in #{Dir.pwd})" unless
    options.silent or original_dir == location
end
def raw_load_rakefile # :nodoc:
  rakefile, location = find_rakefile_location
  if (! options.ignore_system) &&
     (options.load_system || rakefile.nil?) &&
     system_dir && File.directory?(system_dir)
    # No project rakefile (or -g given): queue the system-wide *.rake files.
    print_rakefile_directory(location)
    glob("#{system_dir}/*.rake") do |name|
      add_import name
    end
  else
    fail "No Rakefile found (looking for: #{@rakefiles.join(', ')})" if
      rakefile.nil?
    @rakefile = rakefile
    Dir.chdir(location)
    print_rakefile_directory(location)
    # @rakefile == '' means -f '' was given: no main rakefile to load.
    Rake.load_rakefile(File.expand_path(@rakefile)) if
      @rakefile && @rakefile != ''
    # Auto-import *.rake files from each -R/--rakelibdir directory.
    options.rakelib.each do |rlib|
      glob("#{rlib}/*.rake") do |name|
        add_import name
      end
    end
  end
  load_imports
end
# Expand +path+ (with backslashes normalized to forward slashes) and
# yield each matching file name to the block.
def glob(path, &block) # :nodoc:
  normalized = path.tr("\\", "/")
  FileList.glob(normalized).each(&block)
end
private :glob
# The directory path containing the system wide rakefiles.
def system_dir # :nodoc:
  @system_dir ||=
    begin
      if ENV['RAKE_SYSTEM']
        ENV['RAKE_SYSTEM']  # explicit override wins
      else
        standard_system_dir
      end
    end
end

# The standard directory containing system wide rake files.
# On Windows this is resolved by Rake::Win32; elsewhere it is ~/.rake.
# The platform check happens once, at file load time, choosing which
# method definition is installed.
if Win32.windows?
  def standard_system_dir #:nodoc:
    Win32.win32_system_dir
  end
else
  def standard_system_dir #:nodoc:
    File.join(File.expand_path('~'), '.rake')
  end
end
private :standard_system_dir
# Collect the list of tasks on the command line. If no tasks are
# given, return a list containing only the default task.
# Environmental assignments are processed at this time as well.
#
# `args` is the list of arguments to peruse to get the list of tasks.
# It should be the command line that was given to rake, less any
# recognised command-line options, which OptionParser.parse will
# have taken care of already.
# Split the post-OptionParser command line into environment assignments
# (NAME=value) and task names; anything starting with "-" is discarded.
# Falls back to the default task when no task was named.
def collect_command_line_tasks(args) # :nodoc:
  @top_level_tasks = []
  args.each do |arg|
    env_assignment = /^(\w+)=(.*)$/m.match(arg)
    if env_assignment
      # NAME=value on the command line becomes an environment variable.
      ENV[env_assignment[1]] = env_assignment[2]
    elsif arg !~ /^-/
      @top_level_tasks << arg
    end
  end
  @top_level_tasks << default_task_name if @top_level_tasks.empty?
end

# Default task name ("default").
# (May be overridden by subclasses)
def default_task_name # :nodoc:
  "default"
end
# Add a file to the list of files to be imported.
def add_import(fn) # :nodoc:
  # Queued files are actually loaded later, by load_imports.
  @pending_imports.push(fn)
end
# Load the pending list of imported files.
def load_imports # :nodoc:
  while fn = @pending_imports.shift
    next if @imported.member?(fn)
    # If a task exists that builds the imported file, invoke it first so
    # the file exists before loading.  NOTE: low-precedence `and` makes
    # this "assign, then invoke only if a task was found".
    fn_task = lookup(fn) and fn_task.invoke
    ext = File.extname(fn)
    loader = @loaders[ext] || @default_loader
    loader.load(fn)
    # If loading defined/refreshed a task for the file that is still out
    # of date, run it and load the file a second time to pick up changes.
    if fn_task = lookup(fn) and fn_task.needed?
      fn_task.reenable
      fn_task.invoke
      loader.load(fn)
    end
    @imported << fn
  end
end
# Return the line of +backtrace+ that exactly names the current rakefile
# (case-insensitively on Windows), or '' when none does.
#
# Fix: removed a leftover `backtrace.map { |t| t[/([^:]+):/, 1] }`
# statement whose result was discarded -- String#[] has no side effects,
# so the line was dead code.
def rakefile_location(backtrace=caller) # :nodoc:
  re = /^#{@rakefile}$/
  re = /#{re.source}/i if windows?

  backtrace.find { |str| str =~ re } || ''
end
private

# Largest machine-word Fixnum on this platform; used as the "unlimited"
# job count when -j is given with no NUMBER.
FIXNUM_MAX = (2**(0.size * 8 - 2) - 1) # :nodoc:
end
end
| jingyu91/jingyu91.github.io | vendor/cache/ruby/2.3.0/gems/rake-10.5.0/lib/rake/application.rb | Ruby | mit | 23,699 |
# Fetch .gitignore template(s) from the gitignore.io API for the given
# comma-separated language/tool list, e.g. `gi ruby,rails`.
# NOTE(review): plain http and no -L; the service may redirect -- verify
# the endpoint still answers without following redirects.
function gi() { curl http://gitignore.io/api/$@ ;}

# List all template names the API knows about, one per line.
_gitignireio_get_command_list() {
  curl -s http://gitignore.io/api/list | tr "," "\n"
}

# Completion widget: complete each comma-separated segment independently.
_gitignireio () {
  compset -P '*,'        # ignore everything before the last comma
  compadd -S '' `_gitignireio_get_command_list`
}

compdef _gitignireio gi
compdef _gitignireio gi | jeanlauliac/oh-my-zsh | plugins/gitignore/gitignore.plugin.zsh | Shell | mit | 252 |
import {Date, DateWrapper} from 'angular2/src/facade/lang';
import {Map} from 'angular2/src/facade/collection';
/**
 * One benchmark sample: the metric values recorded for a single run.
 */
export class MeasureValues {
  constructor(public runIndex: number, public timeStamp: Date,
              public values: {[key: string]: any}) {}

  /** Serialize to a plain JSON-friendly object. */
  toJson() {
    return {
      'timeStamp': DateWrapper.toJson(this.timeStamp),
      'runIndex': this.runIndex,
      'values': this.values
    };
  }
}
| erictsangx/angular | modules/benchpress/src/measure_values.ts | TypeScript | mit | 415 |
/*
* Copyright (C) 2009 Texas Instruments Inc
* Copyright (C) 2014 Lad, Prabhakar <prabhakar.csengg@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* TODO : add support for VBI & HBI data service
* add static buffer allocation
*/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <media/v4l2-ioctl.h>
#include "vpif.h"
#include "vpif_capture.h"
MODULE_DESCRIPTION("TI DaVinci VPIF Capture driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(VPIF_CAPTURE_VERSION);
#define vpif_err(fmt, arg...) v4l2_err(&vpif_obj.v4l2_dev, fmt, ## arg)
#define vpif_dbg(level, debug, fmt, arg...) \
v4l2_dbg(level, debug, &vpif_obj.v4l2_dev, fmt, ## arg)
static int debug = 1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug level 0-1");
#define VPIF_DRIVER_NAME "vpif_capture"
/* global variables */
static struct vpif_device vpif_obj = { {NULL} };
static struct device *vpif_dev;
static void vpif_calculate_offsets(struct channel_obj *ch);
static void vpif_config_addr(struct channel_obj *ch, int muxmode);
static u8 channel_first_int[VPIF_NUMBER_OF_OBJECTS][2] = { {1, 1} };
/* Is set to 1 in case of SDTV formats, 2 in case of HDTV formats. */
static int ycmux_mode;
/* Convert a vb2_v4l2_buffer pointer back to its enclosing vpif_cap_buffer. */
static inline
struct vpif_cap_buffer *to_vpif_buffer(struct vb2_v4l2_buffer *vb)
{
	return container_of(vb, struct vpif_cap_buffer, vb);
}
/**
* vpif_buffer_prepare : callback function for buffer prepare
* @vb: ptr to vb2_buffer
*
* This is the callback function for buffer prepare when vb2_qbuf()
* function is called. The buffer is prepared and user space virtual address
* or user address is converted into physical address
*/
static int vpif_buffer_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vb2_queue *q = vb->vb2_queue;
	struct channel_obj *ch = vb2_get_drv_priv(q);
	struct common_obj *common;
	unsigned long addr;

	vpif_dbg(2, debug, "vpif_buffer_prepare\n");

	common = &ch->common[VPIF_VIDEO_INDEX];

	/* Plane must be large enough for the currently configured format. */
	vb2_set_plane_payload(vb, 0, common->fmt.fmt.pix.sizeimage);
	if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
		return -EINVAL;

	vbuf->field = common->fmt.fmt.pix.field;

	/* All four Y/C top/bottom DMA offsets must be 8-byte aligned. */
	addr = vb2_dma_contig_plane_dma_addr(vb, 0);
	if (!IS_ALIGNED((addr + common->ytop_off), 8) ||
	    !IS_ALIGNED((addr + common->ybtm_off), 8) ||
	    !IS_ALIGNED((addr + common->ctop_off), 8) ||
	    !IS_ALIGNED((addr + common->cbtm_off), 8)) {
		vpif_dbg(1, debug, "offset is not aligned\n");
		return -EINVAL;
	}
	return 0;
}
/**
* vpif_buffer_queue_setup : Callback function for buffer setup.
* @vq: vb2_queue ptr
* @nbuffers: ptr to number of buffers requested by application
* @nplanes:: contains number of distinct video planes needed to hold a frame
* @sizes[]: contains the size (in bytes) of each plane.
* @alloc_devs: ptr to allocation context
*
* This callback function is called when reqbuf() is called to adjust
* the buffer count and buffer size
*/
static int vpif_buffer_queue_setup(struct vb2_queue *vq,
				   unsigned int *nbuffers, unsigned int *nplanes,
				   unsigned int sizes[], struct device *alloc_devs[])
{
	struct channel_obj *ch = vb2_get_drv_priv(vq);
	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
	unsigned size = common->fmt.fmt.pix.sizeimage;

	vpif_dbg(2, debug, "vpif_buffer_setup\n");

	/* If the caller proposed plane sizes, they must be large enough. */
	if (*nplanes) {
		if (sizes[0] < size)
			return -EINVAL;
		size = sizes[0];
	}

	/* Require at least 3 buffers queued in total for streaming. */
	if (vq->num_buffers + *nbuffers < 3)
		*nbuffers = 3 - vq->num_buffers;

	*nplanes = 1;
	sizes[0] = size;

	/* Calculate the offset for Y and C data in the buffer */
	vpif_calculate_offsets(ch);

	return 0;
}
/**
* vpif_buffer_queue : Callback function to add buffer to DMA queue
* @vb: ptr to vb2_buffer
*/
static void vpif_buffer_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct channel_obj *ch = vb2_get_drv_priv(vb->vb2_queue);
	struct vpif_cap_buffer *buf = to_vpif_buffer(vbuf);
	struct common_obj *common;
	unsigned long flags;

	common = &ch->common[VPIF_VIDEO_INDEX];

	vpif_dbg(2, debug, "vpif_buffer_queue\n");

	spin_lock_irqsave(&common->irqlock, flags);
	/* add the buffer to the DMA queue (consumed from the channel ISR) */
	list_add_tail(&buf->list, &common->dma_queue);
	spin_unlock_irqrestore(&common->irqlock, flags);
}
/**
* vpif_start_streaming : Starts the DMA engine for streaming
* @vb: ptr to vb2_buffer
* @count: number of buffers
*/
static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct vpif_capture_config *vpif_config_data =
					vpif_dev->platform_data;
	struct channel_obj *ch = vb2_get_drv_priv(vq);
	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
	struct vpif_params *vpif = &ch->vpifparams;
	struct vpif_cap_buffer *buf, *tmp;
	unsigned long addr, flags;
	int ret;

	/*
	 * NOTE(review): the subdev s_stream call and board callback below
	 * run with irqlock held and interrupts off; subdev ops may sleep --
	 * confirm this against the v4l2 locking rules.
	 */
	spin_lock_irqsave(&common->irqlock, flags);

	/* Initialize field_id */
	ch->field_id = 0;

	/* configure 1 or 2 channel mode */
	if (vpif_config_data->setup_input_channel_mode) {
		ret = vpif_config_data->
			setup_input_channel_mode(vpif->std_info.ycmux_mode);
		if (ret < 0) {
			vpif_dbg(1, debug, "can't set vpif channel mode\n");
			goto err;
		}
	}

	ret = v4l2_subdev_call(ch->sd, video, s_stream, 1);
	if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV) {
		vpif_dbg(1, debug, "stream on failed in subdev\n");
		goto err;
	}

	/* Call vpif_set_params function to set the parameters and addresses */
	ret = vpif_set_video_params(vpif, ch->channel_id);
	if (ret < 0) {
		vpif_dbg(1, debug, "can't set video params\n");
		goto err;
	}

	/* on success vpif_set_video_params() returns the ycmux mode */
	ycmux_mode = ret;
	vpif_config_addr(ch, ret);

	/* Get the next frame from the buffer queue */
	common->cur_frm = common->next_frm = list_entry(common->dma_queue.next,
				    struct vpif_cap_buffer, list);
	/* Remove buffer from the buffer queue */
	list_del(&common->cur_frm->list);
	spin_unlock_irqrestore(&common->irqlock, flags);

	/* Program the first frame's DMA addresses into the VPIF. */
	addr = vb2_dma_contig_plane_dma_addr(&common->cur_frm->vb.vb2_buf, 0);

	common->set_addr(addr + common->ytop_off,
			 addr + common->ybtm_off,
			 addr + common->ctop_off,
			 addr + common->cbtm_off);

	/**
	 * Set interrupt for both the fields in VPIF Register enable channel in
	 * VPIF register
	 */
	channel_first_int[VPIF_VIDEO_INDEX][ch->channel_id] = 1;
	if (VPIF_CHANNEL0_VIDEO == ch->channel_id) {
		channel0_intr_assert();
		channel0_intr_enable(1);
		enable_channel0(1);
	}
	if (VPIF_CHANNEL1_VIDEO == ch->channel_id ||
	    ycmux_mode == 2) {
		channel1_intr_assert();
		channel1_intr_enable(1);
		enable_channel1(1);
	}

	return 0;

err:
	/* Give every queued buffer back to vb2 on failure. */
	list_for_each_entry_safe(buf, tmp, &common->dma_queue, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
	}
	spin_unlock_irqrestore(&common->irqlock, flags);

	return ret;
}
/**
* vpif_stop_streaming : Stop the DMA engine
* @vq: ptr to vb2_queue
*
* This callback stops the DMA engine and any remaining buffers
* in the DMA queue are released.
*/
static void vpif_stop_streaming(struct vb2_queue *vq)
{
	struct channel_obj *ch = vb2_get_drv_priv(vq);
	struct common_obj *common;
	unsigned long flags;
	int ret;

	common = &ch->common[VPIF_VIDEO_INDEX];

	/* Disable channel as per its device type and channel id */
	if (VPIF_CHANNEL0_VIDEO == ch->channel_id) {
		enable_channel0(0);
		channel0_intr_enable(0);
	}
	if (VPIF_CHANNEL1_VIDEO == ch->channel_id ||
	    ycmux_mode == 2) {
		enable_channel1(0);
		channel1_intr_enable(0);
	}
	ycmux_mode = 0;

	ret = v4l2_subdev_call(ch->sd, video, s_stream, 0);
	if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
		vpif_dbg(1, debug, "stream off failed in subdev\n");

	/* release all active buffers */
	spin_lock_irqsave(&common->irqlock, flags);
	if (common->cur_frm == common->next_frm) {
		/* cur == next: only one in-flight buffer to return */
		vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
				VB2_BUF_STATE_ERROR);
	} else {
		if (common->cur_frm)
			vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
					VB2_BUF_STATE_ERROR);
		if (common->next_frm)
			vb2_buffer_done(&common->next_frm->vb.vb2_buf,
					VB2_BUF_STATE_ERROR);
	}

	/* drain anything still waiting in the DMA queue */
	while (!list_empty(&common->dma_queue)) {
		common->next_frm = list_entry(common->dma_queue.next,
					      struct vpif_cap_buffer, list);
		list_del(&common->next_frm->list);
		vb2_buffer_done(&common->next_frm->vb.vb2_buf,
				VB2_BUF_STATE_ERROR);
	}
	spin_unlock_irqrestore(&common->irqlock, flags);
}
static struct vb2_ops video_qops = {
.queue_setup = vpif_buffer_queue_setup,
.buf_prepare = vpif_buffer_prepare,
.start_streaming = vpif_start_streaming,
.stop_streaming = vpif_stop_streaming,
.buf_queue = vpif_buffer_queue,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
};
/**
* vpif_process_buffer_complete: process a completed buffer
* @common: ptr to common channel object
*
* This function time stamp the buffer and mark it as DONE. It also
* wake up any process waiting on the QUEUE and set the next buffer
* as current
*/
static void vpif_process_buffer_complete(struct common_obj *common)
{
	/* Timestamp the finished frame and hand it back to vb2. */
	common->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns();
	vb2_buffer_done(&common->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
	/* Make curFrm pointing to nextFrm */
	common->cur_frm = common->next_frm;
}
/**
* vpif_schedule_next_buffer: set next buffer address for capture
* @common : ptr to common channel object
*
* This function will get next buffer from the dma queue and
* set the buffer address in the vpif register for capture.
* the buffer is marked active
*/
static void vpif_schedule_next_buffer(struct common_obj *common)
{
	unsigned long addr = 0;

	/*
	 * Pop the next queued buffer under the queue lock.
	 * NOTE: callers must guarantee dma_queue is non-empty (the ISR
	 * checks list_empty() before calling here).
	 */
	spin_lock(&common->irqlock);
	common->next_frm = list_entry(common->dma_queue.next,
				      struct vpif_cap_buffer, list);
	/* Remove that buffer from the buffer queue */
	list_del(&common->next_frm->list);
	spin_unlock(&common->irqlock);
	addr = vb2_dma_contig_plane_dma_addr(&common->next_frm->vb.vb2_buf, 0);

	/* Set top and bottom field addresses in VPIF registers */
	common->set_addr(addr + common->ytop_off,
			 addr + common->ybtm_off,
			 addr + common->ctop_off,
			 addr + common->cbtm_off);
}
/**
 * vpif_channel_isr : ISR handler for vpif capture
 * @irq: irq number
 * @dev_id: ptr to the channel id of the interrupting channel
 *
 * Completes the captured buffer, takes the next buffer from the queue
 * and programs its address into the VPIF registers.
 *
 * Returns IRQ_NONE when this channel did not raise the (shared)
 * interrupt, IRQ_HANDLED otherwise.
 */
static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
{
	struct vpif_device *dev = &vpif_obj;
	struct common_obj *common;
	struct channel_obj *ch;
	int channel_id;
	int fid = -1, i;

	channel_id = *(int *)(dev_id);
	/* The IRQ line is shared; ignore interrupts for other channels */
	if (!vpif_intr_status(channel_id))
		return IRQ_NONE;

	ch = dev->dev[channel_id];
	for (i = 0; i < VPIF_NUMBER_OF_OBJECTS; i++) {
		common = &ch->common[i];
		/* Check the field format */
		if (1 == ch->vpifparams.std_info.frm_fmt) {
			/* Progressive mode */
			spin_lock(&common->irqlock);
			if (list_empty(&common->dma_queue)) {
				spin_unlock(&common->irqlock);
				continue;
			}
			spin_unlock(&common->irqlock);

			if (!channel_first_int[i][channel_id])
				vpif_process_buffer_complete(common);
			/*
			 * Fix: clear the first-interrupt flag exactly once.
			 * The original code redundantly cleared it a second
			 * time after scheduling the next buffer.
			 */
			channel_first_int[i][channel_id] = 0;

			vpif_schedule_next_buffer(common);
		} else {
			/**
			 * Interlaced mode. If it is first interrupt, ignore
			 * it
			 */
			if (channel_first_int[i][channel_id]) {
				channel_first_int[i][channel_id] = 0;
				continue;
			}
			if (0 == i) {
				ch->field_id ^= 1;
				/* Get field id from VPIF registers */
				fid = vpif_channel_getfid(ch->channel_id);
				if (fid != ch->field_id) {
					/**
					 * If field id does not match stored
					 * field id, make them in sync
					 */
					if (0 == fid)
						ch->field_id = fid;
					return IRQ_HANDLED;
				}
			}
			/* device field id and local field id are in sync */
			if (0 == fid) {
				/* this is even field */
				if (common->cur_frm == common->next_frm)
					continue;
				/* mark the current buffer as done */
				vpif_process_buffer_complete(common);
			} else if (1 == fid) {
				/* odd field */
				spin_lock(&common->irqlock);
				if (list_empty(&common->dma_queue) ||
				    (common->cur_frm != common->next_frm)) {
					spin_unlock(&common->irqlock);
					continue;
				}
				spin_unlock(&common->irqlock);

				vpif_schedule_next_buffer(common);
			}
		}
	}
	return IRQ_HANDLED;
}
/**
 * vpif_update_std_info() - update standard related info
 * @ch: ptr to channel object
 *
 * For a given standard selected by application, update values
 * in the device data structures.
 *
 * Returns 0 on success, -EINVAL when neither the std bitmask nor the
 * DV timings match any entry of the vpif_ch_params table.
 */
static int vpif_update_std_info(struct channel_obj *ch)
{
	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
	struct vpif_params *vpifparams = &ch->vpifparams;
	const struct vpif_channel_config_params *config;
	struct vpif_channel_config_params *std_info = &vpifparams->std_info;
	struct video_obj *vid_ch = &ch->video;
	int index;

	vpif_dbg(2, debug, "vpif_update_std_info\n");

	/*
	 * Search the parameter table: SD entries match on the std bitmask,
	 * HD entries match on exact DV timings.
	 */
	for (index = 0; index < vpif_ch_params_count; index++) {
		config = &vpif_ch_params[index];
		if (config->hd_sd == 0) {
			vpif_dbg(2, debug, "SD format\n");
			if (config->stdid & vid_ch->stdid) {
				memcpy(std_info, config, sizeof(*config));
				break;
			}
		} else {
			vpif_dbg(2, debug, "HD format\n");
			if (!memcmp(&config->dv_timings, &vid_ch->dv_timings,
				sizeof(vid_ch->dv_timings))) {
				memcpy(std_info, config, sizeof(*config));
				break;
			}
		}
	}

	/* standard not found */
	if (index == vpif_ch_params_count)
		return -EINVAL;

	/* sizeimage assumes 2 bytes per pixel for both supported formats */
	common->fmt.fmt.pix.width = std_info->width;
	common->width = std_info->width;
	common->fmt.fmt.pix.height = std_info->height;
	common->height = std_info->height;
	common->fmt.fmt.pix.sizeimage = common->height * common->width * 2;
	common->fmt.fmt.pix.bytesperline = std_info->width;
	vpifparams->video_params.hpitch = std_info->width;
	vpifparams->video_params.storage_mode = std_info->frm_fmt;

	/* a non-zero stdid means an SD standard was matched above */
	if (vid_ch->stdid)
		common->fmt.fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
	else
		common->fmt.fmt.pix.colorspace = V4L2_COLORSPACE_REC709;

	/* progressive frame format -> no fields */
	if (ch->vpifparams.std_info.frm_fmt)
		common->fmt.fmt.pix.field = V4L2_FIELD_NONE;
	else
		common->fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;

	if (ch->vpifparams.iface.if_type == VPIF_IF_RAW_BAYER)
		common->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_SBGGR8;
	else
		common->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUV422P;

	common->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

	return 0;
}
/**
 * vpif_calculate_offsets : This function calculates buffers offsets
 * @ch : ptr to channel object
 *
 * This function calculates buffer offsets for Y and C in the top and
 * bottom field, based on the negotiated field layout, and derives the
 * storage mode and horizontal pitch for the VPIF video params.
 */
static void vpif_calculate_offsets(struct channel_obj *ch)
{
	unsigned int hpitch, sizeimage;
	struct video_obj *vid_ch = &(ch->video);
	struct vpif_params *vpifparams = &ch->vpifparams;
	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
	enum v4l2_field field = common->fmt.fmt.pix.field;

	vpif_dbg(2, debug, "vpif_calculate_offsets\n");

	/* ANY: pick a concrete field order from the standard's frame format */
	if (V4L2_FIELD_ANY == field) {
		if (vpifparams->std_info.frm_fmt)
			vid_ch->buf_field = V4L2_FIELD_NONE;
		else
			vid_ch->buf_field = V4L2_FIELD_INTERLACED;
	} else
		vid_ch->buf_field = common->fmt.fmt.pix.field;

	sizeimage = common->fmt.fmt.pix.sizeimage;
	hpitch = common->fmt.fmt.pix.bytesperline;

	if ((V4L2_FIELD_NONE == vid_ch->buf_field) ||
	    (V4L2_FIELD_INTERLACED == vid_ch->buf_field)) {
		/* Calculate offsets for Y top, Y Bottom, C top and C Bottom */
		common->ytop_off = 0;
		common->ybtm_off = hpitch;
		common->ctop_off = sizeimage / 2;
		common->cbtm_off = sizeimage / 2 + hpitch;
	} else if (V4L2_FIELD_SEQ_TB == vid_ch->buf_field) {
		/* Calculate offsets for Y top, Y Bottom, C top and C Bottom */
		common->ytop_off = 0;
		common->ybtm_off = sizeimage / 4;
		common->ctop_off = sizeimage / 2;
		common->cbtm_off = common->ctop_off + sizeimage / 4;
	} else if (V4L2_FIELD_SEQ_BT == vid_ch->buf_field) {
		/* Calculate offsets for Y top, Y Bottom, C top and C Bottom */
		common->ybtm_off = 0;
		common->ytop_off = sizeimage / 4;
		common->cbtm_off = sizeimage / 2;
		common->ctop_off = common->cbtm_off + sizeimage / 4;
	}

	/* storage mode 1 = field-interleaved frame, 0 = separate fields */
	if ((V4L2_FIELD_NONE == vid_ch->buf_field) ||
	    (V4L2_FIELD_INTERLACED == vid_ch->buf_field))
		vpifparams->video_params.storage_mode = 1;
	else
		vpifparams->video_params.storage_mode = 0;

	/* interlaced frame storage needs double pitch to skip lines */
	if (1 == vpifparams->std_info.frm_fmt)
		vpifparams->video_params.hpitch =
		    common->fmt.fmt.pix.bytesperline;
	else {
		if ((field == V4L2_FIELD_ANY)
		    || (field == V4L2_FIELD_INTERLACED))
			vpifparams->video_params.hpitch =
			    common->fmt.fmt.pix.bytesperline * 2;
		else
			vpifparams->video_params.hpitch =
			    common->fmt.fmt.pix.bytesperline;
	}

	ch->vpifparams.video_params.stdid = vpifparams->std_info.stdid;
}
/**
 * vpif_get_default_field() - Get default field type based on interface
 * @iface: ptr to vpif interface
 *
 * Raw Bayer interfaces capture progressive data (no fields); every
 * other interface type defaults to interlaced capture.
 */
static inline enum v4l2_field vpif_get_default_field(
				struct vpif_interface *iface)
{
	if (iface->if_type == VPIF_IF_RAW_BAYER)
		return V4L2_FIELD_NONE;

	return V4L2_FIELD_INTERLACED;
}
/**
 * vpif_config_addr() - function to configure buffer address in vpif
 * @ch: channel ptr
 * @muxmode: channel mux mode
 *
 * Selects which register-programming helper set_addr points at:
 * channel 1 has its own helper, while channel 0 uses the Y/C
 * non-multiplexed variant when 2-channel mux mode is in effect.
 */
static void vpif_config_addr(struct channel_obj *ch, int muxmode)
{
	struct common_obj *common;

	vpif_dbg(2, debug, "vpif_config_addr\n");

	common = &(ch->common[VPIF_VIDEO_INDEX]);

	if (VPIF_CHANNEL1_VIDEO == ch->channel_id)
		common->set_addr = ch1_set_videobuf_addr;
	else if (2 == muxmode)
		common->set_addr = ch0_set_videobuf_addr_yc_nmux;
	else
		common->set_addr = ch0_set_videobuf_addr;
}
/**
 * vpif_input_to_subdev() - Maps input to sub device
 * @vpif_cfg: global config ptr
 * @chan_cfg: channel config ptr
 * @input_index: Given input index from application
 *
 * Look up the sub device backing a given input index. Every input in
 * the channel config carries the name of its sub device; that name is
 * matched against the global sub device table.
 *
 * Returns the sub device index, or -1 when the input has no sub device
 * name or no table entry matches.
 */
static int vpif_input_to_subdev(
		struct vpif_capture_config *vpif_cfg,
		struct vpif_capture_chan_config *chan_cfg,
		int input_index)
{
	const char *subdev_name;
	int idx;

	vpif_dbg(2, debug, "vpif_input_to_subdev\n");

	subdev_name = chan_cfg->inputs[input_index].subdev_name;
	if (!subdev_name)
		return -1;

	/* walk the sub device table looking for a matching name */
	for (idx = 0; idx < vpif_cfg->subdev_count; idx++) {
		struct vpif_subdev_info *info = &vpif_cfg->subdev_info[idx];

		if (strcmp(info->name, subdev_name) == 0)
			return idx;
	}

	return -1;
}
/**
 * vpif_set_input() - Select an input
 * @vpif_cfg: global config ptr
 * @ch: channel
 * @index: Given input index from application
 *
 * Select the given input: route the board-level input path to the sub
 * device (if the platform provides a hook), tell the sub device which
 * of its inputs/outputs to use, then record the selection and the
 * interface parameters in the channel object.
 *
 * Returns 0 on success or a negative error from the platform hook /
 * sub device.
 */
static int vpif_set_input(
		struct vpif_capture_config *vpif_cfg,
		struct channel_obj *ch,
		int index)
{
	struct vpif_capture_chan_config *chan_cfg =
		&vpif_cfg->chan_config[ch->channel_id];
	struct vpif_subdev_info *subdev_info = NULL;
	struct v4l2_subdev *sd = NULL;
	u32 input = 0, output = 0;
	int sd_index;
	int ret;

	/* inputs without a sub device (sd_index < 0) are still legal */
	sd_index = vpif_input_to_subdev(vpif_cfg, chan_cfg, index);
	if (sd_index >= 0) {
		sd = vpif_obj.sd[sd_index];
		subdev_info = &vpif_cfg->subdev_info[sd_index];
	}

	/* first setup input path from sub device to vpif */
	if (sd && vpif_cfg->setup_input_path) {
		ret = vpif_cfg->setup_input_path(ch->channel_id,
						 subdev_info->name);
		if (ret < 0) {
			/*
			 * Fix: user-visible string kept on one line instead
			 * of being split with a continuation (kernel coding
			 * style: quoted strings must stay greppable).
			 */
			vpif_dbg(1, debug, "couldn't setup input path for the sub device %s, for input index %d\n",
				 subdev_info->name, index);
			return ret;
		}
	}

	if (sd) {
		input = chan_cfg->inputs[index].input_route;
		output = chan_cfg->inputs[index].output_route;
		ret = v4l2_subdev_call(sd, video, s_routing,
				       input, output, 0);
		/* -ENOIOCTLCMD just means the sub device has no routing */
		if (ret < 0 && ret != -ENOIOCTLCMD) {
			vpif_dbg(1, debug, "Failed to set input\n");
			return ret;
		}
	}
	ch->input_idx = index;
	ch->sd = sd;
	/* copy interface parameters to vpif */
	ch->vpifparams.iface = chan_cfg->vpif_if;

	/* update tvnorms from the sub device input info */
	ch->video_dev.tvnorms = chan_cfg->inputs[index].input.std;
	return 0;
}
/**
 * vpif_querystd() - querystd handler
 * @file: file ptr
 * @priv: file handle
 * @std_id: ptr to std id
 *
 * This function is called to detect the standard at the selected input.
 * Detection is delegated entirely to the sub device; if the sub device
 * does not implement querystd, -ENODATA is returned.
 */
static int vpif_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
{
	struct video_device *vdev = video_devdata(file);
	struct channel_obj *ch = video_get_drvdata(vdev);
	int ret;

	vpif_dbg(2, debug, "vpif_querystd\n");

	/* Call querystd function of decoder device */
	ret = v4l2_subdev_call(ch->sd, video, querystd, std_id);
	if (ret == -ENOIOCTLCMD || ret == -ENODEV)
		return -ENODATA;
	if (ret) {
		vpif_dbg(1, debug, "Failed to query standard for sub devices\n");
		return ret;
	}

	return 0;
}
/**
 * vpif_g_std() - get STD handler
 * @file: file ptr
 * @priv: file handle
 * @std: ptr to std id
 *
 * Returns the currently selected standard for the channel, or -ENODATA
 * when the active input does not support standards (e.g. DV timings
 * inputs).
 */
static int vpif_g_std(struct file *file, void *priv, v4l2_std_id *std)
{
	struct vpif_capture_config *config = vpif_dev->platform_data;
	struct video_device *vdev = video_devdata(file);
	struct channel_obj *ch = video_get_drvdata(vdev);
	struct vpif_capture_chan_config *chan_cfg;
	struct v4l2_input input;

	vpif_dbg(2, debug, "vpif_g_std\n");

	if (!config->chan_config[ch->channel_id].inputs)
		return -ENODATA;

	chan_cfg = &config->chan_config[ch->channel_id];
	input = chan_cfg->inputs[ch->input_idx].input;
	if (input.capabilities != V4L2_IN_CAP_STD)
		return -ENODATA;

	*std = ch->video.stdid;
	return 0;
}
/**
 * vpif_s_std() - set STD handler
 * @file: file ptr
 * @priv: file handle
 * @std_id: std id (by value)
 *
 * Stores the new standard, refreshes the derived format/timing info and
 * forwards the standard to the sub device. Fails with -ENODATA on
 * inputs that don't support standards and -EBUSY while buffers are
 * queued.
 */
static int vpif_s_std(struct file *file, void *priv, v4l2_std_id std_id)
{
	struct vpif_capture_config *config = vpif_dev->platform_data;
	struct video_device *vdev = video_devdata(file);
	struct channel_obj *ch = video_get_drvdata(vdev);
	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
	struct vpif_capture_chan_config *chan_cfg;
	struct v4l2_input input;
	int ret;

	vpif_dbg(2, debug, "vpif_s_std\n");

	if (!config->chan_config[ch->channel_id].inputs)
		return -ENODATA;

	chan_cfg = &config->chan_config[ch->channel_id];
	input = chan_cfg->inputs[ch->input_idx].input;
	if (input.capabilities != V4L2_IN_CAP_STD)
		return -ENODATA;

	if (vb2_is_busy(&common->buffer_queue))
		return -EBUSY;

	/* Call encoder subdevice function to set the standard */
	ch->video.stdid = std_id;
	/* a standard and DV timings are mutually exclusive */
	memset(&ch->video.dv_timings, 0, sizeof(ch->video.dv_timings));

	/* Get the information about the standard */
	if (vpif_update_std_info(ch)) {
		vpif_err("Error getting the standard info\n");
		return -EINVAL;
	}

	/* set standard in the sub device */
	ret = v4l2_subdev_call(ch->sd, video, s_std, std_id);
	if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV) {
		vpif_dbg(1, debug, "Failed to set standard for sub devices\n");
		return ret;
	}
	return 0;
}
/**
 * vpif_enum_input() - ENUMINPUT handler
 * @file: file ptr
 * @priv: file handle
 * @input: ptr to input structure
 *
 * Copies the platform-data description of the requested input back to
 * the caller, or fails with -EINVAL for an out-of-range index.
 */
static int vpif_enum_input(struct file *file, void *priv,
			   struct v4l2_input *input)
{
	struct vpif_capture_config *config = vpif_dev->platform_data;
	struct video_device *vdev = video_devdata(file);
	struct channel_obj *ch = video_get_drvdata(vdev);
	struct vpif_capture_chan_config *chan_cfg =
		&config->chan_config[ch->channel_id];

	if (input->index >= chan_cfg->input_count)
		return -EINVAL;

	/* plain struct assignment; equivalent to the memcpy it replaces */
	*input = chan_cfg->inputs[input->index].input;

	return 0;
}
/**
 * vpif_g_input() - Get INPUT handler
 * @file: file ptr
 * @priv: file handle
 * @index: ptr to input index
 *
 * Reports the index of the currently selected input.
 */
static int vpif_g_input(struct file *file, void *priv, unsigned int *index)
{
	struct video_device *vdev = video_devdata(file);
	struct channel_obj *ch = video_get_drvdata(vdev);

	*index = ch->input_idx;
	return 0;
}
/**
 * vpif_s_input() - Set INPUT handler
 * @file: file ptr
 * @priv: file handle
 * @index: input index
 *
 * Validates the index, refuses while buffers are queued, then delegates
 * the actual routing/selection to vpif_set_input().
 */
static int vpif_s_input(struct file *file, void *priv, unsigned int index)
{
	struct vpif_capture_config *config = vpif_dev->platform_data;
	struct video_device *vdev = video_devdata(file);
	struct channel_obj *ch = video_get_drvdata(vdev);
	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
	struct vpif_capture_chan_config *chan_cfg;

	chan_cfg = &config->chan_config[ch->channel_id];

	if (index >= chan_cfg->input_count)
		return -EINVAL;

	if (vb2_is_busy(&common->buffer_queue))
		return -EBUSY;

	return vpif_set_input(config, ch, index);
}
/**
 * vpif_enum_fmt_vid_cap() - ENUM_FMT handler
 * @file: file ptr
 * @priv: file handle
 * @fmt: ptr to the format descriptor to fill
 *
 * Exactly one format is supported per interface type: raw Bayer for
 * VPIF_IF_RAW_BAYER interfaces, YUV 4:2:2 planar otherwise.
 */
static int vpif_enum_fmt_vid_cap(struct file *file, void *priv,
				 struct v4l2_fmtdesc *fmt)
{
	struct video_device *vdev = video_devdata(file);
	struct channel_obj *ch = video_get_drvdata(vdev);

	if (fmt->index != 0) {
		vpif_dbg(1, debug, "Invalid format index\n");
		return -EINVAL;
	}

	/* Fill in the information about format */
	fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	/* same in both branches */
	if (ch->vpifparams.iface.if_type == VPIF_IF_RAW_BAYER) {
		/* bounded copy instead of strcpy, matching the file's
		 * strlcpy usage elsewhere */
		strlcpy(fmt->description, "Raw Mode -Bayer Pattern GrRBGb",
			sizeof(fmt->description));
		fmt->pixelformat = V4L2_PIX_FMT_SBGGR8;
	} else {
		strlcpy(fmt->description, "YCbCr4:2:2 YC Planar",
			sizeof(fmt->description));
		fmt->pixelformat = V4L2_PIX_FMT_YUV422P;
	}
	return 0;
}
/**
 * vpif_try_fmt_vid_cap() - TRY_FMT handler
 * @file: file ptr
 * @priv: file handle
 * @fmt: ptr to v4l2 format structure
 *
 * Forces the pixelformat to the only one the interface supports, then
 * fills in the geometry derived from the current standard/timings.
 * Never fails: the format is corrected rather than rejected.
 */
static int vpif_try_fmt_vid_cap(struct file *file, void *priv,
				struct v4l2_format *fmt)
{
	struct video_device *vdev = video_devdata(file);
	struct channel_obj *ch = video_get_drvdata(vdev);
	struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
	struct common_obj *common = &(ch->common[VPIF_VIDEO_INDEX]);
	struct vpif_params *vpif_params = &ch->vpifparams;

	/*
	 * to suppress v4l-compliance warnings silently correct
	 * the pixelformat
	 */
	if (vpif_params->iface.if_type == VPIF_IF_RAW_BAYER) {
		if (pixfmt->pixelformat != V4L2_PIX_FMT_SBGGR8)
			pixfmt->pixelformat = V4L2_PIX_FMT_SBGGR8;
	} else {
		if (pixfmt->pixelformat != V4L2_PIX_FMT_YUV422P)
			pixfmt->pixelformat = V4L2_PIX_FMT_YUV422P;
	}

	common->fmt.fmt.pix.pixelformat = pixfmt->pixelformat;

	vpif_update_std_info(ch);

	pixfmt->field = common->fmt.fmt.pix.field;
	pixfmt->colorspace = common->fmt.fmt.pix.colorspace;
	/* bytesperline is the Y-plane pitch, i.e. one byte per pixel */
	pixfmt->bytesperline = common->fmt.fmt.pix.width;
	pixfmt->width = common->fmt.fmt.pix.width;
	pixfmt->height = common->fmt.fmt.pix.height;
	pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height * 2;
	pixfmt->priv = 0;

	return 0;
}
/**
 * vpif_g_fmt_vid_cap() - G_FMT handler
 * @file: file ptr
 * @priv: file handle
 * @fmt: ptr to v4l2 format structure
 *
 * Returns the format currently stored in the channel object. (The
 * original kerneldoc title wrongly said "Set INPUT handler".)
 */
static int vpif_g_fmt_vid_cap(struct file *file, void *priv,
			      struct v4l2_format *fmt)
{
	struct video_device *vdev = video_devdata(file);
	struct channel_obj *ch = video_get_drvdata(vdev);
	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];

	/* Check the validity of the buffer type */
	if (common->fmt.type != fmt->type)
		return -EINVAL;

	/* Fill in the information about format */
	*fmt = common->fmt;
	return 0;
}
/**
 * vpif_s_fmt_vid_cap() - S_FMT handler
 * @file: file ptr
 * @priv: file handle
 * @fmt: ptr to v4l2 format structure
 *
 * Runs TRY_FMT to normalize the requested format, then stores it in the
 * channel object. Refused with -EBUSY while buffers are queued.
 */
static int vpif_s_fmt_vid_cap(struct file *file, void *priv,
			      struct v4l2_format *fmt)
{
	struct video_device *vdev = video_devdata(file);
	struct channel_obj *ch = video_get_drvdata(vdev);
	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
	int ret;

	vpif_dbg(2, debug, "%s\n", __func__);

	if (vb2_is_busy(&common->buffer_queue))
		return -EBUSY;

	ret = vpif_try_fmt_vid_cap(file, priv, fmt);
	if (ret)
		return ret;

	/* store the format in the channel object */
	common->fmt = *fmt;
	return 0;
}
/**
 * vpif_querycap() - QUERYCAP handler
 * @file: file ptr
 * @priv: file handle
 * @cap: ptr to v4l2_capability structure
 *
 * Reports driver name, board name (from platform data) and the
 * capture/streaming capabilities of the device.
 */
static int vpif_querycap(struct file *file, void *priv,
			 struct v4l2_capability *cap)
{
	struct vpif_capture_config *config = vpif_dev->platform_data;

	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
	strlcpy(cap->driver, VPIF_DRIVER_NAME, sizeof(cap->driver));
	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
		 dev_name(vpif_dev));
	strlcpy(cap->card, config->card_name, sizeof(cap->card));

	return 0;
}
/**
 * vpif_enum_dv_timings() - ENUM_DV_TIMINGS handler
 * @file: file ptr
 * @priv: file handle
 * @timings: input timings
 *
 * Delegates the enumeration to the sub device (pad op). -ENODATA when
 * the active input has no DV timings capability.
 */
static int
vpif_enum_dv_timings(struct file *file, void *priv,
		     struct v4l2_enum_dv_timings *timings)
{
	struct vpif_capture_config *config = vpif_dev->platform_data;
	struct video_device *vdev = video_devdata(file);
	struct channel_obj *ch = video_get_drvdata(vdev);
	struct vpif_capture_chan_config *chan_cfg;
	struct v4l2_input input;
	int ret;

	if (!config->chan_config[ch->channel_id].inputs)
		return -ENODATA;

	chan_cfg = &config->chan_config[ch->channel_id];
	input = chan_cfg->inputs[ch->input_idx].input;
	if (input.capabilities != V4L2_IN_CAP_DV_TIMINGS)
		return -ENODATA;

	timings->pad = 0;

	ret = v4l2_subdev_call(ch->sd, pad, enum_dv_timings, timings);
	if (ret == -ENOIOCTLCMD || ret == -ENODEV)
		return -EINVAL;
	return ret;
}
/**
 * vpif_query_dv_timings() - QUERY_DV_TIMINGS handler
 * @file: file ptr
 * @priv: file handle
 * @timings: input timings
 *
 * Asks the sub device to detect the incoming timings. -ENODATA when
 * the active input has no DV timings capability or the sub device
 * doesn't implement the query.
 */
static int
vpif_query_dv_timings(struct file *file, void *priv,
		      struct v4l2_dv_timings *timings)
{
	struct vpif_capture_config *config = vpif_dev->platform_data;
	struct video_device *vdev = video_devdata(file);
	struct channel_obj *ch = video_get_drvdata(vdev);
	struct vpif_capture_chan_config *chan_cfg;
	struct v4l2_input input;
	int ret;

	if (!config->chan_config[ch->channel_id].inputs)
		return -ENODATA;

	chan_cfg = &config->chan_config[ch->channel_id];
	input = chan_cfg->inputs[ch->input_idx].input;
	if (input.capabilities != V4L2_IN_CAP_DV_TIMINGS)
		return -ENODATA;

	ret = v4l2_subdev_call(ch->sd, video, query_dv_timings, timings);
	if (ret == -ENOIOCTLCMD || ret == -ENODEV)
		return -ENODATA;
	return ret;
}
/**
 * vpif_s_dv_timings() - S_DV_TIMINGS handler
 * @file: file ptr
 * @priv: file handle
 * @timings: digital video timings
 *
 * Validates and stores BT.656/1120 timings, forwards them to the sub
 * device, then derives the VPIF video-port line/size parameters from
 * the blanking intervals. Selecting DV timings clears any previously
 * selected standard.
 */
static int vpif_s_dv_timings(struct file *file, void *priv,
			     struct v4l2_dv_timings *timings)
{
	struct vpif_capture_config *config = vpif_dev->platform_data;
	struct video_device *vdev = video_devdata(file);
	struct channel_obj *ch = video_get_drvdata(vdev);
	struct vpif_params *vpifparams = &ch->vpifparams;
	struct vpif_channel_config_params *std_info = &vpifparams->std_info;
	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
	struct video_obj *vid_ch = &ch->video;
	struct v4l2_bt_timings *bt = &vid_ch->dv_timings.bt;
	struct vpif_capture_chan_config *chan_cfg;
	struct v4l2_input input;
	int ret;

	if (!config->chan_config[ch->channel_id].inputs)
		return -ENODATA;

	chan_cfg = &config->chan_config[ch->channel_id];
	input = chan_cfg->inputs[ch->input_idx].input;
	if (input.capabilities != V4L2_IN_CAP_DV_TIMINGS)
		return -ENODATA;

	if (timings->type != V4L2_DV_BT_656_1120) {
		vpif_dbg(2, debug, "Timing type not defined\n");
		return -EINVAL;
	}

	if (vb2_is_busy(&common->buffer_queue))
		return -EBUSY;

	/* Configure subdevice timings, if any */
	ret = v4l2_subdev_call(ch->sd, video, s_dv_timings, timings);
	if (ret == -ENOIOCTLCMD || ret == -ENODEV)
		ret = 0;
	if (ret < 0) {
		vpif_dbg(2, debug, "Error setting custom DV timings\n");
		return ret;
	}

	if (!(timings->bt.width && timings->bt.height &&
	      (timings->bt.hbackporch ||
	       timings->bt.hfrontporch ||
	       timings->bt.hsync) &&
	      timings->bt.vfrontporch &&
	      (timings->bt.vbackporch ||
	       timings->bt.vsync))) {
		/*
		 * Fix: message used to say "vertical back porch" twice; the
		 * check above actually requires the vertical FRONT porch.
		 */
		vpif_dbg(2, debug, "Timings for width, height, horizontal back porch, horizontal sync, horizontal front porch, vertical back porch, vertical sync and vertical front porch must be defined\n");
		return -EINVAL;
	}

	vid_ch->dv_timings = *timings;

	/* Configure video port timings */
	std_info->eav2sav = V4L2_DV_BT_BLANKING_WIDTH(bt) - 8;
	std_info->sav2eav = bt->width;

	std_info->l1 = 1;
	std_info->l3 = bt->vsync + bt->vbackporch + 1;

	std_info->vsize = V4L2_DV_BT_FRAME_HEIGHT(bt);
	if (bt->interlaced) {
		if (bt->il_vbackporch || bt->il_vfrontporch || bt->il_vsync) {
			std_info->l5 = std_info->vsize/2 -
				(bt->vfrontporch - 1);
			std_info->l7 = std_info->vsize/2 + 1;
			std_info->l9 = std_info->l7 + bt->il_vsync +
				bt->il_vbackporch + 1;
			std_info->l11 = std_info->vsize -
				(bt->il_vfrontporch - 1);
		} else {
			vpif_dbg(2, debug, "Required timing values for interlaced BT format missing\n");
			return -EINVAL;
		}
	} else {
		std_info->l5 = std_info->vsize - (bt->vfrontporch - 1);
	}
	/* Fix: strlcpy guarantees NUL termination; strncpy did not */
	strlcpy(std_info->name, "Custom timings BT656/1120", VPIF_MAX_NAME);
	std_info->width = bt->width;
	std_info->height = bt->height;
	std_info->frm_fmt = bt->interlaced ? 0 : 1;
	std_info->ycmux_mode = 0;
	std_info->capture_format = 0;
	std_info->vbi_supported = 0;
	std_info->hd_sd = 1;
	std_info->stdid = 0;
	vid_ch->stdid = 0;

	return 0;
}
/**
 * vpif_g_dv_timings() - G_DV_TIMINGS handler
 * @file: file ptr
 * @priv: file handle
 * @timings: digital video timings
 *
 * Returns the timings previously stored by vpif_s_dv_timings();
 * -ENODATA when the active input has no DV timings capability.
 */
static int vpif_g_dv_timings(struct file *file, void *priv,
			     struct v4l2_dv_timings *timings)
{
	struct vpif_capture_config *config = vpif_dev->platform_data;
	struct video_device *vdev = video_devdata(file);
	struct channel_obj *ch = video_get_drvdata(vdev);
	struct video_obj *vid_ch = &ch->video;
	struct vpif_capture_chan_config *chan_cfg;
	struct v4l2_input input;

	if (!config->chan_config[ch->channel_id].inputs)
		return -ENODATA;

	chan_cfg = &config->chan_config[ch->channel_id];
	input = chan_cfg->inputs[ch->input_idx].input;
	if (input.capabilities != V4L2_IN_CAP_DV_TIMINGS)
		return -ENODATA;

	*timings = vid_ch->dv_timings;

	return 0;
}
/*
 * vpif_log_status() - Status information
 * @filep: file ptr
 * @priv: file handle
 *
 * Broadcasts a log_status request to every registered sub device.
 * Returns zero.
 */
static int vpif_log_status(struct file *filep, void *priv)
{
	/* status for sub devices */
	v4l2_device_call_all(&vpif_obj.v4l2_dev, 0, core, log_status);

	return 0;
}
/*
 * vpif capture ioctl operations: buffer management is delegated to the
 * vb2 helper ioctls, everything else to the handlers above.
 */
static const struct v4l2_ioctl_ops vpif_ioctl_ops = {
	.vidioc_querycap		= vpif_querycap,
	.vidioc_enum_fmt_vid_cap	= vpif_enum_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap		= vpif_g_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap		= vpif_s_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap		= vpif_try_fmt_vid_cap,

	.vidioc_enum_input		= vpif_enum_input,
	.vidioc_s_input			= vpif_s_input,
	.vidioc_g_input			= vpif_g_input,

	.vidioc_reqbufs			= vb2_ioctl_reqbufs,
	.vidioc_create_bufs		= vb2_ioctl_create_bufs,
	.vidioc_querybuf		= vb2_ioctl_querybuf,
	.vidioc_qbuf			= vb2_ioctl_qbuf,
	.vidioc_dqbuf			= vb2_ioctl_dqbuf,
	.vidioc_expbuf			= vb2_ioctl_expbuf,
	.vidioc_streamon		= vb2_ioctl_streamon,
	.vidioc_streamoff		= vb2_ioctl_streamoff,

	.vidioc_querystd		= vpif_querystd,
	.vidioc_s_std			= vpif_s_std,
	.vidioc_g_std			= vpif_g_std,

	.vidioc_enum_dv_timings		= vpif_enum_dv_timings,
	.vidioc_query_dv_timings	= vpif_query_dv_timings,
	.vidioc_s_dv_timings		= vpif_s_dv_timings,
	.vidioc_g_dv_timings		= vpif_g_dv_timings,

	.vidioc_log_status		= vpif_log_status,
};
/* vpif file operations */
static struct v4l2_file_operations vpif_fops = {
.owner = THIS_MODULE,
.open = v4l2_fh_open,
.release = vb2_fop_release,
.unlocked_ioctl = video_ioctl2,
.mmap = vb2_fop_mmap,
.poll = vb2_fop_poll
};
/**
 * initialize_vpif() - Initialize vpif data structures
 *
 * Allocates one zeroed channel object per capture device. On allocation
 * failure every object allocated so far is freed and -ENOMEM is
 * returned; on success returns 0.
 */
static int initialize_vpif(void)
{
	int i;

	/* Allocate memory for the channel objects */
	for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
		vpif_obj.dev[i] =
			kzalloc(sizeof(*vpif_obj.dev[i]), GFP_KERNEL);
		if (!vpif_obj.dev[i]) {
			/* unwind: free the objects allocated so far */
			while (--i >= 0)
				kfree(vpif_obj.dev[i]);
			return -ENOMEM;
		}
	}

	return 0;
}
/*
 * vpif_async_bound() - async notifier "bound" callback.
 *
 * Slots the newly bound subdev into vpif_obj.sd[] at the index of the
 * matching entry in the platform subdev_info table; -EINVAL if the
 * subdev name is unknown.
 */
static int vpif_async_bound(struct v4l2_async_notifier *notifier,
			    struct v4l2_subdev *subdev,
			    struct v4l2_async_subdev *asd)
{
	int i;

	for (i = 0; i < vpif_obj.config->subdev_count; i++)
		if (!strcmp(vpif_obj.config->subdev_info[i].name,
			    subdev->name)) {
			vpif_obj.sd[i] = subdev;
			return 0;
		}

	return -EINVAL;
}
/*
 * vpif_probe_complete() - second-stage probe, run once all subdevs are
 * available.
 *
 * For every capture device: selects input 0, sets an initial NTSC
 * format, initializes the vb2 queue and registers the video device.
 * On failure, unregisters the devices registered so far and tears down
 * the v4l2_device.
 */
static int vpif_probe_complete(void)
{
	struct common_obj *common;
	struct video_device *vdev;
	struct channel_obj *ch;
	struct vb2_queue *q;
	int j, err, k;

	for (j = 0; j < VPIF_CAPTURE_MAX_DEVICES; j++) {
		ch = vpif_obj.dev[j];
		ch->channel_id = j;
		common = &(ch->common[VPIF_VIDEO_INDEX]);
		spin_lock_init(&common->irqlock);
		mutex_init(&common->lock);

		/* select input 0 */
		err = vpif_set_input(vpif_obj.config, ch, 0);
		if (err)
			goto probe_out;

		/* set initial format */
		ch->video.stdid = V4L2_STD_525_60;
		memset(&ch->video.dv_timings, 0, sizeof(ch->video.dv_timings));
		vpif_update_std_info(ch);

		/* Initialize vb2 queue */
		q = &common->buffer_queue;
		q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
		q->drv_priv = ch;
		q->ops = &video_qops;
		q->mem_ops = &vb2_dma_contig_memops;
		q->buf_struct_size = sizeof(struct vpif_cap_buffer);
		q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
		q->min_buffers_needed = 1;
		q->lock = &common->lock;
		q->dev = vpif_dev;

		err = vb2_queue_init(q);
		if (err) {
			vpif_err("vpif_capture: vb2_queue_init() failed\n");
			goto probe_out;
		}

		INIT_LIST_HEAD(&common->dma_queue);

		/* Initialize the video_device structure */
		vdev = &ch->video_dev;
		strlcpy(vdev->name, VPIF_DRIVER_NAME, sizeof(vdev->name));
		vdev->release = video_device_release_empty;
		vdev->fops = &vpif_fops;
		vdev->ioctl_ops = &vpif_ioctl_ops;
		vdev->v4l2_dev = &vpif_obj.v4l2_dev;
		vdev->vfl_dir = VFL_DIR_RX;
		vdev->queue = q;
		vdev->lock = &common->lock;
		video_set_drvdata(&ch->video_dev, ch);
		err = video_register_device(vdev,
					    VFL_TYPE_GRABBER, (j ? 1 : 0));
		if (err)
			goto probe_out;
	}

	v4l2_info(&vpif_obj.v4l2_dev, "VPIF capture driver initialized\n");
	return 0;

probe_out:
	/* unregister the devices that were successfully registered */
	for (k = 0; k < j; k++) {
		ch = vpif_obj.dev[k];
		/*
		 * Fix: dropped the dead "common = &ch->common[k]" assignment;
		 * it was never used here and indexed common[] with a device
		 * index rather than an object-type index.
		 */
		video_unregister_device(&ch->video_dev);
	}
	kfree(vpif_obj.sd);
	v4l2_device_unregister(&vpif_obj.v4l2_dev);

	return err;
}
/*
 * vpif_async_complete() - async notifier "complete" callback; runs the
 * second probe stage once every awaited subdev has bound.
 */
static int vpif_async_complete(struct v4l2_async_notifier *notifier)
{
	return vpif_probe_complete();
}
/**
 * vpif_probe : This function probes the vpif capture driver
 * @pdev: platform device pointer
 *
 * Registers a v4l2_device, requests the (shared) channel IRQs, then
 * either registers the I2C subdevs synchronously or arms an async
 * notifier, finishing initialization in vpif_probe_complete().
 */
static __init int vpif_probe(struct platform_device *pdev)
{
	struct vpif_subdev_info *subdevdata;
	struct i2c_adapter *i2c_adap;
	struct resource *res;
	int subdev_count;
	int res_idx = 0;
	int i, err;

	vpif_dev = &pdev->dev;

	err = initialize_vpif();
	if (err) {
		v4l2_err(vpif_dev->driver, "Error initializing vpif\n");
		return err;
	}

	err = v4l2_device_register(vpif_dev, &vpif_obj.v4l2_dev);
	if (err) {
		v4l2_err(vpif_dev->driver, "Error registering v4l2 device\n");
		return err;
	}

	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, res_idx))) {
		err = devm_request_irq(&pdev->dev, res->start, vpif_channel_isr,
				       IRQF_SHARED, VPIF_DRIVER_NAME,
				       (void *)(&vpif_obj.dev[res_idx]->
						channel_id));
		if (err) {
			/*
			 * Fix: propagate the error from devm_request_irq()
			 * instead of clobbering it with -EINVAL.
			 */
			goto vpif_unregister;
		}
		res_idx++;
	}

	vpif_obj.config = pdev->dev.platform_data;

	subdev_count = vpif_obj.config->subdev_count;
	vpif_obj.sd = kcalloc(subdev_count, sizeof(*vpif_obj.sd), GFP_KERNEL);
	if (!vpif_obj.sd) {
		err = -ENOMEM;
		goto vpif_unregister;
	}

	if (!vpif_obj.config->asd_sizes) {
		/* synchronous path: instantiate I2C subdevs directly */
		i2c_adap = i2c_get_adapter(1);
		for (i = 0; i < subdev_count; i++) {
			subdevdata = &vpif_obj.config->subdev_info[i];
			vpif_obj.sd[i] =
				v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev,
							  i2c_adap,
							  &subdevdata->
							  board_info,
							  NULL);
			if (!vpif_obj.sd[i]) {
				vpif_err("Error registering v4l2 subdevice\n");
				err = -ENODEV;
				goto probe_subdev_out;
			}
			v4l2_info(&vpif_obj.v4l2_dev,
				  "registered sub device %s\n",
				  subdevdata->name);
		}
		vpif_probe_complete();
	} else {
		/* asynchronous path: finish in vpif_async_complete() */
		vpif_obj.notifier.subdevs = vpif_obj.config->asd;
		vpif_obj.notifier.num_subdevs = vpif_obj.config->asd_sizes[0];
		vpif_obj.notifier.bound = vpif_async_bound;
		vpif_obj.notifier.complete = vpif_async_complete;
		err = v4l2_async_notifier_register(&vpif_obj.v4l2_dev,
						   &vpif_obj.notifier);
		if (err) {
			vpif_err("Error registering async notifier\n");
			err = -EINVAL;
			goto probe_subdev_out;
		}
	}

	return 0;

probe_subdev_out:
	/* free sub devices memory */
	kfree(vpif_obj.sd);
vpif_unregister:
	v4l2_device_unregister(&vpif_obj.v4l2_dev);

	return err;
}
/**
 * vpif_remove() - driver remove handler
 * @device: ptr to platform device structure
 *
 * The video devices are unregistered and the channel objects freed.
 */
static int vpif_remove(struct platform_device *device)
{
	struct common_obj *common;
	struct channel_obj *ch;
	int i;

	v4l2_device_unregister(&vpif_obj.v4l2_dev);

	kfree(vpif_obj.sd);
	/* un-register device */
	for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
		/* Get the pointer to the channel object */
		ch = vpif_obj.dev[i];
		common = &ch->common[VPIF_VIDEO_INDEX];
		/* Unregister video device */
		video_unregister_device(&ch->video_dev);
		kfree(vpif_obj.dev[i]);
	}
	return 0;
}
#ifdef CONFIG_PM_SLEEP
/**
 * vpif_suspend: vpif device suspend
 *
 * Disables capture and interrupts on every channel that is actively
 * streaming; channels that never started streaming are skipped.
 */
static int vpif_suspend(struct device *dev)
{

	struct common_obj *common;
	struct channel_obj *ch;
	int i;

	for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
		/* Get the pointer to the channel object */
		ch = vpif_obj.dev[i];
		common = &ch->common[VPIF_VIDEO_INDEX];

		if (!vb2_start_streaming_called(&common->buffer_queue))
			continue;

		mutex_lock(&common->lock);
		/* Disable channel */
		if (ch->channel_id == VPIF_CHANNEL0_VIDEO) {
			enable_channel0(0);
			channel0_intr_enable(0);
		}
		/* channel 1 also covers channel 0 in Y/C mux mode */
		if (ch->channel_id == VPIF_CHANNEL1_VIDEO ||
		    ycmux_mode == 2) {
			enable_channel1(0);
			channel1_intr_enable(0);
		}
		mutex_unlock(&common->lock);
	}

	return 0;
}
/*
 * vpif_resume: vpif device resume
 *
 * Mirror of vpif_suspend(): re-enables capture and interrupts on every
 * channel that was streaming when the system went down.
 */
static int vpif_resume(struct device *dev)
{
	struct common_obj *common;
	struct channel_obj *ch;
	int i;

	for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
		/* Get the pointer to the channel object */
		ch = vpif_obj.dev[i];
		common = &ch->common[VPIF_VIDEO_INDEX];

		if (!vb2_start_streaming_called(&common->buffer_queue))
			continue;

		mutex_lock(&common->lock);
		/* Enable channel */
		if (ch->channel_id == VPIF_CHANNEL0_VIDEO) {
			enable_channel0(1);
			channel0_intr_enable(1);
		}
		/* channel 1 also covers channel 0 in Y/C mux mode */
		if (ch->channel_id == VPIF_CHANNEL1_VIDEO ||
		    ycmux_mode == 2) {
			enable_channel1(1);
			channel1_intr_enable(1);
		}
		mutex_unlock(&common->lock);
	}

	return 0;
}
#endif
/* PM ops; the suspend/resume hooks are compiled out without CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(vpif_pm_ops, vpif_suspend, vpif_resume);

static __refdata struct platform_driver vpif_driver = {
	.driver = {
		.name	= VPIF_DRIVER_NAME,
		.pm	= &vpif_pm_ops,
	},
	.probe = vpif_probe,
	.remove = vpif_remove,
};

module_platform_driver(vpif_driver);
| bas-t/linux_media | drivers/media/platform/davinci/vpif_capture.c | C | gpl-2.0 | 44,209 |
// Copyright (C) 2011-2014 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
//
// { dg-do compile }
#include <map>
// Key type for the multimap regression test below: default- and
// copy-constructible, orderable via operator<, and — crucially —
// carrying a catch-all template constructor that can be selected for
// any argument type (the trigger for libstdc++/47628).
struct Key
{
  Key() { }

  Key(const Key&) { }

  // Greedy converting constructor: must NOT be chosen for
  // multimap::iterator when calling erase(iterator).
  template<typename T>
    Key(const T&)
    { }

  bool operator<(const Key&) const;
};
#if __cplusplus < 201103L
// libstdc++/47628
// Compile-only check (C++98 branch): multimap::erase(iterator) must
// resolve to the iterator overload and not be hijacked by Key's
// template constructor. The call merely has to compile.
void f()
{
  typedef std::multimap<Key, int> MMap;
  MMap mm;
  mm.insert(MMap::value_type());
  MMap::iterator i = mm.begin();
  mm.erase(i);
}
#endif
| iains/darwin-gcc-4-9 | libstdc++-v3/testsuite/23_containers/multimap/modifiers/erase/47628.cc | C++ | gpl-2.0 | 1,145 |
/*
* linux/arch/arm/mach-realview/realview_eb.c
*
* Copyright (C) 2004 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/sysdev.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl061.h>
#include <linux/amba/mmci.h>
#include <linux/amba/pl022.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/leds.h>
#include <asm/mach-types.h>
#include <asm/pmu.h>
#include <asm/pgtable.h>
#include <asm/hardware/gic.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/localtimer.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>
#include <mach/board-eb.h>
#include <mach/irqs.h>
#include "core.h"
static struct map_desc realview_eb_io_desc[] __initdata = {
{
.virtual = IO_ADDRESS(REALVIEW_SYS_BASE),
.pfn = __phys_to_pfn(REALVIEW_SYS_BASE),
.length = SZ_4K,
.type = MT_DEVICE,
}, {
.virtual = IO_ADDRESS(REALVIEW_EB_GIC_CPU_BASE),
.pfn = __phys_to_pfn(REALVIEW_EB_GIC_CPU_BASE),
.length = SZ_4K,
.type = MT_DEVICE,
}, {
.virtual = IO_ADDRESS(REALVIEW_EB_GIC_DIST_BASE),
.pfn = __phys_to_pfn(REALVIEW_EB_GIC_DIST_BASE),
.length = SZ_4K,
.type = MT_DEVICE,
}, {
.virtual = IO_ADDRESS(REALVIEW_SCTL_BASE),
.pfn = __phys_to_pfn(REALVIEW_SCTL_BASE),
.length = SZ_4K,
.type = MT_DEVICE,
}, {
.virtual = IO_ADDRESS(REALVIEW_EB_TIMER0_1_BASE),
.pfn = __phys_to_pfn(REALVIEW_EB_TIMER0_1_BASE),
.length = SZ_4K,
.type = MT_DEVICE,
}, {
.virtual = IO_ADDRESS(REALVIEW_EB_TIMER2_3_BASE),
.pfn = __phys_to_pfn(REALVIEW_EB_TIMER2_3_BASE),
.length = SZ_4K,
.type = MT_DEVICE,
},
#ifdef CONFIG_DEBUG_LL
{
.virtual = IO_ADDRESS(REALVIEW_EB_UART0_BASE),
.pfn = __phys_to_pfn(REALVIEW_EB_UART0_BASE),
.length = SZ_4K,
.type = MT_DEVICE,
}
#endif
};
static struct map_desc realview_eb11mp_io_desc[] __initdata = {
{
.virtual = IO_ADDRESS(REALVIEW_EB11MP_SCU_BASE),
.pfn = __phys_to_pfn(REALVIEW_EB11MP_SCU_BASE),
.length = SZ_4K,
.type = MT_DEVICE,
}, {
.virtual = IO_ADDRESS(REALVIEW_EB11MP_GIC_DIST_BASE),
.pfn = __phys_to_pfn(REALVIEW_EB11MP_GIC_DIST_BASE),
.length = SZ_4K,
.type = MT_DEVICE,
}, {
.virtual = IO_ADDRESS(REALVIEW_EB11MP_L220_BASE),
.pfn = __phys_to_pfn(REALVIEW_EB11MP_L220_BASE),
.length = SZ_8K,
.type = MT_DEVICE,
}
};
/*
 * Create the static I/O mappings for the RealView EB board. The extra
 * MPCore-specific regions (SCU, tile GIC distributor, L220 cache) are
 * only mapped when an ARM11 MPCore or Cortex-A9 MPCore core tile is
 * fitted.
 */
static void __init realview_eb_map_io(void)
{
	iotable_init(realview_eb_io_desc, ARRAY_SIZE(realview_eb_io_desc));
	if (core_tile_eb11mp() || core_tile_a9mp())
		iotable_init(realview_eb11mp_io_desc, ARRAY_SIZE(realview_eb11mp_io_desc));
}
static struct pl061_platform_data gpio0_plat_data = {
.gpio_base = 0,
.irq_base = -1,
};
static struct pl061_platform_data gpio1_plat_data = {
.gpio_base = 8,
.irq_base = -1,
};
static struct pl061_platform_data gpio2_plat_data = {
.gpio_base = 16,
.irq_base = -1,
};
static struct pl022_ssp_controller ssp0_plat_data = {
.bus_id = 0,
.enable_dma = 0,
.num_chipselect = 1,
};
/*
* RealView EB AMBA devices
*/
/*
* These devices are connected via the core APB bridge
*/
#define GPIO2_IRQ { IRQ_EB_GPIO2, NO_IRQ }
#define GPIO3_IRQ { IRQ_EB_GPIO3, NO_IRQ }
#define AACI_IRQ { IRQ_EB_AACI, NO_IRQ }
#define MMCI0_IRQ { IRQ_EB_MMCI0A, IRQ_EB_MMCI0B }
#define KMI0_IRQ { IRQ_EB_KMI0, NO_IRQ }
#define KMI1_IRQ { IRQ_EB_KMI1, NO_IRQ }
/*
* These devices are connected directly to the multi-layer AHB switch
*/
#define EB_SMC_IRQ { NO_IRQ, NO_IRQ }
#define MPMC_IRQ { NO_IRQ, NO_IRQ }
#define EB_CLCD_IRQ { IRQ_EB_CLCD, NO_IRQ }
#define DMAC_IRQ { IRQ_EB_DMA, NO_IRQ }
/*
* These devices are connected via the core APB bridge
*/
#define SCTL_IRQ { NO_IRQ, NO_IRQ }
#define EB_WATCHDOG_IRQ { IRQ_EB_WDOG, NO_IRQ }
#define EB_GPIO0_IRQ { IRQ_EB_GPIO0, NO_IRQ }
#define GPIO1_IRQ { IRQ_EB_GPIO1, NO_IRQ }
#define EB_RTC_IRQ { IRQ_EB_RTC, NO_IRQ }
/*
* These devices are connected via the DMA APB bridge
*/
#define SCI_IRQ { IRQ_EB_SCI, NO_IRQ }
#define EB_UART0_IRQ { IRQ_EB_UART0, NO_IRQ }
#define EB_UART1_IRQ { IRQ_EB_UART1, NO_IRQ }
#define EB_UART2_IRQ { IRQ_EB_UART2, NO_IRQ }
#define EB_UART3_IRQ { IRQ_EB_UART3, NO_IRQ }
#define EB_SSP_IRQ { IRQ_EB_SSP, NO_IRQ }
/* FPGA Primecells */
AMBA_DEVICE(aaci, "fpga:aaci", AACI, NULL);
AMBA_DEVICE(mmc0, "fpga:mmc0", MMCI0, &realview_mmc0_plat_data);
AMBA_DEVICE(kmi0, "fpga:kmi0", KMI0, NULL);
AMBA_DEVICE(kmi1, "fpga:kmi1", KMI1, NULL);
AMBA_DEVICE(uart3, "fpga:uart3", EB_UART3, NULL);
/* DevChip Primecells */
AMBA_DEVICE(smc, "dev:smc", EB_SMC, NULL);
AMBA_DEVICE(clcd, "dev:clcd", EB_CLCD, &clcd_plat_data);
AMBA_DEVICE(dmac, "dev:dmac", DMAC, NULL);
AMBA_DEVICE(sctl, "dev:sctl", SCTL, NULL);
AMBA_DEVICE(wdog, "dev:wdog", EB_WATCHDOG, NULL);
AMBA_DEVICE(gpio0, "dev:gpio0", EB_GPIO0, &gpio0_plat_data);
AMBA_DEVICE(gpio1, "dev:gpio1", GPIO1, &gpio1_plat_data);
AMBA_DEVICE(gpio2, "dev:gpio2", GPIO2, &gpio2_plat_data);
AMBA_DEVICE(rtc, "dev:rtc", EB_RTC, NULL);
AMBA_DEVICE(sci0, "dev:sci0", SCI, NULL);
AMBA_DEVICE(uart0, "dev:uart0", EB_UART0, NULL);
AMBA_DEVICE(uart1, "dev:uart1", EB_UART1, NULL);
AMBA_DEVICE(uart2, "dev:uart2", EB_UART2, NULL);
AMBA_DEVICE(ssp0, "dev:ssp0", EB_SSP, &ssp0_plat_data);
static struct amba_device *amba_devs[] __initdata = {
&dmac_device,
&uart0_device,
&uart1_device,
&uart2_device,
&uart3_device,
&smc_device,
&clcd_device,
&sctl_device,
&wdog_device,
&gpio0_device,
&gpio1_device,
&gpio2_device,
&rtc_device,
&sci0_device,
&ssp0_device,
&aaci_device,
&mmc0_device,
&kmi0_device,
&kmi1_device,
};
/*
* RealView EB platform devices
*/
static struct resource realview_eb_flash_resource = {
.start = REALVIEW_EB_FLASH_BASE,
.end = REALVIEW_EB_FLASH_BASE + REALVIEW_EB_FLASH_SIZE - 1,
.flags = IORESOURCE_MEM,
};
static struct resource realview_eb_eth_resources[] = {
[0] = {
.start = REALVIEW_EB_ETH_BASE,
.end = REALVIEW_EB_ETH_BASE + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_EB_ETH,
.end = IRQ_EB_ETH,
.flags = IORESOURCE_IRQ,
},
};
/*
* Detect and register the correct Ethernet device. RealView/EB rev D
* platforms use the newer SMSC LAN9118 Ethernet chip
*/
static int eth_device_register(void)
{
void __iomem *eth_addr = ioremap(REALVIEW_EB_ETH_BASE, SZ_4K);
const char *name = NULL;
u32 idrev;
if (!eth_addr)
return -ENOMEM;
idrev = readl(eth_addr + 0x50);
if ((idrev & 0xFFFF0000) != 0x01180000)
/* SMSC LAN9118 not present, use LAN91C111 instead */
name = "smc91x";
iounmap(eth_addr);
return realview_eth_register(name, realview_eb_eth_resources);
}
static struct resource realview_eb_isp1761_resources[] = {
[0] = {
.start = REALVIEW_EB_USB_BASE,
.end = REALVIEW_EB_USB_BASE + SZ_128K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_EB_USB,
.end = IRQ_EB_USB,
.flags = IORESOURCE_IRQ,
},
};
static struct resource pmu_resources[] = {
[0] = {
.start = IRQ_EB11MP_PMU_CPU0,
.end = IRQ_EB11MP_PMU_CPU0,
.flags = IORESOURCE_IRQ,
},
[1] = {
.start = IRQ_EB11MP_PMU_CPU1,
.end = IRQ_EB11MP_PMU_CPU1,
.flags = IORESOURCE_IRQ,
},
[2] = {
.start = IRQ_EB11MP_PMU_CPU2,
.end = IRQ_EB11MP_PMU_CPU2,
.flags = IORESOURCE_IRQ,
},
[3] = {
.start = IRQ_EB11MP_PMU_CPU3,
.end = IRQ_EB11MP_PMU_CPU3,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device pmu_device = {
.name = "arm-pmu",
.id = ARM_PMU_DEVICE_CPU,
.num_resources = ARRAY_SIZE(pmu_resources),
.resource = pmu_resources,
};
static struct resource char_lcd_resources[] = {
{
.start = REALVIEW_CHAR_LCD_BASE,
.end = (REALVIEW_CHAR_LCD_BASE + SZ_4K - 1),
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_EB_CHARLCD,
.end = IRQ_EB_CHARLCD,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device char_lcd_device = {
.name = "arm-charlcd",
.id = -1,
.num_resources = ARRAY_SIZE(char_lcd_resources),
.resource = char_lcd_resources,
};
/*
 * Initialise the interrupt controller(s). With an ARM11 MPCore or
 * Cortex-A9 MPCore core tile the tile's own GIC is primary and —
 * except on ARM11MP rev B — the board GIC is cascaded behind it.
 * Without such a tile the board GIC is the only controller.
 */
static void __init gic_init_irq(void)
{
	if (core_tile_eb11mp() || core_tile_a9mp()) {
		unsigned int pldctrl;

		/*
		 * Switch the FPGA to the new IRQ routing mode: unlock the
		 * system registers with the magic value 0xa05f, set bit 23
		 * of PLD_CTRL1, then write 0 to re-lock.
		 */
		/* new irq mode */
		writel(0x0000a05f, __io_address(REALVIEW_SYS_LOCK));
		pldctrl = readl(__io_address(REALVIEW_SYS_BASE) + REALVIEW_EB11MP_SYS_PLD_CTRL1);
		pldctrl |= 0x00800000;
		writel(pldctrl, __io_address(REALVIEW_SYS_BASE) + REALVIEW_EB11MP_SYS_PLD_CTRL1);
		writel(0x00000000, __io_address(REALVIEW_SYS_LOCK));

		/* core tile GIC, primary */
		gic_init(0, 29, __io_address(REALVIEW_EB11MP_GIC_DIST_BASE),
			 __io_address(REALVIEW_EB11MP_GIC_CPU_BASE));

#ifndef CONFIG_REALVIEW_EB_ARM11MP_REVB
		/* board GIC, secondary; cascaded off the core tile GIC */
		gic_init(1, 96, __io_address(REALVIEW_EB_GIC_DIST_BASE),
			 __io_address(REALVIEW_EB_GIC_CPU_BASE));
		gic_cascade_irq(1, IRQ_EB11MP_EB_IRQ1);
#endif
	} else {
		/* board GIC, primary */
		gic_init(0, 29, __io_address(REALVIEW_EB_GIC_DIST_BASE),
			 __io_address(REALVIEW_EB_GIC_CPU_BASE));
	}
}
/*
 * Fix up the IRQ numbers for the RealView EB/ARM11MPCore tile.
 * When the MPCore tile's GIC becomes the primary controller (see
 * gic_init_irq()) all board peripherals receive different interrupt
 * numbers, so every statically initialised IRQ must be rewritten.
 */
static void realview_eb11mp_fixup(void)
{
	/* AMBA devices */
	dmac_device.irq[0] = IRQ_EB11MP_DMA;
	uart0_device.irq[0] = IRQ_EB11MP_UART0;
	uart1_device.irq[0] = IRQ_EB11MP_UART1;
	uart2_device.irq[0] = IRQ_EB11MP_UART2;
	uart3_device.irq[0] = IRQ_EB11MP_UART3;
	clcd_device.irq[0] = IRQ_EB11MP_CLCD;
	wdog_device.irq[0] = IRQ_EB11MP_WDOG;
	gpio0_device.irq[0] = IRQ_EB11MP_GPIO0;
	gpio1_device.irq[0] = IRQ_EB11MP_GPIO1;
	gpio2_device.irq[0] = IRQ_EB11MP_GPIO2;
	rtc_device.irq[0] = IRQ_EB11MP_RTC;
	sci0_device.irq[0] = IRQ_EB11MP_SCI;
	ssp0_device.irq[0] = IRQ_EB11MP_SSP;
	aaci_device.irq[0] = IRQ_EB11MP_AACI;
	/* MMC uses both interrupt lines */
	mmc0_device.irq[0] = IRQ_EB11MP_MMCI0A;
	mmc0_device.irq[1] = IRQ_EB11MP_MMCI0B;
	kmi0_device.irq[0] = IRQ_EB11MP_KMI0;
	kmi1_device.irq[0] = IRQ_EB11MP_KMI1;

	/* platform devices */
	realview_eb_eth_resources[1].start = IRQ_EB11MP_ETH;
	realview_eb_eth_resources[1].end = IRQ_EB11MP_ETH;
	realview_eb_isp1761_resources[1].start = IRQ_EB11MP_USB;
	realview_eb_isp1761_resources[1].end = IRQ_EB11MP_USB;
}
/*
 * Set up the system timers. Each 4K timer block holds two timers; the
 * second one sits 0x20 bytes above the first. On MPCore tiles the
 * per-CPU TWD local timer base is also recorded and the timer IRQ
 * comes from the tile's (renumbered) interrupt space.
 */
static void __init realview_eb_timer_init(void)
{
	unsigned int timer_irq;

	timer0_va_base = __io_address(REALVIEW_EB_TIMER0_1_BASE);
	timer1_va_base = __io_address(REALVIEW_EB_TIMER0_1_BASE) + 0x20;
	timer2_va_base = __io_address(REALVIEW_EB_TIMER2_3_BASE);
	timer3_va_base = __io_address(REALVIEW_EB_TIMER2_3_BASE) + 0x20;

	if (core_tile_eb11mp() || core_tile_a9mp()) {
#ifdef CONFIG_LOCAL_TIMERS
		twd_base = __io_address(REALVIEW_EB11MP_TWD_BASE);
#endif
		timer_irq = IRQ_EB11MP_TIMER0_1;
	} else
		timer_irq = IRQ_EB_TIMER0_1;

	realview_timer_init(timer_irq);
}
static struct sys_timer realview_eb_timer = {
.init = realview_eb_timer_init,
};
/*
 * Board reset hook (installed into realview_reset by
 * realview_eb_init()). @mode is part of the arch reset interface but
 * is not used here.
 */
static void realview_eb_reset(char mode)
{
	void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL);
	void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK);

	/*
	 * To reset, we hit the on-board reset register
	 * in the system FPGA
	 */
	/* Unlock the system registers before touching RESETCTL */
	__raw_writel(REALVIEW_SYS_LOCK_VAL, lock_ctrl);
	/*
	 * NOTE(review): the reset write is only issued for the ARM11
	 * MPCore tile; other tiles fall through without writing
	 * RESETCTL — confirm this is intentional.
	 */
	if (core_tile_eb11mp())
		__raw_writel(0x0008, reset_ctrl);
}
/*
 * Board init: apply the MPCore IRQ fixups, enable the L220 outer cache
 * and register the PMU when an MPCore tile is fitted; then register
 * the flash, I2C, character LCD, Ethernet, USB and AMBA devices, and
 * finally install the board reset hook.
 */
static void __init realview_eb_init(void)
{
	int i;

	if (core_tile_eb11mp() || core_tile_a9mp()) {
		/* Must run before devices are registered with EB IRQ numbers */
		realview_eb11mp_fixup();

#ifdef CONFIG_CACHE_L2X0
		/* 1MB (128KB/way), 8-way associativity, evmon/parity/share enabled
		 * Bits:  .... ...0 0111 1001 0000 .... .... .... */
		l2x0_init(__io_address(REALVIEW_EB11MP_L220_BASE), 0x00790000, 0xfe000fff);
#endif
		platform_device_register(&pmu_device);
	}

	realview_flash_register(&realview_eb_flash_resource, 1);
	platform_device_register(&realview_i2c_device);
	platform_device_register(&char_lcd_device);
	eth_device_register();
	realview_usb_register(realview_eb_isp1761_resources);

	for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
		struct amba_device *d = amba_devs[i];
		amba_device_register(d, &iomem_resource);
	}

#ifdef CONFIG_LEDS
	leds_event = realview_leds_event;
#endif

	realview_reset = realview_eb_reset;
}
MACHINE_START(REALVIEW_EB, "ARM-RealView EB")
/* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
.atag_offset = 0x100,
.fixup = realview_fixup,
.map_io = realview_eb_map_io,
.init_early = realview_init_early,
.init_irq = gic_init_irq,
.timer = &realview_eb_timer,
.handle_irq = gic_handle_irq,
.init_machine = realview_eb_init,
#ifdef CONFIG_ZONE_DMA
.dma_zone_size = SZ_256M,
#endif
MACHINE_END
| zhengsjembest/linux_am335x | linux-3.2.0-psp04.06.00.08.sdk/arch/arm/mach-realview/realview_eb.c | C | gpl-2.0 | 13,095 |
/*
* linux/drivers/media/video/samsung/mfc5x/mfc_buf.h
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* Buffer manager for Samsung MFC (Multi Function Codec - FIMV) driver
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __MFC_BUF_H_
#define __MFC_BUF_H_ __FILE__
#include <linux/list.h>
#include "mfc.h"
#include "mfc_inst.h"
#include "mfc_interface.h"
/* FIXME */
#define ALIGN_4B (1 << 2)
#define ALIGN_2KB (1 << 11)
#define ALIGN_4KB (1 << 12)
#define ALIGN_8KB (1 << 13)
#define ALIGN_64KB (1 << 16)
#define ALIGN_128KB (1 << 17)
#define ALIGN_W 128
#define ALIGN_H 32
/* System */ /* Size, Port, Align */
#define MFC_FW_SYSTEM_SIZE (0x80000) /* 512KB, A, N(4KB for VMEM) */
/* Instance */
#define MFC_CTX_SIZE_L (0x96000) /* 600KB, N, 2KB, H.264 Decoding only */
#define MFC_CTX_SIZE (0x2800) /* 10KB, N, 2KB */
#define MFC_SHM_SIZE (0x400) /* 1KB, N, 4B */
/* Decoding */
#define MFC_CPB_SIZE (0x400000) /* Max.4MB, A, 2KB */
#define MFC_DESC_SIZE (0x20000) /* Max.128KB, A, 2KB */
#define MFC_DEC_NBMV_SIZE (0x4000) /* 16KB, A, 2KB */
#define MFC_DEC_NBIP_SIZE (0x8000) /* 32KB, A, 2KB */
#define MFC_DEC_NBDCAC_SIZE (0x4000) /* 16KB, A, 2KB */
#define MFC_DEC_UPNBMV_SIZE (0x11000) /* 68KB, A, 2KB */
#define MFC_DEC_SAMV_SIZE (0x40000) /* 256KB, A, 2KB */
#define MFC_DEC_OTLINE_SIZE (0x8000) /* 32KB, A, 2KB */
#define MFC_DEC_SYNPAR_SIZE (0x11000) /* 68KB, A, 2KB */
#define MFC_DEC_BITPLANE_SIZE (0x800) /* 2KB, A, 2KB */
/* Encoding */
#define MFC_STRM_SIZE (0x300000) /* 3MB, A, 2KB (multi. 4KB) */
/* FIXME: variable size */
#define MFC_ENC_UPMV_SIZE (0x10000) /* Var, A, 2KB */
#define MFC_ENC_COLFLG_SIZE (0x10000) /* Var, A, 2KB */
#define MFC_ENC_INTRAMD_SIZE (0x10000) /* Var, A, 2KB */
#define MFC_ENC_INTRAPRED_SIZE (0x4000) /* 16KB, A, 2KB */
#define MFC_ENC_NBORINFO_SIZE (0x10000) /* Var, A, 2KB */
#define MFC_ENC_ACDCCOEF_SIZE (0x10000) /* Var, A, 2KB */
#define MFC_LUMA_ALIGN ALIGN_8KB
#define MFC_CHROMA_ALIGN ALIGN_8KB
#define MFC_MV_ALIGN ALIGN_8KB /* H.264 Decoding only */
#define PORT_A 0
#define PORT_B 1
/* FIXME: MFC Buffer Type add as allocation parameter */
/*
#define MBT_ACCESS_MASK (0xFF << 24)
#define MBT_SYSMMU (0x01 << 24)
*/
#define MBT_KERNEL (0x02 << 24)
#define MBT_USER (0x04 << 24)
#define MBT_OTHER (0x08 << 24)
#if 0
#define MBT_TYPE_MASK (0xFF << 16)
#define MBT_CTX (MBT_SYSMMU | MBT_KERNEL | (0x01 << 16))/* S, K */
#define MBT_DESC (MBT_SYSMMU | (0x02 << 16)) /* S */
#define MBT_CODEC (MBT_SYSMMU | (0x04 << 16)) /* S */
#define MBT_SHM (MBT_SYSMMU | MBT_KERNEL | (0x08 << 16))/* S, K */
#define MBT_CPB (MBT_SYSMMU | MBT_USER | (0x10 << 16))/* D: S, [K], U E: */
#define MBT_DPB (MBT_SYSMMU | MBT_USER | (0x20 << 16))/* D: S, [K], U E: */
#endif
#define MBT_CTX (MBT_KERNEL | (0x01 << 16)) /* S, K */
#define MBT_DESC (0x02 << 16) /* S */
#define MBT_CODEC (0x04 << 16) /* S */
#define MBT_SHM (MBT_KERNEL | (0x08 << 16)) /* S, K */
#if 0
#define MBT_CPB (MBT_USER | (0x10 << 16)) /* D: S, [K], U E: */
#define MBT_DPB (MBT_USER | (0x20 << 16)) /* D: S, [K], U E: */
#endif
#define MBT_CPB (MBT_KERNEL | MBT_USER | (0x10 << 16)) /* D: S, [K], U E: */
#define MBT_DPB (MBT_KERNEL | MBT_USER | (0x20 << 16)) /* D: S, [K], U E: */
/* Free-block selection policy used by the MFC buffer allocator
 * (see mfc_set_buf_alloc_scheme()). */
enum MFC_BUF_ALLOC_SCHEME {
	MBS_BEST_FIT = 0,	/* smallest free block that fits */
	MBS_FIRST_FIT = 1,	/* first free block that fits */
};
/* Remove before Release */
#if 0
#define CPB_BUF_SIZE (0x400000) /* 3MB : 3x1024x1024 for decoder */
#define DESC_BUF_SIZE (0x20000) /* 128KB : 128x1024 */
#define SHARED_BUF_SIZE (0x10000) /* 64KB : 64x1024 */
#define PRED_BUF_SIZE (0x10000) /* 64KB : 64x1024 */
#define DEC_CODEC_BUF_SIZE (0x80000) /* 512KB : 512x1024 size per instance */
#define ENC_CODEC_BUF_SIZE (0x50000) /* 320KB : 512x1024 size per instance */
#define STREAM_BUF_SIZE (0x200000) /* 2MB : 2x1024x1024 for encoder */
#define MV_BUF_SIZE (0x10000) /* 64KB : 64x1024 for encoder */
#define MFC_CONTEXT_SIZE_L (640 * 1024) /* 600KB -> 640KB for alignment */
#define VC1DEC_CONTEXT_SIZE (64 * 1024) /* 10KB -> 64KB for alignment */
#define MPEG2DEC_CONTEXT_SIZE (64 * 1024) /* 10KB -> 64KB for alignment */
#define H263DEC_CONTEXT_SIZE (64 * 1024) /* 10KB -> 64KB for alignment */
#define MPEG4DEC_CONTEXT_SIZE (64 * 1024) /* 10KB -> 64KB for alignment */
#define H264ENC_CONTEXT_SIZE (64 * 1024) /* 10KB -> 64KB for alignment */
#define MPEG4ENC_CONTEXT_SIZE (64 * 1024) /* 10KB -> 64KB for alignment */
#define H263ENC_CONTEXT_SIZE (64 * 1024) /* 10KB -> 64KB for alignment */
#define DESC_BUF_SIZE (0x20000) /* 128KB : 128x1024 */
#define SHARED_MEM_SIZE (0x1000) /* 4KB : 4x1024 size */
#define CPB_BUF_SIZE (0x400000) /* 4MB : 4x1024x1024 for decoder */
#define STREAM_BUF_SIZE (0x200000) /* 2MB : 2x1024x1024 for encoder */
#define ENC_UP_INTRA_PRED_SIZE (0x10000) /* 64KB : 64x1024 for encoder */
#endif
/*
 * Book-keeping record for one region handed out by the MFC buffer
 * manager (see mfc_alloc_buf()). Linked on the allocator's list of
 * allocations and owned by a single instance context.
 */
struct mfc_alloc_buffer {
	struct list_head list;	/* link in the allocator's allocation list */
	unsigned long real;	/* phys. or virt. addr for MFC */
	unsigned int size;	/* allocation size */
	unsigned char *addr;	/* kernel virtual address space */
	unsigned int type;	/* buffer type (MBT_* flags above) */
	int owner;		/* instance context id */
#if defined(CONFIG_VIDEO_MFC_VCM_UMP)
	/* NOTE(review): VCM/UMP mapping handles — exact roles of vcm_s
	 * vs. vcm_k not evident from this header; verify in mfc_buf.c. */
	struct vcm_mmu_res *vcm_s;
	struct vcm_res *vcm_k;
	unsigned long vcm_addr;
	size_t vcm_size;
	void *ump_handle;
#elif defined(CONFIG_S5P_VMEM)
	unsigned int vmem_cookie;
	unsigned long vmem_addr;
	size_t vmem_size;
#else
	unsigned int ofs;	/*
				 * offset phys. or virt. contiguous memory
				 * phys.[bootmem, memblock] virt.[vmalloc]
				 * when user use mmap,
				 * user can access whole of memory by offset.
				 */
#endif
};
/*
 * Node on the allocator's free list: a contiguous run of MFC memory
 * starting at @real and @size bytes long.
 */
struct mfc_free_buffer {
	struct list_head list;	/* link in the free list */
	unsigned long real;	/* phys. or virt. addr for MFC */
	unsigned int size;	/* length of the free run in bytes */
};
void mfc_print_buf(void);
int mfc_init_buf(void);
void mfc_final_buf(void);
void mfc_set_buf_alloc_scheme(enum MFC_BUF_ALLOC_SCHEME scheme);
void mfc_merge_buf(void);
struct mfc_alloc_buffer *_mfc_alloc_buf(
struct mfc_inst_ctx *ctx, unsigned int size, int align, int flag);
int mfc_alloc_buf(
struct mfc_inst_ctx *ctx, struct mfc_buf_alloc_arg* args, int flag);
int _mfc_free_buf(unsigned long real);
int mfc_free_buf(struct mfc_inst_ctx *ctx, unsigned int key);
void mfc_free_buf_type(int owner, int type);
void mfc_free_buf_inst(int owner);
unsigned long mfc_get_buf_real(int owner, unsigned int key);
/*
unsigned char *mfc_get_buf_addr(int owner, unsigned char *user);
unsigned char *_mfc_get_buf_addr(int owner, unsigned char *user);
*/
#ifdef CONFIG_VIDEO_MFC_VCM_UMP
unsigned int mfc_vcm_bind_from_others(struct mfc_inst_ctx *ctx,
struct mfc_buf_alloc_arg *args, int flag);
void *mfc_get_buf_ump_handle(unsigned long real);
#endif
#endif /* __MFC_BUF_H_ */
| OtherCrashOverride/linux | drivers/media/video/samsung/mfc5x/mfc_buf.h | C | gpl-2.0 | 7,152 |
/* A Bison parser, made by GNU Bison 1.875. */
/* Skeleton parser for Yacc-like parsing with Bison,
Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002 Free Software Foundation, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
/* As a special exception, when this file is copied by Bison into a
Bison output file, you may use that output file without restriction.
This special exception was added by the Free Software Foundation
in version 1.24 of Bison. */
/* Tokens. */
#ifndef YYTOKENTYPE
# define YYTOKENTYPE
/* Put the tokens into the symbol table, so that GDB and other debuggers
know about them. */
enum yytokentype {
TOK_IDENT = 258,
TOK_ATIDENT = 259,
TOK_CONST_INT = 260,
TOK_CONST_FLOAT = 261,
TOK_CONST_MVA = 262,
TOK_QUOTED_STRING = 263,
TOK_USERVAR = 264,
TOK_SYSVAR = 265,
TOK_CONST_STRINGS = 266,
TOK_BAD_NUMERIC = 267,
TOK_SUBKEY = 268,
TOK_DOT_NUMBER = 269,
TOK_ADD = 270,
TOK_AGENT = 271,
TOK_ALTER = 272,
TOK_AS = 273,
TOK_ASC = 274,
TOK_ATTACH = 275,
TOK_ATTRIBUTES = 276,
TOK_AVG = 277,
TOK_BEGIN = 278,
TOK_BETWEEN = 279,
TOK_BIGINT = 280,
TOK_BOOL = 281,
TOK_BY = 282,
TOK_CALL = 283,
TOK_CHARACTER = 284,
TOK_CHUNK = 285,
TOK_COLLATION = 286,
TOK_COLUMN = 287,
TOK_COMMIT = 288,
TOK_COMMITTED = 289,
TOK_COUNT = 290,
TOK_CREATE = 291,
TOK_DATABASES = 292,
TOK_DELETE = 293,
TOK_DESC = 294,
TOK_DESCRIBE = 295,
TOK_DISTINCT = 296,
TOK_DIV = 297,
TOK_DOUBLE = 298,
TOK_DROP = 299,
TOK_FACET = 300,
TOK_FALSE = 301,
TOK_FLOAT = 302,
TOK_FLUSH = 303,
TOK_FOR = 304,
TOK_FROM = 305,
TOK_FUNCTION = 306,
TOK_GLOBAL = 307,
TOK_GROUP = 308,
TOK_GROUPBY = 309,
TOK_GROUP_CONCAT = 310,
TOK_HAVING = 311,
TOK_ID = 312,
TOK_IN = 313,
TOK_INDEX = 314,
TOK_INSERT = 315,
TOK_INT = 316,
TOK_INTEGER = 317,
TOK_INTO = 318,
TOK_IS = 319,
TOK_ISOLATION = 320,
TOK_JSON = 321,
TOK_LEVEL = 322,
TOK_LIKE = 323,
TOK_LIMIT = 324,
TOK_MATCH = 325,
TOK_MAX = 326,
TOK_META = 327,
TOK_MIN = 328,
TOK_MOD = 329,
TOK_MULTI = 330,
TOK_MULTI64 = 331,
TOK_NAMES = 332,
TOK_NULL = 333,
TOK_OPTION = 334,
TOK_ORDER = 335,
TOK_OPTIMIZE = 336,
TOK_PLAN = 337,
TOK_PLUGIN = 338,
TOK_PLUGINS = 339,
TOK_PROFILE = 340,
TOK_RAND = 341,
TOK_RAMCHUNK = 342,
TOK_READ = 343,
TOK_RECONFIGURE = 344,
TOK_RELOAD = 345,
TOK_REPEATABLE = 346,
TOK_REPLACE = 347,
TOK_REMAP = 348,
TOK_RETURNS = 349,
TOK_ROLLBACK = 350,
TOK_RTINDEX = 351,
TOK_SELECT = 352,
TOK_SERIALIZABLE = 353,
TOK_SET = 354,
TOK_SETTINGS = 355,
TOK_SESSION = 356,
TOK_SHOW = 357,
TOK_SONAME = 358,
TOK_START = 359,
TOK_STATUS = 360,
TOK_STRING = 361,
TOK_SUM = 362,
TOK_TABLE = 363,
TOK_TABLES = 364,
TOK_THREADS = 365,
TOK_TO = 366,
TOK_TRANSACTION = 367,
TOK_TRUE = 368,
TOK_TRUNCATE = 369,
TOK_TYPE = 370,
TOK_UNCOMMITTED = 371,
TOK_UPDATE = 372,
TOK_VALUES = 373,
TOK_VARIABLES = 374,
TOK_WARNINGS = 375,
TOK_WEIGHT = 376,
TOK_WHERE = 377,
TOK_WITHIN = 378,
TOK_OR = 379,
TOK_AND = 380,
TOK_NE = 381,
TOK_GTE = 382,
TOK_LTE = 383,
TOK_NOT = 384,
TOK_NEG = 385
};
#endif
#define TOK_IDENT 258
#define TOK_ATIDENT 259
#define TOK_CONST_INT 260
#define TOK_CONST_FLOAT 261
#define TOK_CONST_MVA 262
#define TOK_QUOTED_STRING 263
#define TOK_USERVAR 264
#define TOK_SYSVAR 265
#define TOK_CONST_STRINGS 266
#define TOK_BAD_NUMERIC 267
#define TOK_SUBKEY 268
#define TOK_DOT_NUMBER 269
#define TOK_ADD 270
#define TOK_AGENT 271
#define TOK_ALTER 272
#define TOK_AS 273
#define TOK_ASC 274
#define TOK_ATTACH 275
#define TOK_ATTRIBUTES 276
#define TOK_AVG 277
#define TOK_BEGIN 278
#define TOK_BETWEEN 279
#define TOK_BIGINT 280
#define TOK_BOOL 281
#define TOK_BY 282
#define TOK_CALL 283
#define TOK_CHARACTER 284
#define TOK_CHUNK 285
#define TOK_COLLATION 286
#define TOK_COLUMN 287
#define TOK_COMMIT 288
#define TOK_COMMITTED 289
#define TOK_COUNT 290
#define TOK_CREATE 291
#define TOK_DATABASES 292
#define TOK_DELETE 293
#define TOK_DESC 294
#define TOK_DESCRIBE 295
#define TOK_DISTINCT 296
#define TOK_DIV 297
#define TOK_DOUBLE 298
#define TOK_DROP 299
#define TOK_FACET 300
#define TOK_FALSE 301
#define TOK_FLOAT 302
#define TOK_FLUSH 303
#define TOK_FOR 304
#define TOK_FROM 305
#define TOK_FUNCTION 306
#define TOK_GLOBAL 307
#define TOK_GROUP 308
#define TOK_GROUPBY 309
#define TOK_GROUP_CONCAT 310
#define TOK_HAVING 311
#define TOK_ID 312
#define TOK_IN 313
#define TOK_INDEX 314
#define TOK_INSERT 315
#define TOK_INT 316
#define TOK_INTEGER 317
#define TOK_INTO 318
#define TOK_IS 319
#define TOK_ISOLATION 320
#define TOK_JSON 321
#define TOK_LEVEL 322
#define TOK_LIKE 323
#define TOK_LIMIT 324
#define TOK_MATCH 325
#define TOK_MAX 326
#define TOK_META 327
#define TOK_MIN 328
#define TOK_MOD 329
#define TOK_MULTI 330
#define TOK_MULTI64 331
#define TOK_NAMES 332
#define TOK_NULL 333
#define TOK_OPTION 334
#define TOK_ORDER 335
#define TOK_OPTIMIZE 336
#define TOK_PLAN 337
#define TOK_PLUGIN 338
#define TOK_PLUGINS 339
#define TOK_PROFILE 340
#define TOK_RAND 341
#define TOK_RAMCHUNK 342
#define TOK_READ 343
#define TOK_RECONFIGURE 344
#define TOK_RELOAD 345
#define TOK_REPEATABLE 346
#define TOK_REPLACE 347
#define TOK_REMAP 348
#define TOK_RETURNS 349
#define TOK_ROLLBACK 350
#define TOK_RTINDEX 351
#define TOK_SELECT 352
#define TOK_SERIALIZABLE 353
#define TOK_SET 354
#define TOK_SETTINGS 355
#define TOK_SESSION 356
#define TOK_SHOW 357
#define TOK_SONAME 358
#define TOK_START 359
#define TOK_STATUS 360
#define TOK_STRING 361
#define TOK_SUM 362
#define TOK_TABLE 363
#define TOK_TABLES 364
#define TOK_THREADS 365
#define TOK_TO 366
#define TOK_TRANSACTION 367
#define TOK_TRUE 368
#define TOK_TRUNCATE 369
#define TOK_TYPE 370
#define TOK_UNCOMMITTED 371
#define TOK_UPDATE 372
#define TOK_VALUES 373
#define TOK_VARIABLES 374
#define TOK_WARNINGS 375
#define TOK_WEIGHT 376
#define TOK_WHERE 377
#define TOK_WITHIN 378
#define TOK_OR 379
#define TOK_AND 380
#define TOK_NE 381
#define TOK_GTE 382
#define TOK_LTE 383
#define TOK_NOT 384
#define TOK_NEG 385
#if ! defined (YYSTYPE) && ! defined (YYSTYPE_IS_DECLARED)
typedef int YYSTYPE;
# define yystype YYSTYPE /* obsolescent; will be withdrawn */
# define YYSTYPE_IS_DECLARED 1
# define YYSTYPE_IS_TRIVIAL 1
#endif
| omelnic/sphinxsearch | src/yysphinxql.h | C | gpl-2.0 | 7,399 |
/*
* ISP1362 HCD (Host Controller Driver) for USB.
*
* Copyright (C) 2005 Lothar Wassmann <LW@KARO-electronics.de>
*
* Derived from the SL811 HCD, rewritten for ISP116x.
* Copyright (C) 2005 Olav Kongas <ok@artecdesign.ee>
*
* Portions:
* Copyright (C) 2004 Psion Teklogix (for NetBook PRO)
* Copyright (C) 2004 David Brownell
*/
/*
* The ISP1362 chip requires a large delay (300ns and 462ns) between
* accesses to the address and data register.
* The following timing options exist:
*
* 1. Configure your memory controller to add such delays if it can (the best)
* 2. Implement platform-specific delay function possibly
* combined with configuring the memory controller; see
* include/linux/usb_isp1362.h for more info.
* 3. Use ndelay (easiest, poorest).
*
* Use the corresponding macros USE_PLATFORM_DELAY and USE_NDELAY in the
* platform specific section of isp1362.h to select the appropriate variant.
*
* Also note that according to the Philips "ISP1362 Errata" document
* Rev 1.00 from 27 May data corruption may occur when the #WR signal
* is reasserted (even with #CS deasserted) within 132ns after a
* write cycle to any controller register. If the hardware doesn't
* implement the recommended fix (gating the #WR with #CS) software
* must ensure that no further write cycle (not necessarily to the chip!)
* is issued by the CPU within this interval.
* For PXA25x this can be ensured by using VLIO with the maximum
* recovery time (MSCx = 0x7f8c) with a memory clock of 99.53 MHz.
*/
#ifdef CONFIG_USB_DEBUG
# define ISP1362_DEBUG
#else
# undef ISP1362_DEBUG
#endif
/*
* The PXA255 UDC apparently doesn't handle GET_STATUS, GET_CONFIG and
* GET_INTERFACE requests correctly when the SETUP and DATA stages of the
* requests are carried out in separate frames. This will delay any SETUP
* packets until the start of the next frame so that this situation is
* unlikely to occur (and makes usbtest happy running with a PXA255 target
* device).
*/
#undef BUGGY_PXA2XX_UDC_USBTEST
#undef PTD_TRACE
#undef URB_TRACE
#undef VERBOSE
#undef REGISTERS
/* This enables a memory test on the ISP1362 chip memory to make sure the
* chip access timing is correct.
*/
#undef CHIP_BUFFER_TEST
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/usb/isp1362.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/io.h>
#include <linux/bitmap.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
static int dbg_level;
#ifdef ISP1362_DEBUG
module_param(dbg_level, int, 0644);
#else
module_param(dbg_level, int, 0);
#define STUB_DEBUG_FILE
#endif
#include "../core/hcd.h"
#include "../core/usb.h"
#include "isp1362.h"
#define DRIVER_VERSION "2005-04-04"
#define DRIVER_DESC "ISP1362 USB Host Controller Driver"
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
static const char hcd_name[] = "isp1362-hcd";
static void isp1362_hc_stop(struct usb_hcd *hcd);
static int isp1362_hc_start(struct usb_hcd *hcd);
/*-------------------------------------------------------------------------*/
/*
 * When called from the interrupthandler only isp1362_hcd->irqenb is modified,
 * since the interrupt handler will write isp1362_hcd->irqenb to HCuPINT upon
 * completion.
 * We don't need a 'disable' counterpart, since interrupts will be disabled
 * only by the interrupt handler.
 */
static inline void isp1362_enable_int(struct isp1362_hcd *isp1362_hcd, u16 mask)
{
	/* Nothing to do if every bit in mask is already enabled */
	if ((isp1362_hcd->irqenb | mask) == isp1362_hcd->irqenb)
		return;
	/*
	 * NOTE(review): writing the newly-enabled bits to HCuPINT appears
	 * to clear any stale status before they are unmasked — confirm
	 * against the ISP1362 datasheet.
	 */
	if (mask & ~isp1362_hcd->irqenb)
		isp1362_write_reg16(isp1362_hcd, HCuPINT, mask & ~isp1362_hcd->irqenb);
	isp1362_hcd->irqenb |= mask;
	/* In IRQ context defer the HCuPINTENB write to the handler itself */
	if (isp1362_hcd->irq_active)
		return;
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
}
/*-------------------------------------------------------------------------*/
/*
 * Map a chip buffer offset back to the PTD queue (ISTL0, ISTL1, INTL
 * or ATL) that owns it. The queues occupy consecutive buffer regions
 * in that order, so the offset is classified by comparing against each
 * successive queue's start. Returns NULL (with a warning) for an
 * out-of-range offset.
 */
static inline struct isp1362_ep_queue *get_ptd_queue(struct isp1362_hcd *isp1362_hcd,
						     u16 offset)
{
	struct isp1362_ep_queue *epq = NULL;

	if (offset < isp1362_hcd->istl_queue[1].buf_start)
		epq = &isp1362_hcd->istl_queue[0];
	else if (offset < isp1362_hcd->intl_queue.buf_start)
		epq = &isp1362_hcd->istl_queue[1];
	else if (offset < isp1362_hcd->atl_queue.buf_start)
		epq = &isp1362_hcd->intl_queue;
	else if (offset < isp1362_hcd->atl_queue.buf_start +
		   isp1362_hcd->atl_queue.buf_size)
		epq = &isp1362_hcd->atl_queue;

	if (epq)
		DBG(1, "%s: PTD $%04x is on %s queue\n", __func__, offset, epq->name);
	else
		pr_warning("%s: invalid PTD $%04x\n", __func__, offset);

	return epq;
}
/*
 * Translate PTD slot @index within @epq into the chip buffer offset of
 * that slot. Returns the offset, or -EINVAL (after logging a warning)
 * when the index lies beyond the end of the queue's buffer.
 */
static inline int get_ptd_offset(struct isp1362_ep_queue *epq, u8 index)
{
	int byte_pos = index * epq->blk_size;

	if (byte_pos > epq->buf_size) {
		pr_warning("%s: Bad %s index %d(%d)\n", __func__, epq->name, index,
		     epq->buf_size / epq->blk_size);
		return -EINVAL;
	}

	byte_pos += epq->buf_start;
	DBG(3, "%s: %s PTD[%02x] # %04x\n", __func__, epq->name, index, byte_pos);
	return byte_pos;
}
/*-------------------------------------------------------------------------*/
/*
 * Compute the largest transfer size for this PTD: bounded by the
 * request @size, the chip's MAX_XFER_SIZE and the space still free in
 * @epq (minus the PTD header).  A partial transfer is truncated to a
 * multiple of the max packet size @mps.
 */
static inline u16 max_transfer_size(struct isp1362_ep_queue *epq, size_t size,
				    int mps)
{
	size_t avail = epq->buf_avail * epq->blk_size - PTD_HEADER_SIZE;
	u16 xfer_size = min_t(size_t, MAX_XFER_SIZE, size);

	if ((size_t)xfer_size > avail)
		xfer_size = avail;
	/* short transfers must end on a packet boundary */
	if (xfer_size < size && xfer_size % mps)
		xfer_size -= xfer_size % mps;
	return xfer_size;
}
/*
 * Claim a contiguous run of PTD blocks in @epq large enough for a
 * transfer of @len payload bytes plus the PTD header on endpoint @ep.
 *
 * On success the blocks are marked busy in epq->buf_map, the
 * endpoint's ptd_index/ptd_offset/num_ptds are set and the first
 * claimed block index is returned.  Returns -ENOMEM when the queue
 * has no free blocks at all, -EOVERFLOW when no sufficiently large
 * contiguous run is available.
 */
static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
			     struct isp1362_ep *ep, u16 len)
{
	int ptd_offset = -EINVAL;
	/* header + payload, rounded up to whole blocks */
	int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1;
	int found;

	BUG_ON(len > epq->buf_size);

	if (!epq->buf_avail)
		return -ENOMEM;

	/* an endpoint must never hold blocks while claiming new ones */
	if (ep->num_ptds)
		pr_err("%s: %s len %d/%d num_ptds %d buf_map %08lx skip_map %08lx\n", __func__,
		       epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map);
	BUG_ON(ep->num_ptds != 0);

	found = bitmap_find_next_zero_area(&epq->buf_map, epq->buf_count, 0,
					   num_ptds, 0);
	if (found >= epq->buf_count)
		return -EOVERFLOW;

	DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__,
	    num_ptds, found, len, (int)(epq->blk_size - PTD_HEADER_SIZE));
	ptd_offset = get_ptd_offset(epq, found);
	WARN_ON(ptd_offset < 0);
	ep->ptd_offset = ptd_offset;
	ep->num_ptds += num_ptds;
	epq->buf_avail -= num_ptds;
	BUG_ON(epq->buf_avail > epq->buf_count);
	ep->ptd_index = found;
	bitmap_set(&epq->buf_map, found, num_ptds);
	DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n",
	    __func__, epq->name, ep->ptd_index, ep->ptd_offset,
	    epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map);
	return found;
}
/*
 * Return the PTD blocks held by @ep to @epq: clear them in buf_map,
 * set them in skip_map (so the chip skips the now stale PTDs) and
 * reset the endpoint's PTD bookkeeping to the 'unallocated' state.
 */
static inline void release_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
{
	int index = ep->ptd_index;
	int last = ep->ptd_index + ep->num_ptds;

	if (last > epq->buf_count)
		pr_err("%s: ep %p req %d len %d %s PTD[%d] $%04x num_ptds %d buf_count %d buf_avail %d buf_map %08lx skip_map %08lx\n",
		       __func__, ep, ep->num_req, ep->length, epq->name, ep->ptd_index,
		       ep->ptd_offset, ep->num_ptds, epq->buf_count, epq->buf_avail,
		       epq->buf_map, epq->skip_map);
	BUG_ON(last > epq->buf_count);

	for (; index < last; index++) {
		__clear_bit(index, &epq->buf_map);
		__set_bit(index, &epq->skip_map);
	}
	epq->buf_avail += ep->num_ptds;
	epq->ptd_count--;

	BUG_ON(epq->buf_avail > epq->buf_count);
	BUG_ON(epq->ptd_count > epq->buf_count);

	DBG(1, "%s: Done %s PTDs $%04x released %d avail %d count %d\n",
	    __func__, epq->name,
	    ep->ptd_offset, ep->num_ptds, epq->buf_avail, epq->buf_count);
	DBG(1, "%s: buf_map %08lx skip_map %08lx\n", __func__,
	    epq->buf_map, epq->skip_map);

	/* mark the endpoint as owning no PTD buffers */
	ep->num_ptds = 0;
	ep->ptd_offset = -EINVAL;
	ep->ptd_index = -EINVAL;
}
/*-------------------------------------------------------------------------*/
/*
 * Set up the in-memory PTD for the next transaction of @urb on @ep:
 * derive direction, data toggle and transfer length from ep->nextpid
 * and fill in ep->data, ep->length and the PTD header fields.  @fno
 * is the frame number, used only for ISO transfers.  The PTD is only
 * built here; isp1362_write_ptd() copies it into the chip.
 */
static void prepare_ptd(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
			struct isp1362_ep *ep, struct isp1362_ep_queue *epq,
			u16 fno)
{
	struct ptd *ptd;
	int toggle;
	int dir;
	u16 len;
	size_t buf_len = urb->transfer_buffer_length - urb->actual_length;

	DBG(3, "%s: %s ep %p\n", __func__, epq->name, ep);

	ptd = &ep->ptd;

	/* default payload position: continue where the URB left off */
	ep->data = (unsigned char *)urb->transfer_buffer + urb->actual_length;

	switch (ep->nextpid) {
	case USB_PID_IN:
		toggle = usb_gettoggle(urb->dev, ep->epnum, 0);
		dir = PTD_DIR_IN;
		if (usb_pipecontrol(urb->pipe)) {
			len = min_t(size_t, ep->maxpacket, buf_len);
		} else if (usb_pipeisoc(urb->pipe)) {
			len = min_t(size_t, urb->iso_frame_desc[fno].length, MAX_XFER_SIZE);
			ep->data = urb->transfer_buffer + urb->iso_frame_desc[fno].offset;
		} else
			len = max_transfer_size(epq, buf_len, ep->maxpacket);
		DBG(1, "%s: IN len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
		    (int)buf_len);
		break;
	case USB_PID_OUT:
		toggle = usb_gettoggle(urb->dev, ep->epnum, 1);
		dir = PTD_DIR_OUT;
		if (usb_pipecontrol(urb->pipe))
			len = min_t(size_t, ep->maxpacket, buf_len);
		else if (usb_pipeisoc(urb->pipe))
			len = min_t(size_t, urb->iso_frame_desc[0].length, MAX_XFER_SIZE);
		else
			len = max_transfer_size(epq, buf_len, ep->maxpacket);
		if (len == 0)
			pr_info("%s: Sending ZERO packet: %d\n", __func__,
				urb->transfer_flags & URB_ZERO_PACKET);
		DBG(1, "%s: OUT len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
		    (int)buf_len);
		break;
	case USB_PID_SETUP:
		/* SETUP stage: fixed 8-byte request taken from setup_packet */
		toggle = 0;
		dir = PTD_DIR_SETUP;
		len = sizeof(struct usb_ctrlrequest);
		DBG(1, "%s: SETUP len %d\n", __func__, len);
		ep->data = urb->setup_packet;
		break;
	case USB_PID_ACK:
		/* status stage: zero-length packet in the opposite direction
		 * of the data stage, toggle always DATA1 */
		toggle = 1;
		len = 0;
		dir = (urb->transfer_buffer_length && usb_pipein(urb->pipe)) ?
			PTD_DIR_OUT : PTD_DIR_IN;
		DBG(1, "%s: ACK len %d\n", __func__, len);
		break;
	default:
		toggle = dir = len = 0;
		pr_err("%s@%d: ep->nextpid %02x\n", __func__, __LINE__, ep->nextpid);
		BUG_ON(1);
	}

	ep->length = len;
	if (!len)
		ep->data = NULL;

	ptd->count = PTD_CC_MSK | PTD_ACTIVE_MSK | PTD_TOGGLE(toggle);
	ptd->mps = PTD_MPS(ep->maxpacket) | PTD_SPD(urb->dev->speed == USB_SPEED_LOW) |
		PTD_EP(ep->epnum);
	ptd->len = PTD_LEN(len) | PTD_DIR(dir);
	ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe));

	if (usb_pipeint(urb->pipe)) {
		ptd->faddr |= PTD_SF_INT(ep->branch);
		ptd->faddr |= PTD_PR(ep->interval ? __ffs(ep->interval) : 0);
	}
	if (usb_pipeisoc(urb->pipe))
		ptd->faddr |= PTD_SF_ISO(fno);

	DBG(1, "%s: Finished\n", __func__);
}
/*
 * Copy the prepared PTD header of @ep into the chip's buffer memory at
 * ep->ptd_offset, followed by the payload for OUT/SETUP transfers (an
 * IN PTD carries no outgoing payload, hence len == 0 for PTD_DIR_IN).
 */
static void isp1362_write_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
			      struct isp1362_ep_queue *epq)
{
	struct ptd *ptd = &ep->ptd;
	int len = PTD_GET_DIR(ptd) == PTD_DIR_IN ? 0 : ep->length;

	_BUG_ON(ep->ptd_offset < 0);

	prefetch(ptd);
	isp1362_write_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
	if (len)
		isp1362_write_buffer(isp1362_hcd, ep->data,
				     ep->ptd_offset + PTD_HEADER_SIZE, len);

	dump_ptd(ptd);
	dump_ptd_out_data(ptd, ep->data);
}
/*
 * Read back the PTD header of @ep from the chip and, for IN transfers,
 * copy the actually transferred bytes into ep->data.  Also takes the
 * endpoint off the queue's active list.
 */
static void isp1362_read_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
			     struct isp1362_ep_queue *epq)
{
	struct ptd *ptd = &ep->ptd;
	int act_len;

	WARN_ON(list_empty(&ep->active));
	BUG_ON(ep->ptd_offset < 0);

	list_del_init(&ep->active);
	DBG(1, "%s: ep %p removed from active list %p\n", __func__, ep, &epq->active);

	prefetchw(ptd);
	isp1362_read_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
	dump_ptd(ptd);
	/* number of bytes the chip actually transferred */
	act_len = PTD_GET_COUNT(ptd);
	if (PTD_GET_DIR(ptd) != PTD_DIR_IN || act_len == 0)
		return;
	if (act_len > ep->length)
		pr_err("%s: ep %p PTD $%04x act_len %d ep->length %d\n", __func__, ep,
		       ep->ptd_offset, act_len, ep->length);
	BUG_ON(act_len > ep->length);
	/* Only transfer the amount of data that has actually been overwritten
	 * in the chip buffer. We don't want any data that doesn't belong to the
	 * transfer to leak out of the chip to the callers transfer buffer!
	 */
	prefetchw(ep->data);
	isp1362_read_buffer(isp1362_hcd, ep->data,
			    ep->ptd_offset + PTD_HEADER_SIZE, act_len);
	dump_ptd_in_data(ptd, ep->data);
}
/*
 * INT PTDs will stay in the chip until data is available.
 * This function will remove a PTD from the chip when the URB is dequeued.
 * Must be called with the spinlock held and IRQs disabled
 */
static void remove_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
{
	int index;
	struct isp1362_ep_queue *epq;

	DBG(1, "%s: ep %p PTD[%d] $%04x\n", __func__, ep, ep->ptd_index, ep->ptd_offset);
	BUG_ON(ep->ptd_offset < 0);

	epq = get_ptd_queue(isp1362_hcd, ep->ptd_offset);
	BUG_ON(!epq);

	/* put ep in remove_list for cleanup */
	WARN_ON(!list_empty(&ep->remove_list));
	list_add_tail(&ep->remove_list, &isp1362_hcd->remove_list);
	/* let SOF interrupt handle the cleanup */
	isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);

	index = ep->ptd_index;
	if (index < 0)
		/* ISO queues don't have SKIP registers */
		return;

	DBG(1, "%s: Disabling PTD[%02x] $%04x %08lx|%08x\n", __func__,
	    index, ep->ptd_offset, epq->skip_map, 1 << index);

	/* prevent further processing of PTD (will be effective after next SOF) */
	epq->skip_map |= 1 << index;
	if (epq == &isp1362_hcd->atl_queue) {
		DBG(2, "%s: ATLSKIP = %08x -> %08lx\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCATLSKIP), epq->skip_map);
		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, epq->skip_map);
		/* all PTDs skipped: stop ATL buffer processing entirely */
		if (~epq->skip_map == 0)
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
	} else if (epq == &isp1362_hcd->intl_queue) {
		DBG(2, "%s: INTLSKIP = %08x -> %08lx\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCINTLSKIP), epq->skip_map);
		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, epq->skip_map);
		/* all PTDs skipped: stop INTL buffer processing entirely */
		if (~epq->skip_map == 0)
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
	}
}
/*
 * Take a done or failed request out of the schedule and give the
 * processed URB back to its submitter with @status.  The HCD lock is
 * dropped (and re-taken) around usb_hcd_giveback_urb(), as the sparse
 * annotations indicate.  If the endpoint is left idle it is removed
 * from the async or periodic schedule, returning any reserved
 * periodic bandwidth.
 */
static void finish_request(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
			   struct urb *urb, int status)
__releases(isp1362_hcd->lock)
__acquires(isp1362_hcd->lock)
{
	urb->hcpriv = NULL;
	ep->error_count = 0;

	/* control endpoints restart at the SETUP stage for the next URB */
	if (usb_pipecontrol(urb->pipe))
		ep->nextpid = USB_PID_SETUP;

	URB_DBG("%s: req %d FA %d ep%d%s %s: len %d/%d %s stat %d\n", __func__,
		ep->num_req, usb_pipedevice(urb->pipe),
		usb_pipeendpoint(urb->pipe),
		!usb_pipein(urb->pipe) ? "out" : "in",
		usb_pipecontrol(urb->pipe) ? "ctrl" :
		usb_pipeint(urb->pipe) ? "int" :
		usb_pipebulk(urb->pipe) ? "bulk" :
		"iso",
		urb->actual_length, urb->transfer_buffer_length,
		!(urb->transfer_flags & URB_SHORT_NOT_OK) ?
		"short_ok" : "", urb->status);

	usb_hcd_unlink_urb_from_ep(isp1362_hcd_to_hcd(isp1362_hcd), urb);
	spin_unlock(&isp1362_hcd->lock);
	usb_hcd_giveback_urb(isp1362_hcd_to_hcd(isp1362_hcd), urb, status);
	spin_lock(&isp1362_hcd->lock);

	/* take idle endpoints out of the schedule right away */
	if (!list_empty(&ep->hep->urb_list))
		return;

	/* async deschedule */
	if (!list_empty(&ep->schedule)) {
		list_del_init(&ep->schedule);
		return;
	}

	if (ep->interval) {
		/* periodic deschedule */
		DBG(1, "deschedule qh%d/%p branch %d load %d bandwidth %d -> %d\n", ep->interval,
		    ep, ep->branch, ep->load,
		    isp1362_hcd->load[ep->branch],
		    isp1362_hcd->load[ep->branch] - ep->load);
		isp1362_hcd->load[ep->branch] -= ep->load;
		ep->branch = PERIODIC_SIZE;
	}
}
/*
 * Analyze the result of a completed PTD on @ep: update the URB's
 * actual_length and the endpoint's data toggle, advance ep->nextpid
 * through the control transfer stages (SETUP -> IN/OUT -> ACK), map
 * the PTD completion code to a URB status, handle partial transfers
 * and errors, and hand the URB back via finish_request() once its
 * status is no longer -EINPROGRESS.
 */
static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
{
	struct urb *urb = get_urb(ep);
	struct usb_device *udev;
	struct ptd *ptd;
	int short_ok;
	u16 len;
	int urbstat = -EINPROGRESS;
	u8 cc;

	DBG(2, "%s: ep %p req %d\n", __func__, ep, ep->num_req);

	udev = urb->dev;
	ptd = &ep->ptd;
	cc = PTD_GET_CC(ptd);
	/* the chip never looked at this PTD; treat as device not responding */
	if (cc == PTD_NOTACCESSED) {
		pr_err("%s: req %d PTD %p Untouched by ISP1362\n", __func__,
		       ep->num_req, ptd);
		cc = PTD_DEVNOTRESP;
	}

	short_ok = !(urb->transfer_flags & URB_SHORT_NOT_OK);
	len = urb->transfer_buffer_length - urb->actual_length;

	/* Data underrun is special. For allowed underrun
	   we clear the error and continue as normal. For
	   forbidden underrun we finish the DATA stage
	   immediately while for control transfer,
	   we do a STATUS stage.
	 */
	if (cc == PTD_DATAUNDERRUN) {
		if (short_ok) {
			DBG(1, "%s: req %d Allowed data underrun short_%sok %d/%d/%d byte\n",
			    __func__, ep->num_req, short_ok ? "" : "not_",
			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
			cc = PTD_CC_NOERROR;
			urbstat = 0;
		} else {
			DBG(1, "%s: req %d Data Underrun %s nextpid %02x short_%sok %d/%d/%d byte\n",
			    __func__, ep->num_req,
			    usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid,
			    short_ok ? "" : "not_",
			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
			if (usb_pipecontrol(urb->pipe)) {
				ep->nextpid = USB_PID_ACK;
				/* save the data underrun error code for later and
				 * procede with the status stage
				 */
				urb->actual_length += PTD_GET_COUNT(ptd);
				BUG_ON(urb->actual_length > urb->transfer_buffer_length);

				if (urb->status == -EINPROGRESS)
					urb->status = cc_to_error[PTD_DATAUNDERRUN];
			} else {
				usb_settoggle(udev, ep->epnum, ep->nextpid == USB_PID_OUT,
					      PTD_GET_TOGGLE(ptd));
				urbstat = cc_to_error[PTD_DATAUNDERRUN];
			}
			goto out;
		}
	}

	if (cc != PTD_CC_NOERROR) {
		/* STALL and overrun end the URB at once; other errors only
		 * after 3 consecutive failures */
		if (++ep->error_count >= 3 || cc == PTD_CC_STALL || cc == PTD_DATAOVERRUN) {
			urbstat = cc_to_error[cc];
			DBG(1, "%s: req %d nextpid %02x, status %d, error %d, error_count %d\n",
			    __func__, ep->num_req, ep->nextpid, urbstat, cc,
			    ep->error_count);
		}
		goto out;
	}

	switch (ep->nextpid) {
	case USB_PID_OUT:
		if (PTD_GET_COUNT(ptd) != ep->length)
			pr_err("%s: count=%d len=%d\n", __func__,
			       PTD_GET_COUNT(ptd), ep->length);
		BUG_ON(PTD_GET_COUNT(ptd) != ep->length);
		urb->actual_length += ep->length;
		BUG_ON(urb->actual_length > urb->transfer_buffer_length);
		usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd));
		if (urb->actual_length == urb->transfer_buffer_length) {
			DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
			    ep->num_req, len, ep->maxpacket, urbstat);
			if (usb_pipecontrol(urb->pipe)) {
				DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
				    ep->num_req,
				    usb_pipein(urb->pipe) ? "IN" : "OUT");
				ep->nextpid = USB_PID_ACK;
			} else {
				/* done, unless a trailing ZLP is still owed */
				if (len % ep->maxpacket ||
				    !(urb->transfer_flags & URB_ZERO_PACKET)) {
					urbstat = 0;
					DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
					    __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
					    urbstat, len, ep->maxpacket, urb->actual_length);
				}
			}
		}
		break;
	case USB_PID_IN:
		len = PTD_GET_COUNT(ptd);
		BUG_ON(len > ep->length);
		urb->actual_length += len;
		BUG_ON(urb->actual_length > urb->transfer_buffer_length);
		usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd));
		/* if transfer completed or (allowed) data underrun */
		if ((urb->transfer_buffer_length == urb->actual_length) ||
		    len % ep->maxpacket) {
			DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
			    ep->num_req, len, ep->maxpacket, urbstat);
			if (usb_pipecontrol(urb->pipe)) {
				DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
				    ep->num_req,
				    usb_pipein(urb->pipe) ? "IN" : "OUT");
				ep->nextpid = USB_PID_ACK;
			} else {
				urbstat = 0;
				DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
				    __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
				    urbstat, len, ep->maxpacket, urb->actual_length);
			}
		}
		break;
	case USB_PID_SETUP:
		/* after SETUP: go straight to status stage for zero-length
		 * requests, otherwise to the data stage with toggle DATA1 */
		if (urb->transfer_buffer_length == urb->actual_length) {
			ep->nextpid = USB_PID_ACK;
		} else if (usb_pipeout(urb->pipe)) {
			usb_settoggle(udev, 0, 1, 1);
			ep->nextpid = USB_PID_OUT;
		} else {
			usb_settoggle(udev, 0, 0, 1);
			ep->nextpid = USB_PID_IN;
		}
		break;
	case USB_PID_ACK:
		DBG(3, "%s: req %d got ACK %d -> 0\n", __func__, ep->num_req,
		    urbstat);
		WARN_ON(urbstat != -EINPROGRESS);
		urbstat = 0;
		ep->nextpid = 0;
		break;
	default:
		BUG_ON(1);
	}

 out:
	if (urbstat != -EINPROGRESS) {
		DBG(2, "%s: Finishing ep %p req %d urb %p status %d\n", __func__,
		    ep, ep->num_req, urb, urbstat);
		finish_request(isp1362_hcd, ep, urb, urbstat);
	}
}
/*
 * Process all endpoints that remove_ptd() queued on remove_list:
 * release their PTD buffers, give back any still-linked URB with
 * -ESHUTDOWN and take the endpoint off the active and remove lists.
 * Runs from the SOF interrupt, i.e. after the chip has honored the
 * skip bits set by remove_ptd().
 */
static void finish_unlinks(struct isp1362_hcd *isp1362_hcd)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	list_for_each_entry_safe(ep, tmp, &isp1362_hcd->remove_list, remove_list) {
		struct isp1362_ep_queue *epq =
			get_ptd_queue(isp1362_hcd, ep->ptd_offset);
		int index = ep->ptd_index;

		BUG_ON(epq == NULL);
		if (index >= 0) {
			DBG(1, "%s: remove PTD[%d] $%04x\n", __func__, index, ep->ptd_offset);
			BUG_ON(ep->num_ptds == 0);
			release_ptd_buffers(epq, ep);
		}
		if (!list_empty(&ep->hep->urb_list)) {
			struct urb *urb = get_urb(ep);

			DBG(1, "%s: Finishing req %d ep %p from remove_list\n", __func__,
			    ep->num_req, ep);
			finish_request(isp1362_hcd, ep, urb, -ESHUTDOWN);
		}
		/* a removed endpoint is expected to still be on an active list */
		WARN_ON(list_empty(&ep->active));
		if (!list_empty(&ep->active)) {
			list_del_init(&ep->active);
			DBG(1, "%s: ep %p removed from active list\n", __func__, ep);
		}
		list_del_init(&ep->remove_list);
		DBG(1, "%s: ep %p removed from remove_list\n", __func__, ep);
	}
	DBG(1, "%s: Done\n", __func__);
}
/*
 * Start chip processing of @count freshly submitted ATL PTDs.  With
 * count == 0 (deferred submission) only the SOF interrupt is enabled,
 * so submission is retried on the next frame.
 */
static inline void enable_atl_transfers(struct isp1362_hcd *isp1362_hcd, int count)
{
	if (count > 0) {
		/* NOTE(review): HCATLDTC looks like the ATL done threshold
		 * count — confirm against the ISP1362 datasheet */
		if (count < isp1362_hcd->atl_queue.ptd_count)
			isp1362_write_reg16(isp1362_hcd, HCATLDTC, count);
		isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, isp1362_hcd->atl_queue.skip_map);
		isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
	} else
		isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
}
/*
 * Start chip processing of the INTL (interrupt transfer) queue:
 * enable its interrupt, activate the buffer and program the current
 * skip map.
 */
static inline void enable_intl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	isp1362_enable_int(isp1362_hcd, HCuPINT_INTL);
	isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
	isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, isp1362_hcd->intl_queue.skip_map);
}
/*
 * Hand one of the two double-buffered ISO buffers to the chip:
 * enable the matching ISTL interrupt and mark the buffer selected by
 * @flip (0 -> ISTL0, non-zero -> ISTL1) as full.
 */
static inline void enable_istl_transfers(struct isp1362_hcd *isp1362_hcd, int flip)
{
	if (flip) {
		isp1362_enable_int(isp1362_hcd, HCuPINT_ISTL1);
		isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL1_FULL);
	} else {
		isp1362_enable_int(isp1362_hcd, HCuPINT_ISTL0);
		isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL0_FULL);
	}
}
/*
 * Build and submit one PTD for the next transaction of @urb on @ep
 * into queue @epq.
 *
 * Returns 0 on success.  Returns -ENOMEM when the queue has no free
 * PTD blocks at all, -EOVERFLOW when no contiguous run large enough
 * for this transfer is available; in both cases nothing is submitted
 * and the caller may retry later.
 *
 * Fix: the original initialized 'index' from epq->free_ptd, a dead
 * store that was unconditionally overwritten by claim_ptd_buffers()
 * before first use.
 */
static int submit_req(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
		      struct isp1362_ep *ep, struct isp1362_ep_queue *epq)
{
	int index;

	prepare_ptd(isp1362_hcd, urb, ep, epq, 0);
	index = claim_ptd_buffers(epq, ep, ep->length);
	if (index == -ENOMEM) {
		DBG(1, "%s: req %d No free %s PTD available: %d, %08lx:%08lx\n", __func__,
		    ep->num_req, epq->name, ep->num_ptds, epq->buf_map, epq->skip_map);
		return index;
	} else if (index == -EOVERFLOW) {
		DBG(1, "%s: req %d Not enough space for %d byte %s PTD %d %08lx:%08lx\n",
		    __func__, ep->num_req, ep->length, epq->name, ep->num_ptds,
		    epq->buf_map, epq->skip_map);
		return index;
	} else
		BUG_ON(index < 0);
	list_add_tail(&ep->active, &epq->active);
	DBG(1, "%s: ep %p req %d len %d added to active list %p\n", __func__,
	    ep, ep->num_req, ep->length, &epq->active);

	DBG(1, "%s: Submitting %s PTD $%04x for ep %p req %d\n", __func__, epq->name,
	    ep->ptd_offset, ep, ep->num_req);

	isp1362_write_ptd(isp1362_hcd, ep, epq);
	/* clear the skip bit so the chip processes the new PTD */
	__clear_bit(ep->ptd_index, &epq->skip_map);

	return 0;
}
/*
 * Walk the async schedule and submit a PTD for every endpoint that is
 * not already active, then enable ATL processing.  When the queue runs
 * out of space (-ENOMEM/-EOVERFLOW) submission is deferred:
 * enable_atl_transfers() is called with count 0 so the SOF interrupt
 * retries on the next frame.  The schedule head is rotated each pass
 * to avoid starving endpoints at the tail.
 */
static void start_atl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	struct isp1362_ep_queue *epq = &isp1362_hcd->atl_queue;
	struct isp1362_ep *ep;
	int defer = 0;

	/* avoid re-entering while finish_transfers() works on this queue */
	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
		struct urb *urb = get_urb(ep);
		int ret;

		if (!list_empty(&ep->active)) {
			DBG(2, "%s: Skipping active %s ep %p\n", __func__, epq->name, ep);
			continue;
		}

		DBG(1, "%s: Processing %s ep %p req %d\n", __func__, epq->name,
		    ep, ep->num_req);

		ret = submit_req(isp1362_hcd, urb, ep, epq);
		if (ret == -ENOMEM) {
			/* no blocks free at all: stop submitting this pass */
			defer = 1;
			break;
		} else if (ret == -EOVERFLOW) {
			/* this transfer doesn't fit, a smaller one might */
			defer = 1;
			continue;
		}
#ifdef BUGGY_PXA2XX_UDC_USBTEST
		defer = ep->nextpid == USB_PID_SETUP;
#endif
		ptd_count++;
	}

	/* Avoid starving of endpoints */
	if (isp1362_hcd->async.next != isp1362_hcd->async.prev) {
		DBG(2, "%s: Cycling ASYNC schedule %d\n", __func__, ptd_count);
		list_move(&isp1362_hcd->async, isp1362_hcd->async.next);
	}

	if (ptd_count || defer)
		enable_atl_transfers(isp1362_hcd, defer ? 0 : ptd_count);

	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds) {
		epq->stat_maxptds = epq->ptd_count;
		DBG(0, "%s: max_ptds: %d\n", __func__, epq->stat_maxptds);
	}
}
/*
 * Walk the periodic schedule and submit a PTD for every interrupt
 * endpoint that is not already active, then enable INTL processing if
 * anything was submitted.  Mirrors start_atl_transfers() but without
 * deferred retry or schedule rotation.
 */
static void start_intl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	struct isp1362_ep_queue *epq = &isp1362_hcd->intl_queue;
	struct isp1362_ep *ep;

	/* avoid re-entering while finish_transfers() works on this queue */
	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
		struct urb *urb = get_urb(ep);
		int ret;

		if (!list_empty(&ep->active)) {
			DBG(1, "%s: Skipping active %s ep %p\n", __func__,
			    epq->name, ep);
			continue;
		}

		DBG(1, "%s: Processing %s ep %p req %d\n", __func__,
		    epq->name, ep, ep->num_req);
		ret = submit_req(isp1362_hcd, urb, ep, epq);
		if (ret == -ENOMEM)
			break;
		else if (ret == -EOVERFLOW)
			continue;
		ptd_count++;
	}

	if (ptd_count) {
		static int last_count;

		/* only log when the submitted count changes */
		if (ptd_count != last_count) {
			DBG(0, "%s: ptd_count: %d\n", __func__, ptd_count);
			last_count = ptd_count;
		}
		enable_intl_transfers(isp1362_hcd);
	}

	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds)
		epq->stat_maxptds = epq->ptd_count;
}
/*
 * Compute the buffer offset immediately after the PTD currently held
 * by @ep (header + payload rounded up to whole blocks).  Returns that
 * offset, or -ENOMEM when it would lie beyond the end of @epq's
 * buffer region.
 */
static inline int next_ptd(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
{
	int num_ptds = (ep->length + PTD_HEADER_SIZE + (epq->blk_size - 1)) / epq->blk_size;
	u16 ptd_offset = ep->ptd_offset;

	DBG(2, "%s: PTD offset $%04x + %04x => %d * %04x -> $%04x\n", __func__, ptd_offset,
	    ep->length, num_ptds, epq->blk_size, ptd_offset + num_ptds * epq->blk_size);
	ptd_offset += num_ptds * epq->blk_size;
	if (ptd_offset >= epq->buf_start + epq->buf_size)
		return -ENOMEM;
	return ptd_offset;
}
/*
 * Fill the current ISTL buffer with PTDs for ISO URBs that are due in
 * the next frame; the ISP1362's double buffering lets us queue one
 * frame ahead (diff == -1).  URBs whose time window has elapsed are
 * finished with -EOVERFLOW.  After handing the buffer to the chip, if
 * the other ISTL buffer is not yet marked full, 'flip' is toggled and
 * the fill is repeated for it (the fill2 loop).
 */
static void start_iso_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	int flip = isp1362_hcd->istl_flip;
	struct isp1362_ep_queue *epq;
	int ptd_offset;
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;
	u16 fno = isp1362_read_reg32(isp1362_hcd, HCFMNUM);

 fill2:
	epq = &isp1362_hcd->istl_queue[flip];
	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	/* buffer still owned by the chip */
	if (!list_empty(&epq->active))
		return;

	ptd_offset = epq->buf_start;
	list_for_each_entry_safe(ep, tmp, &isp1362_hcd->isoc, schedule) {
		struct urb *urb = get_urb(ep);
		/* signed distance between current frame and the URB's start frame */
		s16 diff = fno - (u16)urb->start_frame;

		DBG(1, "%s: Processing %s ep %p\n", __func__, epq->name, ep);

		if (diff > urb->number_of_packets) {
			/* time frame for this URB has elapsed */
			finish_request(isp1362_hcd, ep, urb, -EOVERFLOW);
			continue;
		} else if (diff < -1) {
			/* URB is not due in this frame or the next one.
			 * Comparing with '-1' instead of '0' accounts for double
			 * buffering in the ISP1362 which enables us to queue the PTD
			 * one frame ahead of time
			 */
		} else if (diff == -1) {
			/* submit PTD's that are due in the next frame */
			prepare_ptd(isp1362_hcd, urb, ep, epq, fno);
			if (ptd_offset + PTD_HEADER_SIZE + ep->length >
			    epq->buf_start + epq->buf_size) {
				pr_err("%s: Not enough ISO buffer space for %d byte PTD\n",
				       __func__, ep->length);
				continue;
			}
			ep->ptd_offset = ptd_offset;
			list_add_tail(&ep->active, &epq->active);

			ptd_offset = next_ptd(epq, ep);
			if (ptd_offset < 0) {
				pr_warning("%s: req %d No more %s PTD buffers available\n", __func__,
					   ep->num_req, epq->name);
				break;
			}
		}
	}
	list_for_each_entry(ep, &epq->active, active) {
		/* mark the final PTD in the buffer */
		if (epq->active.next == &ep->active)
			ep->ptd.mps |= PTD_LAST_MSK;
		isp1362_write_ptd(isp1362_hcd, ep, epq);
		ptd_count++;
	}

	if (ptd_count)
		enable_istl_transfers(isp1362_hcd, flip);

	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds)
		epq->stat_maxptds = epq->ptd_count;

	/* check, whether the second ISTL buffer may also be filled */
	if (!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
	      (flip ? HCBUFSTAT_ISTL0_FULL : HCBUFSTAT_ISTL1_FULL))) {
		fno++;
		ptd_count = 0;
		flip = 1 - flip;
		goto fill2;
	}
}
/*
 * Post-process every active endpoint of @epq whose PTD is flagged in
 * @done_map: read the PTD back from the chip, release its buffer
 * blocks and run postproc_ep().  epq->finishing guards the
 * start_*_transfers() paths against re-entry while this runs.
 */
static void finish_transfers(struct isp1362_hcd *isp1362_hcd, unsigned long done_map,
			     struct isp1362_ep_queue *epq)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	if (list_empty(&epq->active)) {
		DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
		return;
	}

	DBG(1, "%s: Finishing %s transfers %08lx\n", __func__, epq->name, done_map);

	atomic_inc(&epq->finishing);
	list_for_each_entry_safe(ep, tmp, &epq->active, active) {
		int index = ep->ptd_index;

		DBG(1, "%s: Checking %s PTD[%02x] $%04x\n", __func__, epq->name,
		    index, ep->ptd_offset);

		BUG_ON(index < 0);
		if (__test_and_clear_bit(index, &done_map)) {
			isp1362_read_ptd(isp1362_hcd, ep, epq);
			epq->free_ptd = index;
			BUG_ON(ep->num_ptds == 0);
			release_ptd_buffers(epq, ep);

			DBG(1, "%s: ep %p req %d removed from active list\n", __func__,
			    ep, ep->num_req);
			if (!list_empty(&ep->remove_list)) {
				list_del_init(&ep->remove_list);
				DBG(1, "%s: ep %p removed from remove list\n", __func__, ep);
			}
			DBG(1, "%s: Postprocessing %s ep %p req %d\n", __func__, epq->name,
			    ep, ep->num_req);
			postproc_ep(isp1362_hcd, ep);
		}
		/* stop early once all done bits have been consumed */
		if (!done_map)
			break;
	}
	if (done_map)
		pr_warning("%s: done_map not clear: %08lx:%08lx\n", __func__, done_map,
			   epq->skip_map);
	atomic_dec(&epq->finishing);
}
/*
 * Post-process all active endpoints of an ISTL queue once the chip has
 * processed that ISO buffer.  ISO PTDs all complete with the frame, so
 * unlike finish_transfers() there is no done map to consult.
 */
static void finish_iso_transfers(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep_queue *epq)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	if (list_empty(&epq->active)) {
		DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
		return;
	}

	DBG(1, "%s: Finishing %s transfers\n", __func__, epq->name);

	atomic_inc(&epq->finishing);
	list_for_each_entry_safe(ep, tmp, &epq->active, active) {
		DBG(1, "%s: Checking PTD $%04x\n", __func__, ep->ptd_offset);

		isp1362_read_ptd(isp1362_hcd, ep, epq);
		DBG(1, "%s: Postprocessing %s ep %p\n", __func__, epq->name, ep);
		postproc_ep(isp1362_hcd, ep);
	}
	/* NOTE(review): ISTL queues appear to be set up with blk_size == 0
	 * — confirm against the queue initialization code */
	WARN_ON(epq->blk_size != 0);
	atomic_dec(&epq->finishing);
}
/*
 * Top-level interrupt handler.  Masks all chip interrupts
 * (HCuPINTENB = 0), reads and acknowledges the pending sources in
 * HCuPINT, dispatches each enabled one (SOF, ISTL0/1, INTL, ATL, OPR,
 * SUSP, CLKRDY) and finally writes the accumulated irqenb mask back to
 * HCuPINTENB.  irq_active tells isp1362_enable_int() to defer its
 * HCuPINTENB writes to this final store (see comment near that
 * function).
 */
static irqreturn_t isp1362_irq(struct usb_hcd *hcd)
{
	int handled = 0;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	u16 irqstat;
	u16 svc_mask;

	spin_lock(&isp1362_hcd->lock);

	BUG_ON(isp1362_hcd->irq_active++);

	/* mask all chip interrupts while handling */
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);

	irqstat = isp1362_read_reg16(isp1362_hcd, HCuPINT);
	DBG(3, "%s: got IRQ %04x:%04x\n", __func__, irqstat, isp1362_hcd->irqenb);

	/* only handle interrupts that are currently enabled */
	irqstat &= isp1362_hcd->irqenb;
	isp1362_write_reg16(isp1362_hcd, HCuPINT, irqstat);
	/* svc_mask tracks which sources were NOT serviced below */
	svc_mask = irqstat;

	if (irqstat & HCuPINT_SOF) {
		/* SOF is one-shot: disabled until someone re-arms it */
		isp1362_hcd->irqenb &= ~HCuPINT_SOF;
		isp1362_hcd->irq_stat[ISP1362_INT_SOF]++;
		handled = 1;
		svc_mask &= ~HCuPINT_SOF;
		DBG(3, "%s: SOF\n", __func__);
		isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
		if (!list_empty(&isp1362_hcd->remove_list))
			finish_unlinks(isp1362_hcd);
		/* retry deferred async submissions */
		if (!list_empty(&isp1362_hcd->async) && !(irqstat & HCuPINT_ATL)) {
			if (list_empty(&isp1362_hcd->atl_queue.active)) {
				start_atl_transfers(isp1362_hcd);
			} else {
				isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
				isp1362_write_reg32(isp1362_hcd, HCATLSKIP,
						    isp1362_hcd->atl_queue.skip_map);
				isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
			}
		}
	}

	if (irqstat & HCuPINT_ISTL0) {
		isp1362_hcd->irq_stat[ISP1362_INT_ISTL0]++;
		handled = 1;
		svc_mask &= ~HCuPINT_ISTL0;
		isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL0_FULL);
		DBG(1, "%s: ISTL0\n", __func__);
		/* ISTL0 completion is only expected while istl_flip == 0 */
		WARN_ON((int)!!isp1362_hcd->istl_flip);
		WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			HCBUFSTAT_ISTL0_ACTIVE);
		WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			  HCBUFSTAT_ISTL0_DONE));
		isp1362_hcd->irqenb &= ~HCuPINT_ISTL0;
	}

	if (irqstat & HCuPINT_ISTL1) {
		isp1362_hcd->irq_stat[ISP1362_INT_ISTL1]++;
		handled = 1;
		svc_mask &= ~HCuPINT_ISTL1;
		isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL1_FULL);
		DBG(1, "%s: ISTL1\n", __func__);
		/* ISTL1 completion is only expected while istl_flip == 1 */
		WARN_ON(!(int)isp1362_hcd->istl_flip);
		WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			HCBUFSTAT_ISTL1_ACTIVE);
		WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			  HCBUFSTAT_ISTL1_DONE));
		isp1362_hcd->irqenb &= ~HCuPINT_ISTL1;
	}

	if (irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) {
		/* both ISO buffers completing at once would mean we fell behind */
		WARN_ON((irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) ==
			(HCuPINT_ISTL0 | HCuPINT_ISTL1));
		finish_iso_transfers(isp1362_hcd,
				     &isp1362_hcd->istl_queue[isp1362_hcd->istl_flip]);
		start_iso_transfers(isp1362_hcd);
		isp1362_hcd->istl_flip = 1 - isp1362_hcd->istl_flip;
	}

	if (irqstat & HCuPINT_INTL) {
		u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
		u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCINTLSKIP);

		isp1362_hcd->irq_stat[ISP1362_INT_INTL]++;

		DBG(2, "%s: INTL\n", __func__);

		svc_mask &= ~HCuPINT_INTL;

		/* skip the completed PTDs until they are resubmitted */
		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, skip_map | done_map);
		if (~(done_map | skip_map) == 0)
			/* All PTDs are finished, disable INTL processing entirely */
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);

		handled = 1;
		WARN_ON(!done_map);
		if (done_map) {
			DBG(3, "%s: INTL done_map %08x\n", __func__, done_map);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
			start_intl_transfers(isp1362_hcd);
		}
	}

	if (irqstat & HCuPINT_ATL) {
		u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
		u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCATLSKIP);

		isp1362_hcd->irq_stat[ISP1362_INT_ATL]++;

		DBG(2, "%s: ATL\n", __func__);

		svc_mask &= ~HCuPINT_ATL;

		/* skip the completed PTDs until they are resubmitted */
		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, skip_map | done_map);
		if (~(done_map | skip_map) == 0)
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
		if (done_map) {
			DBG(3, "%s: ATL done_map %08x\n", __func__, done_map);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
			start_atl_transfers(isp1362_hcd);
		}
		handled = 1;
	}

	if (irqstat & HCuPINT_OPR) {
		/* OHCI-level (operational register) interrupt */
		u32 intstat = isp1362_read_reg32(isp1362_hcd, HCINTSTAT);

		isp1362_hcd->irq_stat[ISP1362_INT_OPR]++;

		svc_mask &= ~HCuPINT_OPR;
		DBG(2, "%s: OPR %08x:%08x\n", __func__, intstat, isp1362_hcd->intenb);
		intstat &= isp1362_hcd->intenb;
		if (intstat & OHCI_INTR_UE) {
			pr_err("Unrecoverable error\n");
			/* FIXME: do here reset or cleanup or whatever */
		}
		if (intstat & OHCI_INTR_RHSC) {
			/* root hub status change: snapshot port registers */
			isp1362_hcd->rhstatus = isp1362_read_reg32(isp1362_hcd, HCRHSTATUS);
			isp1362_hcd->rhport[0] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1);
			isp1362_hcd->rhport[1] = isp1362_read_reg32(isp1362_hcd, HCRHPORT2);
		}
		if (intstat & OHCI_INTR_RD) {
			pr_info("%s: RESUME DETECTED\n", __func__);
			isp1362_show_reg(isp1362_hcd, HCCONTROL);
			usb_hcd_resume_root_hub(hcd);
		}
		isp1362_write_reg32(isp1362_hcd, HCINTSTAT, intstat);
		irqstat &= ~HCuPINT_OPR;
		handled = 1;
	}

	if (irqstat & HCuPINT_SUSP) {
		isp1362_hcd->irq_stat[ISP1362_INT_SUSP]++;
		handled = 1;
		svc_mask &= ~HCuPINT_SUSP;

		pr_info("%s: SUSPEND IRQ\n", __func__);
	}

	if (irqstat & HCuPINT_CLKRDY) {
		isp1362_hcd->irq_stat[ISP1362_INT_CLKRDY]++;
		handled = 1;
		isp1362_hcd->irqenb &= ~HCuPINT_CLKRDY;
		svc_mask &= ~HCuPINT_CLKRDY;
		pr_info("%s: CLKRDY IRQ\n", __func__);
	}

	if (svc_mask)
		pr_err("%s: Unserviced interrupt(s) %04x\n", __func__, svc_mask);

	/* unmask again with whatever the handlers accumulated in irqenb */
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
	isp1362_hcd->irq_active--;
	spin_unlock(&isp1362_hcd->lock);

	return IRQ_RETVAL(handled);
}
/*-------------------------------------------------------------------------*/
#define MAX_PERIODIC_LOAD 900 /* out of 1000 usec */
/*
 * Pick a periodic schedule branch for a transfer that repeats every
 * @interval frames and adds @load usecs to each frame it occupies.
 * Among the @interval candidate branches, choose the least-loaded one
 * for which every occupied slot (i, i+interval, ...) stays within
 * MAX_PERIODIC_LOAD.  Returns the branch index or -ENOSPC when none
 * fits.
 */
static int balance(struct isp1362_hcd *isp1362_hcd, u16 interval, u16 load)
{
	int i, branch = -ENOSPC;

	/* search for the least loaded schedule branch of that interval
	 * which has enough bandwidth left unreserved.
	 */
	for (i = 0; i < interval; i++) {
		if (branch < 0 || isp1362_hcd->load[branch] > isp1362_hcd->load[i]) {
			int j;

			/* verify all slots this branch would occupy */
			for (j = i; j < PERIODIC_SIZE; j += interval) {
				if ((isp1362_hcd->load[j] + load) > MAX_PERIODIC_LOAD) {
					pr_err("%s: new load %d load[%02x] %d max %d\n", __func__,
					       load, j, isp1362_hcd->load[j], MAX_PERIODIC_LOAD);
					break;
				}
			}
			/* inner loop broke out early: branch i doesn't fit */
			if (j < PERIODIC_SIZE)
				continue;
			branch = i;
		}
	}
	return branch;
}
/* NB! ALL the code above this point runs with isp1362_hcd->lock
held, irqs off
*/
/*-------------------------------------------------------------------------*/
/*
 * HCD urb_enqueue() method: queue an URB for transfer.
 *
 * Allocates the per-endpoint state (struct isp1362_ep) on first use of
 * an endpoint, links the URB, places the endpoint on the async,
 * periodic or isoc schedule as appropriate, then kicks off the
 * matching ATL/INTL/ISTL transfer processing.
 *
 * Returns 0 on success or a negative errno: -ENOSPC for ISO pipes
 * (unsupported) or a full periodic schedule, -ENOMEM on allocation
 * failure, -ENODEV when no root hub port is enabled or the HC is not
 * running, or whatever usb_hcd_link_urb_to_ep() reports.
 */
static int isp1362_urb_enqueue(struct usb_hcd *hcd,
			       struct urb *urb,
			       gfp_t mem_flags)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	struct usb_device *udev = urb->dev;
	unsigned int pipe = urb->pipe;
	int is_out = !usb_pipein(pipe);
	int type = usb_pipetype(pipe);
	int epnum = usb_pipeendpoint(pipe);
	struct usb_host_endpoint *hep = urb->ep;
	struct isp1362_ep *ep = NULL;
	unsigned long flags;
	int retval = 0;

	DBG(3, "%s: urb %p\n", __func__, urb);

	/* ISO is rejected up front, so the PIPE_ISOCHRONOUS branches
	 * further down are currently unreachable. */
	if (type == PIPE_ISOCHRONOUS) {
		pr_err("Isochronous transfers not supported\n");
		return -ENOSPC;
	}

	URB_DBG("%s: FA %d ep%d%s %s: len %d %s%s\n", __func__,
		usb_pipedevice(pipe), epnum,
		is_out ? "out" : "in",
		usb_pipecontrol(pipe) ? "ctrl" :
		usb_pipeint(pipe) ? "int" :
		usb_pipebulk(pipe) ? "bulk" :
		"iso",
		urb->transfer_buffer_length,
		(urb->transfer_flags & URB_ZERO_PACKET) ? "ZERO_PACKET " : "",
		!(urb->transfer_flags & URB_SHORT_NOT_OK) ?
		"short_ok" : "");

	/* avoid all allocations within spinlocks: request or endpoint */
	if (!hep->hcpriv) {
		ep = kcalloc(1, sizeof *ep, mem_flags);
		if (!ep)
			return -ENOMEM;
	}
	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* don't submit to a dead or disabled port */
	if (!((isp1362_hcd->rhport[0] | isp1362_hcd->rhport[1]) &
	      (1 << USB_PORT_FEAT_ENABLE)) ||
	    !HC_IS_RUNNING(hcd->state)) {
		kfree(ep);
		retval = -ENODEV;
		goto fail_not_linked;
	}

	retval = usb_hcd_link_urb_to_ep(hcd, urb);
	if (retval) {
		kfree(ep);
		goto fail_not_linked;
	}

	if (hep->hcpriv) {
		/* endpoint state already set up by an earlier URB;
		 * the speculative allocation above did not happen */
		ep = hep->hcpriv;
	} else {
		/* first URB for this endpoint: initialize isp1362_ep */
		INIT_LIST_HEAD(&ep->schedule);
		INIT_LIST_HEAD(&ep->active);
		INIT_LIST_HEAD(&ep->remove_list);
		ep->udev = usb_get_dev(udev);	/* dropped in endpoint_disable() */
		ep->hep = hep;
		ep->epnum = epnum;
		ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out);
		ep->ptd_offset = -EINVAL;	/* no PTD buffer assigned yet */
		ep->ptd_index = -EINVAL;
		usb_settoggle(udev, epnum, is_out, 0);

		if (type == PIPE_CONTROL)
			ep->nextpid = USB_PID_SETUP;
		else if (is_out)
			ep->nextpid = USB_PID_OUT;
		else
			ep->nextpid = USB_PID_IN;

		switch (type) {
		case PIPE_ISOCHRONOUS:
		case PIPE_INTERRUPT:
			if (urb->interval > PERIODIC_SIZE)
				urb->interval = PERIODIC_SIZE;
			ep->interval = urb->interval;
			/* branch == PERIODIC_SIZE marks "not scheduled yet" */
			ep->branch = PERIODIC_SIZE;
			ep->load = usb_calc_bus_time(udev->speed, !is_out,
						     (type == PIPE_ISOCHRONOUS),
						     usb_maxpacket(udev, pipe, is_out)) / 1000;
			break;
		}
		hep->hcpriv = ep;
	}
	ep->num_req = isp1362_hcd->req_serial++;

	/* maybe put endpoint into schedule */
	switch (type) {
	case PIPE_CONTROL:
	case PIPE_BULK:
		if (list_empty(&ep->schedule)) {
			DBG(1, "%s: Adding ep %p req %d to async schedule\n",
				__func__, ep, ep->num_req);
			list_add_tail(&ep->schedule, &isp1362_hcd->async);
		}
		break;
	case PIPE_ISOCHRONOUS:
	case PIPE_INTERRUPT:
		urb->interval = ep->interval;

		/* urb submitted for already existing EP */
		if (ep->branch < PERIODIC_SIZE)
			break;

		/* reserve periodic bandwidth: find the least loaded branch */
		retval = balance(isp1362_hcd, ep->interval, ep->load);
		if (retval < 0) {
			pr_err("%s: balance returned %d\n", __func__, retval);
			goto fail;
		}
		ep->branch = retval;
		retval = 0;
		isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
		DBG(1, "%s: Current frame %04x branch %02x start_frame %04x(%04x)\n",
		    __func__, isp1362_hcd->fmindex, ep->branch,
		    ((isp1362_hcd->fmindex + PERIODIC_SIZE - 1) &
		     ~(PERIODIC_SIZE - 1)) + ep->branch,
		    (isp1362_hcd->fmindex & (PERIODIC_SIZE - 1)) + ep->branch);

		if (list_empty(&ep->schedule)) {
			if (type == PIPE_ISOCHRONOUS) {
				/* choose a start frame aligned to the
				 * interval, at least 8 frames in the future */
				u16 frame = isp1362_hcd->fmindex;
				frame += max_t(u16, 8, ep->interval);
				frame &= ~(ep->interval - 1);
				frame |= ep->branch;
				if (frame_before(frame, isp1362_hcd->fmindex))
					frame += ep->interval;
				urb->start_frame = frame;

				DBG(1, "%s: Adding ep %p to isoc schedule\n", __func__, ep);
				list_add_tail(&ep->schedule, &isp1362_hcd->isoc);
			} else {
				DBG(1, "%s: Adding ep %p to periodic schedule\n", __func__, ep);
				list_add_tail(&ep->schedule, &isp1362_hcd->periodic);
			}
		} else
			DBG(1, "%s: ep %p already scheduled\n", __func__, ep);

		DBG(2, "%s: load %d bandwidth %d -> %d\n", __func__,
		    ep->load / ep->interval, isp1362_hcd->load[ep->branch],
		    isp1362_hcd->load[ep->branch] + ep->load);
		isp1362_hcd->load[ep->branch] += ep->load;
	}

	urb->hcpriv = hep;
	ALIGNSTAT(isp1362_hcd, urb->transfer_buffer);

	/* kick off whichever transfer list this endpoint belongs to */
	switch (type) {
	case PIPE_CONTROL:
	case PIPE_BULK:
		start_atl_transfers(isp1362_hcd);
		break;
	case PIPE_INTERRUPT:
		start_intl_transfers(isp1362_hcd);
		break;
	case PIPE_ISOCHRONOUS:
		start_iso_transfers(isp1362_hcd);
		break;
	default:
		BUG();
	}
 fail:
	if (retval)
		usb_hcd_unlink_urb_from_ep(hcd, urb);
 fail_not_linked:
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	if (retval)
		DBG(0, "%s: urb %p failed with %d\n", __func__, urb, retval);
	return retval;
}
/*
 * HCD urb_dequeue() method: cancel a queued or in-flight URB.
 *
 * If the URB heads its endpoint's queue and has an active PTD, the PTD
 * is queued for removal and the URB is completed later by the
 * interrupt handler; otherwise it is finished immediately with the
 * given status.  Returns 0 on success, or a negative errno from
 * usb_hcd_check_unlink_urb(), -EIDRM / -EINVAL when the URB carries no
 * endpoint state anymore.
 */
static int isp1362_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	struct usb_host_endpoint *hep;
	unsigned long flags;
	struct isp1362_ep *ep;
	int retval = 0;

	DBG(3, "%s: urb %p\n", __func__, urb);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	retval = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (retval)
		goto done;

	hep = urb->hcpriv;

	if (!hep) {
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		return -EIDRM;
	}

	ep = hep->hcpriv;
	if (ep) {
		/* In front of queue? */
		if (ep->hep->urb_list.next == &urb->urb_list) {
			if (!list_empty(&ep->active)) {
				DBG(1, "%s: urb %p ep %p req %d active PTD[%d] $%04x\n", __func__,
				    urb, ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
				/* disable processing and queue PTD for removal */
				remove_ptd(isp1362_hcd, ep);
				/* the removal interrupt will finish the URB;
				 * clear it so we don't finish it twice below */
				urb = NULL;
			}
		}
		if (urb) {
			DBG(1, "%s: Finishing ep %p req %d\n", __func__, ep,
			    ep->num_req);
			finish_request(isp1362_hcd, ep, urb, status);
		} else
			DBG(1, "%s: urb %p active; wait4irq\n", __func__, urb);
	} else {
		pr_warning("%s: No EP in URB %p\n", __func__, urb);
		retval = -EINVAL;
	}
done:
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	DBG(3, "%s: exit\n", __func__);

	return retval;
}
/*
 * HCD endpoint_disable() method: free the isp1362_ep attached to hep.
 *
 * A still-active PTD is queued for removal and we then poll (msleep)
 * until the interrupt handler has emptied ep->active before releasing
 * the device reference (taken in urb_enqueue) and freeing the state.
 */
static void isp1362_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
	struct isp1362_ep *ep = hep->hcpriv;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;

	DBG(1, "%s: ep %p\n", __func__, ep);
	if (!ep)
		return;
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	if (!list_empty(&hep->urb_list)) {
		/* only queue the PTD for removal once */
		if (!list_empty(&ep->active) && list_empty(&ep->remove_list)) {
			DBG(1, "%s: Removing ep %p req %d PTD[%d] $%04x\n", __func__,
			    ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
			remove_ptd(isp1362_hcd, ep);
			pr_info("%s: Waiting for Interrupt to clean up\n", __func__);
		}
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	/* Wait for interrupt to clear out active list */
	while (!list_empty(&ep->active))
		msleep(1);

	DBG(1, "%s: Freeing EP %p\n", __func__, ep);

	usb_put_dev(ep->udev);	/* balances usb_get_dev() in urb_enqueue */
	kfree(ep);
	hep->hcpriv = NULL;
}
/* HCD get_frame_number() method: return the current OHCI frame number. */
static int isp1362_get_frame(struct usb_hcd *hcd)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;
	u32 fmnum;

	/* register access requires the HCD lock */
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	fmnum = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	return (int)fmnum;
}
/*-------------------------------------------------------------------------*/
/* Adapted from ohci-hub.c */
/*
 * HCD hub_status_data() method (adapted from ohci-hub.c): fill buf[0]
 * with one change bit per root hub port (bit 0 = hub-wide change) and
 * return non-zero when any change is pending.  Reads the cached
 * rhstatus/rhport[] values maintained by the interrupt handler.
 *
 * Fix vs. original: the trailing "if (!(status & RH_PS_CCS)) continue;"
 * at the end of the port loop was a no-op left over from the ohci-hub
 * code it was adapted from (there, more statements followed); it has
 * been removed along with the then-redundant 'continue'.
 */
static int isp1362_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	int ports, i, changed = 0;
	unsigned long flags;

	if (!HC_IS_RUNNING(hcd->state))
		return -ESHUTDOWN;

	/* Report no status change now, if we are scheduled to be
	   called later */
	if (timer_pending(&hcd->rh_timer))
		return 0;

	ports = isp1362_hcd->rhdesca & RH_A_NDP;
	BUG_ON(ports > 2);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	/* init status: bit 0 reports hub-wide (local power / overcurrent)
	 * changes */
	if (isp1362_hcd->rhstatus & (RH_HS_LPSC | RH_HS_OCIC))
		buf[0] = changed = 1;
	else
		buf[0] = 0;

	for (i = 0; i < ports; i++) {
		u32 status = isp1362_hcd->rhport[i];

		/* any of the per-port change bits set? */
		if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC |
			      RH_PS_OCIC | RH_PS_PRSC)) {
			changed = 1;
			buf[0] |= 1 << (i + 1);
		}
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	return changed;
}
/*
 * Build the root hub's USB hub descriptor from the cached HCRHDESCA
 * register value.
 */
static void isp1362_hub_descriptor(struct isp1362_hcd *isp1362_hcd,
				   struct usb_hub_descriptor *desc)
{
	u32 rhdesca = isp1362_hcd->rhdesca;

	DBG(3, "%s: enter\n", __func__);

	desc->bDescriptorType = 0x29;	/* hub descriptor */
	desc->bDescLength = 9;
	desc->bHubContrCurrent = 0;
	desc->bNbrPorts = rhdesca & 0x3;
	/* Power switching, device type, overcurrent. */
	desc->wHubCharacteristics = cpu_to_le16((rhdesca >> 8) & 0x1f);
	DBG(0, "%s: hubcharacteristics = %02x\n", __func__, cpu_to_le16((rhdesca >> 8) & 0x1f));
	desc->bPwrOn2PwrGood = (rhdesca >> 24) & 0xff;
	/* two bitmaps: ports removable, and legacy PortPwrCtrlMask */
	if (desc->bNbrPorts == 1)
		desc->bitmap[0] = 1 << 1;
	else
		desc->bitmap[0] = 3 << 1;
	desc->bitmap[1] = ~0;

	DBG(3, "%s: exit\n", __func__);
}
/* Adapted from ohci-hub.c */
/*
 * HCD hub_control() method (adapted from ohci-hub.c): handle root hub
 * class requests.  wIndex is the 1-based port number for the per-port
 * requests and is decremented to a 0-based index before use.
 * Returns 0 on success, -EPIPE ("protocol stall") for unsupported or
 * malformed requests.
 */
static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
			       u16 wIndex, char *buf, u16 wLength)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	int retval = 0;
	unsigned long flags;
	unsigned long t1;
	int ports = isp1362_hcd->rhdesca & RH_A_NDP;
	u32 tmp = 0;

	switch (typeReq) {
	case ClearHubFeature:
		DBG(0, "ClearHubFeature: ");
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
			_DBG(0, "C_HUB_OVER_CURRENT\n");
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_OCIC);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			/* fall through -- no break in the original; the extra
			 * C_HUB_LOCAL_POWER debug print is harmless */
		case C_HUB_LOCAL_POWER:
			_DBG(0, "C_HUB_LOCAL_POWER\n");
			break;
		default:
			goto error;
		}
		break;
	case SetHubFeature:
		DBG(0, "SetHubFeature: ");
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
		case C_HUB_LOCAL_POWER:
			/* accepted but intentionally ignored */
			_DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n");
			break;
		default:
			goto error;
		}
		break;
	case GetHubDescriptor:
		DBG(0, "GetHubDescriptor\n");
		isp1362_hub_descriptor(isp1362_hcd, (struct usb_hub_descriptor *)buf);
		break;
	case GetHubStatus:
		DBG(0, "GetHubStatus\n");
		/* no hub-wide status bits are ever reported */
		put_unaligned(cpu_to_le32(0), (__le32 *) buf);
		break;
	case GetPortStatus:
#ifndef VERBOSE
		DBG(0, "GetPortStatus\n");
#endif
		if (!wIndex || wIndex > ports)
			goto error;
		/* cached copy, kept up to date by the interrupt handler */
		tmp = isp1362_hcd->rhport[--wIndex];
		put_unaligned(cpu_to_le32(tmp), (__le32 *) buf);
		break;
	case ClearPortFeature:
		DBG(0, "ClearPortFeature: ");
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;

		/* map the feature onto the HCRHPORTx bit to write */
		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			_DBG(0, "USB_PORT_FEAT_ENABLE\n");
			tmp = RH_PS_CCS;
			break;
		case USB_PORT_FEAT_C_ENABLE:
			_DBG(0, "USB_PORT_FEAT_C_ENABLE\n");
			tmp = RH_PS_PESC;
			break;
		case USB_PORT_FEAT_SUSPEND:
			_DBG(0, "USB_PORT_FEAT_SUSPEND\n");
			tmp = RH_PS_POCI;
			break;
		case USB_PORT_FEAT_C_SUSPEND:
			_DBG(0, "USB_PORT_FEAT_C_SUSPEND\n");
			tmp = RH_PS_PSSC;
			break;
		case USB_PORT_FEAT_POWER:
			_DBG(0, "USB_PORT_FEAT_POWER\n");
			tmp = RH_PS_LSDA;
			break;
		case USB_PORT_FEAT_C_CONNECTION:
			_DBG(0, "USB_PORT_FEAT_C_CONNECTION\n");
			tmp = RH_PS_CSC;
			break;
		case USB_PORT_FEAT_C_OVER_CURRENT:
			_DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n");
			tmp = RH_PS_OCIC;
			break;
		case USB_PORT_FEAT_C_RESET:
			_DBG(0, "USB_PORT_FEAT_C_RESET\n");
			tmp = RH_PS_PRSC;
			break;
		default:
			goto error;
		}

		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, tmp);
		/* refresh the cached port status */
		isp1362_hcd->rhport[wIndex] =
			isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		break;
	case SetPortFeature:
		DBG(0, "SetPortFeature: ");
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			_DBG(0, "USB_PORT_FEAT_SUSPEND\n");
#ifdef CONFIG_USB_OTG
			/* NOTE(review): this branch references an 'ohci'
			 * variable that does not exist in this function
			 * (copied from ohci-hub.c); it cannot compile with
			 * CONFIG_USB_OTG enabled. */
			if (ohci->hcd.self.otg_port == (wIndex + 1) &&
			    ohci->hcd.self.b_hnp_enable) {
				start_hnp(ohci);
				break;
			}
#endif
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PSS);
			isp1362_hcd->rhport[wIndex] =
				isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			break;
		case USB_PORT_FEAT_POWER:
			_DBG(0, "USB_PORT_FEAT_POWER\n");
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PPS);
			isp1362_hcd->rhport[wIndex] =
				isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			break;
		case USB_PORT_FEAT_RESET:
			_DBG(0, "USB_PORT_FEAT_RESET\n");
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			/* keep re-asserting reset until the device disconnects
			 * or USB_RESET_WIDTH ms have elapsed */
			t1 = jiffies + msecs_to_jiffies(USB_RESET_WIDTH);
			while (time_before(jiffies, t1)) {
				/* spin until any current reset finishes */
				for (;;) {
					tmp = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
					if (!(tmp & RH_PS_PRS))
						break;
					udelay(500);
				}
				/* device gone? then stop resetting */
				if (!(tmp & RH_PS_CCS))
					break;
				/* Reset lasts 10ms (claims datasheet) */
				isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, (RH_PS_PRS));

				/* drop the lock across the sleep */
				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
				msleep(10);
				spin_lock_irqsave(&isp1362_hcd->lock, flags);
			}

			isp1362_hcd->rhport[wIndex] = isp1362_read_reg32(isp1362_hcd,
									 HCRHPORT1 + wIndex);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			break;
		default:
			goto error;
		}
		break;

	default:
 error:
		/* "protocol stall" on error */
		_DBG(0, "PROTOCOL STALL\n");
		retval = -EPIPE;
	}
	return retval;
}
#ifdef CONFIG_PM
/*
 * Root hub bus_suspend() method (CONFIG_PM): drain all active transfer
 * queues, then put the OHCI core into the USB_SUSPEND functional
 * state.  Returns 0 on success, -EBUSY if the controller is in
 * RESET/RESUME state (needs reinit) or refuses to enter SUSPEND.
 */
static int isp1362_bus_suspend(struct usb_hcd *hcd)
{
	int status = 0;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;

	/* honour the minimum gap between root hub state changes */
	if (time_before(jiffies, isp1362_hcd->next_statechange))
		msleep(5);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
	switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
	case OHCI_USB_RESUME:
		/* suspending in mid-resume: force a reset instead */
		DBG(0, "%s: resume/suspend?\n", __func__);
		isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
		isp1362_hcd->hc_control |= OHCI_USB_RESET;
		isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
		/* FALL THROUGH */
	case OHCI_USB_RESET:
		status = -EBUSY;
		pr_warning("%s: needs reinit!\n", __func__);
		goto done;
	case OHCI_USB_SUSPEND:
		pr_warning("%s: already suspended?\n", __func__);
		goto done;
	}
	DBG(0, "%s: suspend root hub\n", __func__);

	/* First stop any processing */
	hcd->state = HC_STATE_QUIESCING;
	if (!list_empty(&isp1362_hcd->atl_queue.active) ||
	    !list_empty(&isp1362_hcd->intl_queue.active) ||
	    !list_empty(&isp1362_hcd->istl_queue[0] .active) ||
	    !list_empty(&isp1362_hcd->istl_queue[1] .active)) {
		int limit;

		/* skip all pending PTDs and mask chip interrupts */
		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
		isp1362_write_reg16(isp1362_hcd, HCBUFSTAT, 0);
		isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
		isp1362_write_reg32(isp1362_hcd, HCINTSTAT, OHCI_INTR_SF);

		DBG(0, "%s: stopping schedules ...\n", __func__);
		/* poll up to 2ms for the next start-of-frame */
		limit = 2000;
		while (limit > 0) {
			udelay(250);
			limit -= 250;
			if (isp1362_read_reg32(isp1362_hcd, HCINTSTAT) & OHCI_INTR_SF)
				break;
		}
		mdelay(7);
		/* reap transfers that completed while we were stopping */
		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ATL) {
			u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
		}
		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_INTL) {
			u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
		}
		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL0)
			finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[0]);
		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL1)
			finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[1]);
	}
	DBG(0, "%s: HCINTSTAT: %08x\n", __func__,
	    isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
	/* write the status register back to itself to acknowledge
	 * everything still pending */
	isp1362_write_reg32(isp1362_hcd, HCINTSTAT,
			    isp1362_read_reg32(isp1362_hcd, HCINTSTAT));

	/* Suspend hub */
	isp1362_hcd->hc_control = OHCI_USB_SUSPEND;
	isp1362_show_reg(isp1362_hcd, HCCONTROL);
	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
	isp1362_show_reg(isp1362_hcd, HCCONTROL);

#if 1
	/* read back to verify the state change actually took effect */
	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
	if ((isp1362_hcd->hc_control & OHCI_CTRL_HCFS) != OHCI_USB_SUSPEND) {
		pr_err("%s: controller won't suspend %08x\n", __func__,
		       isp1362_hcd->hc_control);
		status = -EBUSY;
	} else
#endif
	{
		/* no resumes until devices finish suspending */
		isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(5);
	}
done:
	if (status == 0) {
		hcd->state = HC_STATE_SUSPENDED;
		DBG(0, "%s: HCD suspended: %08x\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCCONTROL));
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	return status;
}
/*
 * Root hub bus_resume() method (CONFIG_PM): bring the OHCI core from
 * SUSPEND back to the OPER functional state, forcing a resume on every
 * suspended port.  If the controller lost power (it reads back in
 * RESET state) it is fully restarted via isp1362_hc_stop()/_hc_start().
 * Returns 0 on success or a negative errno.
 */
static int isp1362_bus_resume(struct usb_hcd *hcd)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	u32 port;
	unsigned long flags;
	int status = -EINPROGRESS;	/* means: proceed with port resume below */

	/* honour the minimum gap between root hub state changes */
	if (time_before(jiffies, isp1362_hcd->next_statechange))
		msleep(5);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
	pr_info("%s: HCCONTROL: %08x\n", __func__, isp1362_hcd->hc_control);
	if (hcd->state == HC_STATE_RESUMING) {
		pr_warning("%s: duplicate resume\n", __func__);
		status = 0;
	} else
		switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
		case OHCI_USB_SUSPEND:
			DBG(0, "%s: resume root hub\n", __func__);
			isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
			isp1362_hcd->hc_control |= OHCI_USB_RESUME;
			isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
			break;
		case OHCI_USB_RESUME:
			/* HCFS changes sometime after INTR_RD */
			DBG(0, "%s: remote wakeup\n", __func__);
			break;
		case OHCI_USB_OPER:
			/* already running, nothing to resume */
			DBG(0, "%s: odd resume\n", __func__);
			status = 0;
			hcd->self.root_hub->dev.power.power_state = PMSG_ON;
			break;
		default:		/* RESET, we lost power */
			DBG(0, "%s: root hub hardware reset\n", __func__);
			status = -EBUSY;
		}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	if (status == -EBUSY) {
		/* power was lost: restart the controller from scratch */
		DBG(0, "%s: Restarting HC\n", __func__);
		isp1362_hc_stop(hcd);
		return isp1362_hc_start(hcd);
	}
	if (status != -EINPROGRESS)
		return status;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	port = isp1362_read_reg32(isp1362_hcd, HCRHDESCA) & RH_A_NDP;
	while (port--) {
		u32 stat = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + port);

		/* force global, not selective, resume */
		if (!(stat & RH_PS_PSS)) {
			DBG(0, "%s: Not Resuming RH port %d\n", __func__, port);
			continue;
		}
		DBG(0, "%s: Resuming RH port %d\n", __func__, port);
		isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + port, RH_PS_POCI);
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	/* Some controllers (lucent) need extra-long delays */
	hcd->state = HC_STATE_RESUMING;
	mdelay(20 /* usb 11.5.1.10 */ + 15);

	isp1362_hcd->hc_control = OHCI_USB_OPER;
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	isp1362_show_reg(isp1362_hcd, HCCONTROL);
	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	/* TRSMRCY */
	msleep(10);

	/* keep it alive for ~5x suspend + resume costs */
	isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(250);

	hcd->self.root_hub->dev.power.power_state = PMSG_ON;
	hcd->state = HC_STATE_RUNNING;
	return 0;
}
#else
#define isp1362_bus_suspend NULL
#define isp1362_bus_resume NULL
#endif
/*-------------------------------------------------------------------------*/
#ifdef STUB_DEBUG_FILE
/* STUB_DEBUG_FILE: the procfs debug interface is compiled out; these
 * empty inline hooks keep the callers free of #ifdefs. */
static inline void create_debug_file(struct isp1362_hcd *isp1362_hcd)
{
}

static inline void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
{
}
#else
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
/* Dump an ISP1362 microcontroller interrupt mask (HCuPINT/HCuPINTENB)
 * as "label  mask" followed by one symbolic tag per set bit. */
static void dump_irq(struct seq_file *s, char *label, u16 mask)
{
	seq_printf(s, "%-15s %04x", label, mask);
	if (mask & HCuPINT_CLKRDY)
		seq_printf(s, " clkrdy");
	if (mask & HCuPINT_SUSP)
		seq_printf(s, " susp");
	if (mask & HCuPINT_OPR)
		seq_printf(s, " opr");
	if (mask & HCuPINT_EOT)
		seq_printf(s, " eot");
	if (mask & HCuPINT_ATL)
		seq_printf(s, " atl");
	if (mask & HCuPINT_SOF)
		seq_printf(s, " sof");
	seq_printf(s, "\n");
}
/* Dump an OHCI interrupt mask (HCINTENB/HCINTSTAT) as "label  mask"
 * followed by one symbolic tag per set bit. */
static void dump_int(struct seq_file *s, char *label, u32 mask)
{
	seq_printf(s, "%-15s %08x", label, mask);
	if (mask & OHCI_INTR_MIE)
		seq_printf(s, " MIE");
	if (mask & OHCI_INTR_RHSC)
		seq_printf(s, " rhsc");
	if (mask & OHCI_INTR_FNO)
		seq_printf(s, " fno");
	if (mask & OHCI_INTR_UE)
		seq_printf(s, " ue");
	if (mask & OHCI_INTR_RD)
		seq_printf(s, " rd");
	if (mask & OHCI_INTR_SF)
		seq_printf(s, " sof");
	if (mask & OHCI_INTR_SO)
		seq_printf(s, " so");
	seq_printf(s, "\n");
}
/* Dump the OHCI HCCONTROL register: RWC/RWE flags plus a decoded
 * host controller functional state. */
static void dump_ctrl(struct seq_file *s, char *label, u32 mask)
{
	const char *hcfs;

	switch (mask & OHCI_CTRL_HCFS) {
	case OHCI_USB_OPER:
		hcfs = " oper";
		break;
	case OHCI_USB_RESET:
		hcfs = " reset";
		break;
	case OHCI_USB_RESUME:
		hcfs = " resume";
		break;
	case OHCI_USB_SUSPEND:
		hcfs = " suspend";
		break;
	default:
		hcfs = " ?";
	}

	seq_printf(s, "%-15s %08x%s%s%s\n", label, mask,
		   mask & OHCI_CTRL_RWC ? " rwc" : "",
		   mask & OHCI_CTRL_RWE ? " rwe" : "",
		   hcfs);
}
/* Dump every ISP1362 register to the seq_file, grouped as in the
 * chip's register map: OHCI operational registers, chip configuration,
 * then the INTL and ATL queue registers. */
static void dump_regs(struct seq_file *s, struct isp1362_hcd *isp1362_hcd)
{
/* Print one register as "NAME       [no] value".  The %-11s width
 * reproduces the aligned column layout of the original table. */
#define dump_reg32(r) \
	seq_printf(s, "%-11s[%02x] %08x\n", #r, \
		   ISP1362_REG_NO(ISP1362_REG_##r), \
		   isp1362_read_reg32(isp1362_hcd, r))
#define dump_reg16(r) \
	seq_printf(s, "%-11s[%02x] %04x\n", #r, \
		   ISP1362_REG_NO(ISP1362_REG_##r), \
		   isp1362_read_reg16(isp1362_hcd, r))

	/* OHCI operational registers */
	dump_reg32(HCREVISION);
	dump_reg32(HCCONTROL);
	dump_reg32(HCCMDSTAT);
	dump_reg32(HCINTSTAT);
	dump_reg32(HCINTENB);
	dump_reg32(HCFMINTVL);
	dump_reg32(HCFMREM);
	dump_reg32(HCFMNUM);
	dump_reg32(HCLSTHRESH);
	dump_reg32(HCRHDESCA);
	dump_reg32(HCRHDESCB);
	dump_reg32(HCRHSTATUS);
	dump_reg32(HCRHPORT1);
	dump_reg32(HCRHPORT2);
	seq_printf(s, "\n");
	/* ISP1362 configuration/status registers */
	dump_reg16(HCHWCFG);
	dump_reg16(HCDMACFG);
	dump_reg16(HCXFERCTR);
	dump_reg16(HCuPINT);
	dump_reg16(HCuPINTENB);
	dump_reg16(HCCHIPID);
	dump_reg16(HCSCRATCH);
	dump_reg16(HCBUFSTAT);
	dump_reg32(HCDIRADDR);
#if 0
	/* disabled in the original table as well */
	dump_reg16(HCDIRDATA);
#endif
	dump_reg16(HCISTLBUFSZ);
	dump_reg16(HCISTLRATE);
	seq_printf(s, "\n");
	/* INTL queue registers */
	dump_reg16(HCINTLBUFSZ);
	dump_reg16(HCINTLBLKSZ);
	dump_reg32(HCINTLDONE);
	dump_reg32(HCINTLSKIP);
	dump_reg32(HCINTLLAST);
	dump_reg16(HCINTLCURR);
	seq_printf(s, "\n");
	/* ATL queue registers */
	dump_reg16(HCATLBUFSZ);
	dump_reg16(HCATLBLKSZ);
#if 0
	/* disabled in the original table as well */
	dump_reg32(HCATLDONE);
#endif
	dump_reg32(HCATLSKIP);
	dump_reg32(HCATLLAST);
	dump_reg16(HCATLCURR);
	seq_printf(s, "\n");
	dump_reg16(HCATLDTC);
	dump_reg16(HCATLDTCTO);
#undef dump_reg32
#undef dump_reg16
}
/*
 * seq_file show routine backing /proc/driver/isp1362: dump buffer
 * alignment statistics, PTD fifo high-water marks, all chip registers
 * and the async/periodic/isoc schedules with their queued URBs.
 * Always returns 0.
 */
static int proc_isp1362_show(struct seq_file *s, void *unused)
{
	struct isp1362_hcd *isp1362_hcd = s->private;
	struct isp1362_ep *ep;
	int i;

	seq_printf(s, "%s\n%s version %s\n",
		   isp1362_hcd_to_hcd(isp1362_hcd)->product_desc, hcd_name, DRIVER_VERSION);

	/* collect statistics to help estimate potential win for
	 * DMA engines that care about alignment (PXA)
	 */
	seq_printf(s, "alignment: 16b/%ld 8b/%ld 4b/%ld 2b/%ld 1b/%ld\n",
		   isp1362_hcd->stat16, isp1362_hcd->stat8, isp1362_hcd->stat4,
		   isp1362_hcd->stat2, isp1362_hcd->stat1);
	seq_printf(s, "max # ptds in ATL fifo: %d\n", isp1362_hcd->atl_queue.stat_maxptds);
	seq_printf(s, "max # ptds in INTL fifo: %d\n", isp1362_hcd->intl_queue.stat_maxptds);
	seq_printf(s, "max # ptds in ISTL fifo: %d\n",
		   max(isp1362_hcd->istl_queue[0] .stat_maxptds,
		       isp1362_hcd->istl_queue[1] .stat_maxptds));

	/* FIXME: don't show the following in suspended state */
	spin_lock_irq(&isp1362_hcd->lock);
	dump_irq(s, "hc_irq_enable", isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
	dump_irq(s, "hc_irq_status", isp1362_read_reg16(isp1362_hcd, HCuPINT));
	dump_int(s, "ohci_int_enable", isp1362_read_reg32(isp1362_hcd, HCINTENB));
	dump_int(s, "ohci_int_status", isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
	dump_ctrl(s, "ohci_control", isp1362_read_reg32(isp1362_hcd, HCCONTROL));

	/* per-source interrupt counters maintained by the irq handler */
	for (i = 0; i < NUM_ISP1362_IRQS; i++)
		if (isp1362_hcd->irq_stat[i])
			seq_printf(s, "%-15s: %d\n",
				   ISP1362_INT_NAME(i), isp1362_hcd->irq_stat[i]);

	dump_regs(s, isp1362_hcd);
	list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
		struct urb *urb;

		seq_printf(s, "%p, ep%d%s, maxpacket %d:\n", ep, ep->epnum,
			   ({
				   /* NOTE: this local 's' shadows the
				    * seq_file parameter inside the
				    * statement expression */
				   char *s;
				   switch (ep->nextpid) {
				   case USB_PID_IN:
					   s = "in";
					   break;
				   case USB_PID_OUT:
					   s = "out";
					   break;
				   case USB_PID_SETUP:
					   s = "setup";
					   break;
				   case USB_PID_ACK:
					   s = "status";
					   break;
				   default:
					   s = "?";
					   break;
				   };
				   s;}), ep->maxpacket) ;
		list_for_each_entry(urb, &ep->hep->urb_list, urb_list) {
			seq_printf(s, " urb%p, %d/%d\n", urb,
				   urb->actual_length,
				   urb->transfer_buffer_length);
		}
	}
	if (!list_empty(&isp1362_hcd->async))
		seq_printf(s, "\n");
	dump_ptd_queue(&isp1362_hcd->atl_queue);

	seq_printf(s, "periodic size= %d\n", PERIODIC_SIZE);

	list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
		seq_printf(s, "branch:%2d load:%3d PTD[%d] $%04x:\n", ep->branch,
			   isp1362_hcd->load[ep->branch], ep->ptd_index, ep->ptd_offset);

		seq_printf(s, " %d/%p (%sdev%d ep%d%s max %d)\n",
			   ep->interval, ep,
			   (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
			   ep->udev->devnum, ep->epnum,
			   (ep->epnum == 0) ? "" :
			   ((ep->nextpid == USB_PID_IN) ?
			    "in" : "out"), ep->maxpacket);
	}
	dump_ptd_queue(&isp1362_hcd->intl_queue);

	seq_printf(s, "ISO:\n");

	list_for_each_entry(ep, &isp1362_hcd->isoc, schedule) {
		seq_printf(s, " %d/%p (%sdev%d ep%d%s max %d)\n",
			   ep->interval, ep,
			   (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
			   ep->udev->devnum, ep->epnum,
			   (ep->epnum == 0) ? "" :
			   ((ep->nextpid == USB_PID_IN) ?
			    "in" : "out"), ep->maxpacket);
	}

	spin_unlock_irq(&isp1362_hcd->lock);
	seq_printf(s, "\n");

	return 0;
}
/* proc open hook: hand the isp1362_hcd stashed in the proc entry's
 * data pointer to the seq_file show routine. */
static int proc_isp1362_open(struct inode *inode, struct file *file)
{
	struct isp1362_hcd *isp1362_hcd = PDE(inode)->data;

	return single_open(file, proc_isp1362_show, isp1362_hcd);
}
/* seq_file plumbing for the /proc/driver/isp1362 debug file. */
static const struct file_operations proc_ops = {
	.open = proc_isp1362_open,
	.release = single_release,
	.read = seq_read,
	.llseek = seq_lseek,
};
/* expect just one isp1362_hcd per system */
static const char proc_filename[] = "driver/isp1362";
/* Create /proc/driver/isp1362 and wire it up to proc_ops; on failure
 * only a warning is printed (debug file is best-effort). */
static void create_debug_file(struct isp1362_hcd *isp1362_hcd)
{
	struct proc_dir_entry *pde = create_proc_entry(proc_filename, 0, NULL);

	if (!pde) {
		pr_warning("%s: Failed to create debug file '%s'\n", __func__, proc_filename);
		return;
	}

	pde->proc_fops = &proc_ops;
	pde->data = isp1362_hcd;
	isp1362_hcd->pde = pde;
}
static void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
{
if (isp1362_hcd->pde)
remove_proc_entry(proc_filename, 0);
}
#endif
/*-------------------------------------------------------------------------*/
/*
 * Software-reset the chip: unlock with the HCSWRES magic value, set
 * the OHCI host controller reset bit, then poll (up to ~19 ms) for the
 * chip to clear it.  Acquires isp1362_hcd->lock itself, so it must NOT
 * be called with that lock already held.
 */
static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
{
	int retries = 20;
	unsigned long flags;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	isp1362_write_reg16(isp1362_hcd, HCSWRES, HCSWRES_MAGIC);
	isp1362_write_reg32(isp1362_hcd, HCCMDSTAT, OHCI_HCR);

	while (--retries) {
		mdelay(1);
		if (!(isp1362_read_reg32(isp1362_hcd, HCCMDSTAT) & OHCI_HCR))
			break;
	}
	if (!retries)
		pr_err("Software reset timeout\n");

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
}
static int isp1362_mem_config(struct usb_hcd *hcd)
{
struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
unsigned long flags;
u32 total;
u16 istl_size = ISP1362_ISTL_BUFSIZE;
u16 intl_blksize = ISP1362_INTL_BLKSIZE + PTD_HEADER_SIZE;
u16 intl_size = ISP1362_INTL_BUFFERS * intl_blksize;
u16 atl_blksize = ISP1362_ATL_BLKSIZE + PTD_HEADER_SIZE;
u16 atl_buffers = (ISP1362_BUF_SIZE - (istl_size + intl_size)) / atl_blksize;
u16 atl_size;
int i;
WARN_ON(istl_size & 3);
WARN_ON(atl_blksize & 3);
WARN_ON(intl_blksize & 3);
WARN_ON(atl_blksize < PTD_HEADER_SIZE);
WARN_ON(intl_blksize < PTD_HEADER_SIZE);
BUG_ON((unsigned)ISP1362_INTL_BUFFERS > 32);
if (atl_buffers > 32)
atl_buffers = 32;
atl_size = atl_buffers * atl_blksize;
total = atl_size + intl_size + istl_size;
dev_info(hcd->self.controller, "ISP1362 Memory usage:\n");
dev_info(hcd->self.controller, " ISTL: 2 * %4d: %4d @ $%04x:$%04x\n",
istl_size / 2, istl_size, 0, istl_size / 2);
dev_info(hcd->self.controller, " INTL: %4d * (%3zu+8): %4d @ $%04x\n",
ISP1362_INTL_BUFFERS, intl_blksize - PTD_HEADER_SIZE,
intl_size, istl_size);
dev_info(hcd->self.controller, " ATL : %4d * (%3zu+8): %4d @ $%04x\n",
atl_buffers, atl_blksize - PTD_HEADER_SIZE,
atl_size, istl_size + intl_size);
dev_info(hcd->self.controller, " USED/FREE: %4d %4d\n", total,
ISP1362_BUF_SIZE - total);
if (total > ISP1362_BUF_SIZE) {
dev_err(hcd->self.controller, "%s: Memory requested: %d, available %d\n",
__func__, total, ISP1362_BUF_SIZE);
return -ENOMEM;
}
total = istl_size + intl_size + atl_size;
spin_lock_irqsave(&isp1362_hcd->lock, flags);
for (i = 0; i < 2; i++) {
isp1362_hcd->istl_queue[i].buf_start = i * istl_size / 2,
isp1362_hcd->istl_queue[i].buf_size = istl_size / 2;
isp1362_hcd->istl_queue[i].blk_size = 4;
INIT_LIST_HEAD(&isp1362_hcd->istl_queue[i].active);
snprintf(isp1362_hcd->istl_queue[i].name,
sizeof(isp1362_hcd->istl_queue[i].name), "ISTL%d", i);
DBG(3, "%s: %5s buf $%04x %d\n", __func__,
isp1362_hcd->istl_queue[i].name,
isp1362_hcd->istl_queue[i].buf_start,
isp1362_hcd->istl_queue[i].buf_size);
}
isp1362_write_reg16(isp1362_hcd, HCISTLBUFSZ, istl_size / 2);
isp1362_hcd->intl_queue.buf_start = istl_size;
isp1362_hcd->intl_queue.buf_size = intl_size;
isp1362_hcd->intl_queue.buf_count = ISP1362_INTL_BUFFERS;
isp1362_hcd->intl_queue.blk_size = intl_blksize;
isp1362_hcd->intl_queue.buf_avail = isp1362_hcd->intl_queue.buf_count;
isp1362_hcd->intl_queue.skip_map = ~0;
INIT_LIST_HEAD(&isp1362_hcd->intl_queue.active);
isp1362_write_reg16(isp1362_hcd, HCINTLBUFSZ,
isp1362_hcd->intl_queue.buf_size);
isp1362_write_reg16(isp1362_hcd, HCINTLBLKSZ,
isp1362_hcd->intl_queue.blk_size - PTD_HEADER_SIZE);
isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
isp1362_write_reg32(isp1362_hcd, HCINTLLAST,
1 << (ISP1362_INTL_BUFFERS - 1));
isp1362_hcd->atl_queue.buf_start = istl_size + intl_size;
isp1362_hcd->atl_queue.buf_size = atl_size;
isp1362_hcd->atl_queue.buf_count = atl_buffers;
isp1362_hcd->atl_queue.blk_size = atl_blksize;
isp1362_hcd->atl_queue.buf_avail = isp1362_hcd->atl_queue.buf_count;
isp1362_hcd->atl_queue.skip_map = ~0;
INIT_LIST_HEAD(&isp1362_hcd->atl_queue.active);
isp1362_write_reg16(isp1362_hcd, HCATLBUFSZ,
isp1362_hcd->atl_queue.buf_size);
isp1362_write_reg16(isp1362_hcd, HCATLBLKSZ,
isp1362_hcd->atl_queue.blk_size - PTD_HEADER_SIZE);
isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
isp1362_write_reg32(isp1362_hcd, HCATLLAST,
1 << (atl_buffers - 1));
snprintf(isp1362_hcd->atl_queue.name,
sizeof(isp1362_hcd->atl_queue.name), "ATL");
snprintf(isp1362_hcd->intl_queue.name,
sizeof(isp1362_hcd->intl_queue.name), "INTL");
DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
isp1362_hcd->intl_queue.name,
isp1362_hcd->intl_queue.buf_start,
ISP1362_INTL_BUFFERS, isp1362_hcd->intl_queue.blk_size,
isp1362_hcd->intl_queue.buf_size);
DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
isp1362_hcd->atl_queue.name,
isp1362_hcd->atl_queue.buf_start,
atl_buffers, isp1362_hcd->atl_queue.blk_size,
isp1362_hcd->atl_queue.buf_size);
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
return 0;
}
/*
 * HCD ->reset() callback: take the ISP1362 out of reset and wait for
 * its internal clock to become stable.
 *
 * Uses the board-supplied reset/clock hooks when available, otherwise
 * falls back to a software reset.  Returns 0 on success or -ENODEV if
 * HCuPINT_CLKRDY is not seen within ~100ms.
 */
static int isp1362_hc_reset(struct usb_hcd *hcd)
{
	int ret = 0;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long t;
	unsigned long timeout = 100;	/* ms to wait for a stable clock */
	unsigned long flags;
	int clkrdy = 0;

	pr_info("%s:\n", __func__);

	if (isp1362_hcd->board && isp1362_hcd->board->reset) {
		/* assert reset, let it settle, enable the clock, deassert */
		isp1362_hcd->board->reset(hcd->self.controller, 1);
		msleep(20);
		if (isp1362_hcd->board->clock)
			isp1362_hcd->board->clock(hcd->self.controller, 1);
		isp1362_hcd->board->reset(hcd->self.controller, 0);
	} else
		isp1362_sw_reset(isp1362_hcd);

	/* chip has been reset. First we need to see a clock */
	t = jiffies + msecs_to_jiffies(timeout);
	while (!clkrdy && time_before_eq(jiffies, t)) {
		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		clkrdy = isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_CLKRDY;
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		if (!clkrdy)
			msleep(4);
	}

	/* acknowledge the CLKRDY interrupt whether or not we saw it in time */
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_CLKRDY);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	if (!clkrdy) {
		pr_err("Clock not ready after %lums\n", timeout);
		ret = -ENODEV;
	}
	return ret;
}
/*
 * HCD ->stop() callback: quiesce the controller.  Disables all chip
 * interrupt sources, powers down the root-hub ports, then puts the
 * chip back into reset (board hook or software reset) and gates its
 * clock off.
 */
static void isp1362_hc_stop(struct usb_hcd *hcd)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;
	u32 tmp;

	pr_info("%s:\n", __func__);

	del_timer_sync(&hcd->rh_timer);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* mask every microprocessor interrupt source first */
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);

	/* Switch off power for all ports */
	tmp = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
	tmp &= ~(RH_A_NPS | RH_A_PSM);
	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, tmp);
	isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);

	/* Reset the chip */
	if (isp1362_hcd->board && isp1362_hcd->board->reset)
		isp1362_hcd->board->reset(hcd->self.controller, 1);
	else
		isp1362_sw_reset(isp1362_hcd);

	if (isp1362_hcd->board && isp1362_hcd->board->clock)
		isp1362_hcd->board->clock(hcd->self.controller, 0);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
}
#ifdef CHIP_BUFFER_TEST
/*
 * Optional self-test of the ISP1362's internal buffer memory, compiled
 * in only when CHIP_BUFFER_TEST is defined.
 *
 * Phase 1: short transfers (0..7 bytes) at byte offsets 0..3.
 * Phase 2: one full-buffer write/read/compare.
 * Phase 3: PTD-header-sized transfers at 256 word offsets, with a
 *          clear-check first and a second read attempt on mismatch
 *          (see the pr_warning below).
 *
 * Returns 0 if the memory checks pass, -ENODEV on a hard mismatch.
 */
static int isp1362_chip_test(struct isp1362_hcd *isp1362_hcd)
{
	int ret = 0;
	u16 *ref;
	unsigned long flags;

	/* single allocation: first half holds the reference pattern,
	 * second half (tst) receives the data read back from the chip */
	ref = kmalloc(2 * ISP1362_BUF_SIZE, GFP_KERNEL);
	if (ref) {
		int offset;
		u16 *tst = &ref[ISP1362_BUF_SIZE / 2];

		for (offset = 0; offset < ISP1362_BUF_SIZE / 2; offset++) {
			ref[offset] = ~offset;
			tst[offset] = offset;
		}

		for (offset = 0; offset < 4; offset++) {
			int j;

			for (j = 0; j < 8; j++) {
				spin_lock_irqsave(&isp1362_hcd->lock, flags);
				isp1362_write_buffer(isp1362_hcd, (u8 *)ref + offset, 0, j);
				isp1362_read_buffer(isp1362_hcd, (u8 *)tst + offset, 0, j);
				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

				/* NOTE(review): the transfers use ref/tst + offset but the
				 * compare starts at offset 0; bytes below `offset` rely on
				 * earlier iterations having already synced them — confirm
				 * this is intended */
				if (memcmp(ref, tst, j)) {
					ret = -ENODEV;
					pr_err("%s: memory check with %d byte offset %d failed\n",
					    __func__, j, offset);
					dump_data((u8 *)ref + offset, j);
					dump_data((u8 *)tst + offset, j);
				}
			}
		}

		/* phase 2: full-buffer round trip */
		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		isp1362_write_buffer(isp1362_hcd, ref, 0, ISP1362_BUF_SIZE);
		isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		if (memcmp(ref, tst, ISP1362_BUF_SIZE)) {
			ret = -ENODEV;
			pr_err("%s: memory check failed\n", __func__);
			dump_data((u8 *)tst, ISP1362_BUF_SIZE / 2);
		}

		for (offset = 0; offset < 256; offset++) {
			/* NOTE(review): test_size stays 0, so only the PTD header part
			 * of each transfer is exercised — presumably a leftover tuning
			 * knob; confirm */
			int test_size = 0;

			yield();

			/* clear the chip buffer and verify both halves read back zero */
			memset(tst, 0, ISP1362_BUF_SIZE);
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
			isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			if (memcmp(tst, tst + (ISP1362_BUF_SIZE / (2 * sizeof(*tst))),
				   ISP1362_BUF_SIZE / 2)) {
				pr_err("%s: Failed to clear buffer\n", __func__);
				dump_data((u8 *)tst, ISP1362_BUF_SIZE);
				break;
			}

			/* header + payload transfer at a word-aligned offset */
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_buffer(isp1362_hcd, ref, offset * 2, PTD_HEADER_SIZE);
			isp1362_write_buffer(isp1362_hcd, ref + PTD_HEADER_SIZE / sizeof(*ref),
			    offset * 2 + PTD_HEADER_SIZE, test_size);
			isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
			    PTD_HEADER_SIZE + test_size);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
				dump_data(((u8 *)ref) + offset, PTD_HEADER_SIZE + test_size);
				dump_data((u8 *)tst, PTD_HEADER_SIZE + test_size);
				/* re-read once: the part occasionally needs a second read */
				spin_lock_irqsave(&isp1362_hcd->lock, flags);
				isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
				    PTD_HEADER_SIZE + test_size);
				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
				if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
					ret = -ENODEV;
					pr_err("%s: memory check with offset %02x failed\n",
					    __func__, offset);
					break;
				}
				pr_warning("%s: memory check with offset %02x ok after second read\n",
				    __func__, offset);
			}
		}
		kfree(ref);
	}
	return ret;
}
#endif
/*
 * HCD ->start() callback: verify the chip ID, program the hardware
 * configuration from platform data, lay out the on-chip buffer memory
 * (via isp1362_mem_config), configure the root hub, and finally put
 * the controller into the OHCI "operational" state with interrupts
 * enabled.  Returns 0 on success or a negative errno.
 */
static int isp1362_hc_start(struct usb_hcd *hcd)
{
	int ret;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	struct isp1362_platform_data *board = isp1362_hcd->board;
	u16 hwcfg;
	u16 chipid;
	unsigned long flags;

	pr_info("%s:\n", __func__);

	/* sanity check: make sure we are really talking to an ISP1362 */
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	chipid = isp1362_read_reg16(isp1362_hcd, HCCHIPID);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	if ((chipid & HCCHIPID_MASK) != HCCHIPID_MAGIC) {
		pr_err("%s: Invalid chip ID %04x\n", __func__, chipid);
		return -ENODEV;
	}

#ifdef CHIP_BUFFER_TEST
	ret = isp1362_chip_test(isp1362_hcd);
	if (ret)
		return -ENODEV;
#endif
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	/* clear interrupt status and disable all interrupt sources */
	isp1362_write_reg16(isp1362_hcd, HCuPINT, 0xff);
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);

	/* HW conf: translate the board flags into HCHWCFG bits */
	hwcfg = HCHWCFG_INT_ENABLE | HCHWCFG_DBWIDTH(1);
	if (board->sel15Kres)
		hwcfg |= HCHWCFG_PULLDOWN_DS2 |
			((MAX_ROOT_PORTS > 1) ? HCHWCFG_PULLDOWN_DS1 : 0);
	if (board->clknotstop)
		hwcfg |= HCHWCFG_CLKNOTSTOP;
	if (board->oc_enable)
		hwcfg |= HCHWCFG_ANALOG_OC;
	if (board->int_act_high)
		hwcfg |= HCHWCFG_INT_POL;
	if (board->int_edge_triggered)
		hwcfg |= HCHWCFG_INT_TRIGGER;
	if (board->dreq_act_high)
		hwcfg |= HCHWCFG_DREQ_POL;
	if (board->dack_act_high)
		hwcfg |= HCHWCFG_DACK_POL;
	isp1362_write_reg16(isp1362_hcd, HCHWCFG, hwcfg);
	isp1362_show_reg(isp1362_hcd, HCHWCFG);
	isp1362_write_reg16(isp1362_hcd, HCDMACFG, 0);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	/* partition the on-chip buffer memory into ISTL/INTL/ATL areas */
	ret = isp1362_mem_config(hcd);
	if (ret)
		return ret;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* Root hub conf */
	isp1362_hcd->rhdesca = 0;
	if (board->no_power_switching)
		isp1362_hcd->rhdesca |= RH_A_NPS;
	if (board->power_switching_mode)
		isp1362_hcd->rhdesca |= RH_A_PSM;
	/* power-on-to-power-good time, in units of 2ms (default 50ms) */
	if (board->potpg)
		isp1362_hcd->rhdesca |= (board->potpg << 24) & RH_A_POTPGT;
	else
		isp1362_hcd->rhdesca |= (25 << 24) & RH_A_POTPGT;

	/* toggle OCPM, then read back what the hardware actually accepted */
	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca & ~RH_A_OCPM);
	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca | RH_A_OCPM);
	isp1362_hcd->rhdesca = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);

	isp1362_hcd->rhdescb = RH_B_PPCM;
	isp1362_write_reg32(isp1362_hcd, HCRHDESCB, isp1362_hcd->rhdescb);
	isp1362_hcd->rhdescb = isp1362_read_reg32(isp1362_hcd, HCRHDESCB);

	/* program the frame interval and low-speed threshold */
	isp1362_read_reg32(isp1362_hcd, HCFMINTVL);
	isp1362_write_reg32(isp1362_hcd, HCFMINTVL, (FSMP(FI) << 16) | FI);
	isp1362_write_reg32(isp1362_hcd, HCLSTHRESH, LSTHRESH);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	isp1362_hcd->hc_control = OHCI_USB_OPER;
	hcd->state = HC_STATE_RUNNING;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	/* Set up interrupts */
	isp1362_hcd->intenb = OHCI_INTR_MIE | OHCI_INTR_RHSC | OHCI_INTR_UE;
	isp1362_hcd->intenb |= OHCI_INTR_RD;
	isp1362_hcd->irqenb = HCuPINT_OPR | HCuPINT_SUSP;
	isp1362_write_reg32(isp1362_hcd, HCINTENB, isp1362_hcd->intenb);
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);

	/* Go operational */
	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
	/* enable global power */
	isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC | RH_HS_DRWE);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	return 0;
}
/*-------------------------------------------------------------------------*/
/*
 * hc_driver operations table passed to usb_create_hcd(); identifies
 * this as a USB 1.1 host with memory-mapped (non-PCI) registers.
 */
static struct hc_driver isp1362_hc_driver = {
	.description = hcd_name,
	.product_desc = "ISP1362 Host Controller",
	.hcd_priv_size = sizeof(struct isp1362_hcd),

	.irq = isp1362_irq,
	.flags = HCD_USB11 | HCD_MEMORY,

	.reset = isp1362_hc_reset,
	.start = isp1362_hc_start,
	.stop = isp1362_hc_stop,

	.urb_enqueue = isp1362_urb_enqueue,
	.urb_dequeue = isp1362_urb_dequeue,
	.endpoint_disable = isp1362_endpoint_disable,

	.get_frame_number = isp1362_get_frame,

	.hub_status_data = isp1362_hub_status_data,
	.hub_control = isp1362_hub_control,
	.bus_suspend = isp1362_bus_suspend,
	.bus_resume = isp1362_bus_resume,
};
/*-------------------------------------------------------------------------*/
/* number of bytes covered by a struct resource (same as the later
 * kernels' resource_size() helper) */
#define resource_len(r) (((r)->end - (r)->start) + 1)
/*
 * Platform ->remove(): tear down the HCD registered by isp1362_probe()
 * and release all I/O resources in reverse order of acquisition.
 *
 * Fix: the original dereferenced res->start in the DBG() trace BEFORE
 * the `if (res)` NULL check, so a missing resource would oops in the
 * debug path instead of being skipped.  The traces now live inside the
 * checks.
 */
static int __devexit isp1362_remove(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	struct resource *res;

	remove_debug_file(isp1362_hcd);
	DBG(0, "%s: Removing HCD\n", __func__);
	usb_remove_hcd(hcd);

	DBG(0, "%s: Unmapping data_reg @ %p\n", __func__,
	    isp1362_hcd->data_reg);
	iounmap(isp1362_hcd->data_reg);

	DBG(0, "%s: Unmapping addr_reg @ %p\n", __func__,
	    isp1362_hcd->addr_reg);
	iounmap(isp1362_hcd->addr_reg);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		DBG(0, "%s: release mem_region: %08lx\n", __func__,
		    (long unsigned int)res->start);
		release_mem_region(res->start, resource_len(res));
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res) {
		DBG(0, "%s: release mem_region: %08lx\n", __func__,
		    (long unsigned int)res->start);
		release_mem_region(res->start, resource_len(res));
	}

	DBG(0, "%s: put_hcd\n", __func__);
	usb_put_hcd(hcd);
	DBG(0, "%s: Done\n", __func__);

	return 0;
}
/*
 * Platform ->probe(): map the two register windows, create and
 * register the HCD, and hook up the interrupt.
 *
 * Resource layout expected from the board code:
 *   IORESOURCE_MEM 0: data register window
 *   IORESOURCE_MEM 1: address register window
 *   IORESOURCE_MEM 2: DMA window (only with CONFIG_USB_HCD_DMA)
 *   IORESOURCE_IRQ 0: controller interrupt
 *
 * Fix: the CONFIG_USB_HCD_DMA branch used to write through the
 * still-uninitialized isp1362_hcd pointer (it is only valid after
 * usb_create_hcd() below).  The DMA resource is now validated early
 * and the fields are filled in once the hcd private data exists.
 */
static int __init isp1362_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	struct isp1362_hcd *isp1362_hcd;
	struct resource *addr, *data;
	void __iomem *addr_reg;
	void __iomem *data_reg;
	int irq;
	int retval = 0;
	struct resource *irq_res;
	unsigned int irq_flags = 0;
#ifdef CONFIG_USB_HCD_DMA
	struct resource *dma_res = NULL;
#endif

	/* basic sanity checks first. board-specific init logic should
	 * have initialized this the three resources and probably board
	 * specific platform_data. we don't probe for IRQs, and do only
	 * minimal sanity checking.
	 */
	if (pdev->num_resources < 3) {
		retval = -ENODEV;
		goto err1;
	}

	data = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	addr = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!addr || !data || !irq_res) {
		retval = -ENODEV;
		goto err1;
	}
	irq = irq_res->start;

#ifdef CONFIG_USB_HCD_DMA
	if (pdev->dev.dma_mask) {
		/* only validate here; data_dma/max_dma_size are stored
		 * after usb_create_hcd() allocates the private data */
		dma_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
		if (!dma_res) {
			retval = -ENODEV;
			goto err1;
		}
	}
#else
	if (pdev->dev.dma_mask) {
		DBG(1, "won't do DMA");
		retval = -ENODEV;
		goto err1;
	}
#endif

	/* reserve and map the address register window */
	if (!request_mem_region(addr->start, resource_len(addr), hcd_name)) {
		retval = -EBUSY;
		goto err1;
	}
	addr_reg = ioremap(addr->start, resource_len(addr));
	if (addr_reg == NULL) {
		retval = -ENOMEM;
		goto err2;
	}

	/* reserve and map the data register window */
	if (!request_mem_region(data->start, resource_len(data), hcd_name)) {
		retval = -EBUSY;
		goto err3;
	}
	data_reg = ioremap(data->start, resource_len(data));
	if (data_reg == NULL) {
		retval = -ENOMEM;
		goto err4;
	}

	/* allocate and initialize hcd */
	hcd = usb_create_hcd(&isp1362_hc_driver, &pdev->dev, dev_name(&pdev->dev));
	if (!hcd) {
		retval = -ENOMEM;
		goto err5;
	}
	hcd->rsrc_start = data->start;
	isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	isp1362_hcd->data_reg = data_reg;
	isp1362_hcd->addr_reg = addr_reg;
#ifdef CONFIG_USB_HCD_DMA
	if (dma_res) {
		isp1362_hcd->data_dma = dma_res->start;
		isp1362_hcd->max_dma_size = resource_len(dma_res);
	}
#endif

	isp1362_hcd->next_statechange = jiffies;
	spin_lock_init(&isp1362_hcd->lock);
	INIT_LIST_HEAD(&isp1362_hcd->async);
	INIT_LIST_HEAD(&isp1362_hcd->periodic);
	INIT_LIST_HEAD(&isp1362_hcd->isoc);
	INIT_LIST_HEAD(&isp1362_hcd->remove_list);
	isp1362_hcd->board = pdev->dev.platform_data;
#if USE_PLATFORM_DELAY
	if (!isp1362_hcd->board->delay) {
		dev_err(hcd->self.controller, "No platform delay function given\n");
		retval = -ENODEV;
		goto err6;
	}
#endif

	/* translate the IRQ resource flags into request_irq() triggers */
	if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
		irq_flags |= IRQF_TRIGGER_RISING;
	if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
		irq_flags |= IRQF_TRIGGER_FALLING;
	if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
		irq_flags |= IRQF_TRIGGER_HIGH;
	if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
		irq_flags |= IRQF_TRIGGER_LOW;

	retval = usb_add_hcd(hcd, irq, irq_flags | IRQF_DISABLED | IRQF_SHARED);
	if (retval != 0)
		goto err6;
	pr_info("%s, irq %d\n", hcd->product_desc, irq);

	create_debug_file(isp1362_hcd);

	return 0;

	/* error unwinding, in reverse order of acquisition */
 err6:
	DBG(0, "%s: Freeing dev %p\n", __func__, isp1362_hcd);
	usb_put_hcd(hcd);
 err5:
	DBG(0, "%s: Unmapping data_reg @ %p\n", __func__, data_reg);
	iounmap(data_reg);
 err4:
	DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)data->start);
	release_mem_region(data->start, resource_len(data));
 err3:
	DBG(0, "%s: Unmapping addr_reg @ %p\n", __func__, addr_reg);
	iounmap(addr_reg);
 err2:
	DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)addr->start);
	release_mem_region(addr->start, resource_len(addr));
 err1:
	pr_err("%s: init error, %d\n", __func__, retval);

	return retval;
}
#ifdef CONFIG_PM
/*
 * Platform ->suspend(): for a freeze, suspend the root hub via the
 * USB core; otherwise just drop power on the root-hub ports.  The new
 * power state is recorded only on success.
 */
static int isp1362_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;
	int retval = 0;

	DBG(0, "%s: Suspending device\n", __func__);

	if (state.event == PM_EVENT_FREEZE) {
		DBG(0, "%s: Suspending root hub\n", __func__);
		retval = isp1362_bus_suspend(hcd);
	} else {
		DBG(0, "%s: Suspending RH ports\n", __func__);
		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	}
	if (retval == 0)
		pdev->dev.power.power_state = state;
	return retval;
}
/*
 * Platform ->resume(): mirror of isp1362_suspend().  After a plain
 * SUSPEND only the root-hub port power is restored; otherwise the
 * root hub itself is resumed through the USB core.
 */
static int isp1362_resume(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;

	DBG(0, "%s: Resuming\n", __func__);

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		DBG(0, "%s: Resume RH ports\n", __func__);
		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC);
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		return 0;
	}

	pdev->dev.power.power_state = PMSG_ON;

	return isp1362_bus_resume(isp1362_hcd_to_hcd(isp1362_hcd));
}
#else
#define isp1362_suspend NULL
#define isp1362_resume NULL
#endif
/* platform driver glue; suspend/resume compile to NULL without CONFIG_PM */
static struct platform_driver isp1362_driver = {
	.probe = isp1362_probe,
	.remove = __devexit_p(isp1362_remove),

	.suspend = isp1362_suspend,
	.resume = isp1362_resume,
	.driver = {
		.name = (char *)hcd_name,
		.owner = THIS_MODULE,
	},
};
/*-------------------------------------------------------------------------*/
/* module init: register the platform driver unless USB is disabled
 * on the kernel command line (nousb) */
static int __init isp1362_init(void)
{
	if (usb_disabled())
		return -ENODEV;
	pr_info("driver %s, %s\n", hcd_name, DRIVER_VERSION);
	return platform_driver_register(&isp1362_driver);
}
module_init(isp1362_init);
/* module exit: unregister the platform driver */
static void __exit isp1362_cleanup(void)
{
	platform_driver_unregister(&isp1362_driver);
}
module_exit(isp1362_cleanup);
| thanhnhiel/linux-emcraft | drivers/usb/host/isp1362-hcd.c | C | gpl-2.0 | 89,685 |
/* linux/drivers/i2c/busses/i2c-s3c2410.c
*
* Copyright (C) 2004,2005 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* S3C2410 I2C Controller
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/i2c-id.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <asm/hardware.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/arch/regs-gpio.h>
#include <asm/arch/regs-iic.h>
#include <asm/arch/iic.h>
/* i2c controller state */
enum s3c24xx_i2c_state {
	STATE_IDLE,	/* no transfer in progress */
	STATE_START,	/* START condition / address byte on the bus */
	STATE_READ,	/* receiving data from the slave */
	STATE_WRITE,	/* sending data to the slave */
	STATE_STOP	/* transfer finished, STOP issued */
};
/* per-controller driver state */
struct s3c24xx_i2c {
	spinlock_t		lock;		/* protects the transfer state below */
	wait_queue_head_t	wait;		/* woken when msg_num drops to 0 */

	struct i2c_msg		*msg;		/* current message in the set */
	unsigned int		msg_num;	/* number of messages in the set */
	unsigned int		msg_idx;	/* current index; doubles as result code */
	unsigned int		msg_ptr;	/* byte position inside the current msg */

	unsigned int		tx_setup;	/* data setup delay in ns before SCL */

	enum s3c24xx_i2c_state	state;

	void __iomem		*regs;		/* mapped controller registers */
	struct clk		*clk;		/* clock feeding the IIC block */
	struct device		*dev;
	struct resource		*irq;
	struct resource		*ioarea;
	struct i2c_adapter	adap;
};
/* default platform data to use if not supplied in the platform_device:
 * 100kHz bus with a 400kHz search ceiling, slave address 0x10 */
static struct s3c2410_platform_i2c s3c24xx_i2c_default_platform = {
	.flags = 0,
	.slave_addr = 0x10,
	.bus_freq = 100*1000,
	.max_freq = 400*1000,
	.sda_delay = S3C2410_IICLC_SDA_DELAY5 | S3C2410_IICLC_FILTER_ON,
};
/* s3c24xx_i2c_is2440()
 *
 * return true if this controller instance was registered under the
 * s3c2440 device name (the 2440 variant has the extra IICLC register)
 */
static inline int s3c24xx_i2c_is2440(struct s3c24xx_i2c *i2c)
{
	struct platform_device *pdev = to_platform_device(i2c->dev);

	return strcmp(pdev->name, "s3c2440-i2c") == 0;
}
/* s3c24xx_i2c_get_platformdata
 *
 * get the platform data associated with the given device, falling
 * back to the built-in defaults when the board supplied none
 */
static inline struct s3c2410_platform_i2c *s3c24xx_i2c_get_platformdata(struct device *dev)
{
	struct s3c2410_platform_i2c *pdata = dev->platform_data;

	return pdata ? pdata : &s3c24xx_i2c_default_platform;
}
/* s3c24xx_i2c_master_complete
 *
 * complete the message and wake up the caller, using the given return code,
 * or zero to mean ok.
 */
static inline void s3c24xx_i2c_master_complete(struct s3c24xx_i2c *i2c, int ret)
{
	dev_dbg(i2c->dev, "master_complete %d\n", ret);

	i2c->msg_ptr = 0;
	i2c->msg = NULL;
	i2c->msg_idx ++;	/* count the final message as processed */
	i2c->msg_num = 0;	/* msg_num == 0 is the completion condition doxfer waits on */
	if (ret)
		i2c->msg_idx = ret;	/* a negative errno overrides the message count */

	wake_up(&i2c->wait);
}
static inline void s3c24xx_i2c_disable_ack(struct s3c24xx_i2c *i2c)
{
unsigned long tmp;
tmp = readl(i2c->regs + S3C2410_IICCON);
writel(tmp & ~S3C2410_IICCON_ACKEN, i2c->regs + S3C2410_IICCON);
}
static inline void s3c24xx_i2c_enable_ack(struct s3c24xx_i2c *i2c)
{
unsigned long tmp;
tmp = readl(i2c->regs + S3C2410_IICCON);
writel(tmp | S3C2410_IICCON_ACKEN, i2c->regs + S3C2410_IICCON);
}
/* irq enable/disable functions */
static inline void s3c24xx_i2c_disable_irq(struct s3c24xx_i2c *i2c)
{
unsigned long tmp;
tmp = readl(i2c->regs + S3C2410_IICCON);
writel(tmp & ~S3C2410_IICCON_IRQEN, i2c->regs + S3C2410_IICCON);
}
static inline void s3c24xx_i2c_enable_irq(struct s3c24xx_i2c *i2c)
{
unsigned long tmp;
tmp = readl(i2c->regs + S3C2410_IICCON);
writel(tmp | S3C2410_IICCON_IRQEN, i2c->regs + S3C2410_IICCON);
}
/* s3c24xx_i2c_message_start
 *
 * put the start of a message onto the bus: program the transfer
 * direction, load the (shifted) slave address into IICDS and then
 * trigger the START condition.  Register write order matters here.
 */
static void s3c24xx_i2c_message_start(struct s3c24xx_i2c *i2c,
				      struct i2c_msg *msg)
{
	unsigned int addr = (msg->addr & 0x7f) << 1;	/* 7-bit address, R/W bit in bit 0 */
	unsigned long stat;
	unsigned long iiccon;

	stat = 0;
	stat |= S3C2410_IICSTAT_TXRXEN;

	if (msg->flags & I2C_M_RD) {
		stat |= S3C2410_IICSTAT_MASTER_RX;
		addr |= 1;	/* read bit */
	} else
		stat |= S3C2410_IICSTAT_MASTER_TX;

	/* protocol-mangling flag: invert the R/W bit on request */
	if (msg->flags & I2C_M_REV_DIR_ADDR)
		addr ^= 1;

	/* TODO: check whether an ack is wanted or not */
	s3c24xx_i2c_enable_ack(i2c);

	iiccon = readl(i2c->regs + S3C2410_IICCON);
	writel(stat, i2c->regs + S3C2410_IICSTAT);

	dev_dbg(i2c->dev, "START: %08lx to IICSTAT, %02x to DS\n", stat, addr);
	writeb(addr, i2c->regs + S3C2410_IICDS);

	/* delay here to ensure the data byte has gotten onto the bus
	 * before the transaction is started */
	ndelay(i2c->tx_setup);

	dev_dbg(i2c->dev, "iiccon, %08lx\n", iiccon);
	writel(iiccon, i2c->regs + S3C2410_IICCON);

	/* finally raise the START condition */
	stat |= S3C2410_IICSTAT_START;
	writel(stat, i2c->regs + S3C2410_IICSTAT);
}
/* issue a STOP condition, complete the transfer with the given result
 * (0 or negative errno) and mask further controller interrupts */
static inline void s3c24xx_i2c_stop(struct s3c24xx_i2c *i2c, int ret)
{
	unsigned long iicstat = readl(i2c->regs + S3C2410_IICSTAT);

	dev_dbg(i2c->dev, "STOP\n");

	/* stop the transfer */
	iicstat &= ~ S3C2410_IICSTAT_START;
	writel(iicstat, i2c->regs + S3C2410_IICSTAT);

	i2c->state = STATE_STOP;

	s3c24xx_i2c_master_complete(i2c, ret);
	s3c24xx_i2c_disable_irq(i2c);
}
/* helper functions to determine the current state in the set of
 * messages we are sending */

/* is_lastmsg()
 *
 * returns TRUE if the current message is the last in the set
 * NOTE(review): msg_num is unsigned, so msg_num == 0 would wrap the
 * bound — callers only use this mid-transfer when msg_num > 0; confirm
 */
static inline int is_lastmsg(struct s3c24xx_i2c *i2c)
{
	return i2c->msg_idx >= (i2c->msg_num - 1);
}
/* is_msglast
 *
 * returns TRUE if this is the last byte in the current message
 */
static inline int is_msglast(struct s3c24xx_i2c *i2c)
{
	return i2c->msg_ptr == i2c->msg->len-1;
}
/* is_msgend
 *
 * returns TRUE if we reached the end of the current message
 * (msg_ptr has advanced past the final byte)
 */
static inline int is_msgend(struct s3c24xx_i2c *i2c)
{
	return i2c->msg_ptr >= i2c->msg->len;
}
/* i2s_s3c_irq_nextbyte
 *
 * process an interrupt and work out what to do next.  This is the
 * core transfer state machine, advanced one byte per interrupt;
 * iicstat is the IICSTAT value sampled by the caller.
 */
static int i2s_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
{
	unsigned long tmp;
	unsigned char byte;
	int ret = 0;

	switch (i2c->state) {

	case STATE_IDLE:
		/* an interrupt while idle is unexpected; just bail out
		 * (without even acking, see `goto out`) */
		dev_err(i2c->dev, "%s: called in STATE_IDLE\n", __FUNCTION__);
		goto out;
		break;

	case STATE_STOP:
		/* stray interrupt after the STOP was issued */
		dev_err(i2c->dev, "%s: called in STATE_STOP\n", __FUNCTION__);
		s3c24xx_i2c_disable_irq(i2c);
		goto out_ack;

	case STATE_START:
		/* last thing we did was send a start condition on the
		 * bus, or started a new i2c message
		 */
		if (iicstat & S3C2410_IICSTAT_LASTBIT &&
		    !(i2c->msg->flags & I2C_M_IGNORE_NAK)) {
			/* ack was not received... */
			dev_dbg(i2c->dev, "ack was not received\n");
			s3c24xx_i2c_stop(i2c, -EREMOTEIO);
			goto out_ack;
		}

		if (i2c->msg->flags & I2C_M_RD)
			i2c->state = STATE_READ;
		else
			i2c->state = STATE_WRITE;

		/* terminate the transfer if there is nothing to do
		 * (used by the i2c probe to find devices) */
		if (is_lastmsg(i2c) && i2c->msg->len == 0) {
			s3c24xx_i2c_stop(i2c, 0);
			goto out_ack;
		}

		if (i2c->state == STATE_READ)
			goto prepare_read;

		/* fall through to the write state, as we will need to
		 * send a byte as well */

	case STATE_WRITE:
		/* we are writing data to the device... check for the
		 * end of the message, and if so, work out what to do
		 */
 retry_write:
		if (!is_msgend(i2c)) {
			byte = i2c->msg->buf[i2c->msg_ptr++];
			writeb(byte, i2c->regs + S3C2410_IICDS);

			/* delay after writing the byte to allow the
			 * data setup time on the bus, as writing the
			 * data to the register causes the first bit
			 * to appear on SDA, and SCL will change as
			 * soon as the interrupt is acknowledged */
			ndelay(i2c->tx_setup);

		} else if (!is_lastmsg(i2c)) {
			/* we need to go to the next i2c message */
			dev_dbg(i2c->dev, "WRITE: Next Message\n");

			i2c->msg_ptr = 0;
			i2c->msg_idx ++;
			i2c->msg++;

			/* check to see if we need to do another message */
			if (i2c->msg->flags & I2C_M_NOSTART) {
				if (i2c->msg->flags & I2C_M_RD) {
					/* cannot do this, the controller
					 * forces us to send a new START
					 * when we change direction */
					s3c24xx_i2c_stop(i2c, -EINVAL);
				}
				goto retry_write;
			} else {
				/* send the new start */
				s3c24xx_i2c_message_start(i2c, i2c->msg);
				i2c->state = STATE_START;
			}

		} else {
			/* send stop */
			s3c24xx_i2c_stop(i2c, 0);
		}
		break;

	case STATE_READ:
		/* we have a byte of data in the data register, do
		 * something with it, and then work out whether we are
		 * going to do any more read/write
		 */
		if (!(i2c->msg->flags & I2C_M_IGNORE_NAK) &&
		    !(is_msglast(i2c) && is_lastmsg(i2c))) {
			if (iicstat & S3C2410_IICSTAT_LASTBIT) {
				dev_dbg(i2c->dev, "READ: No Ack\n");
				s3c24xx_i2c_stop(i2c, -ECONNREFUSED);
				goto out_ack;
			}
		}

		byte = readb(i2c->regs + S3C2410_IICDS);
		i2c->msg->buf[i2c->msg_ptr++] = byte;

 prepare_read:
		if (is_msglast(i2c)) {
			/* last byte of buffer: stop acking so the slave
			 * releases the bus after the next byte */
			if (is_lastmsg(i2c))
				s3c24xx_i2c_disable_ack(i2c);
		} else if (is_msgend(i2c)) {
			/* ok, we've read the entire buffer, see if there
			 * is anything else we need to do */
			if (is_lastmsg(i2c)) {
				/* last message, send stop and complete */
				dev_dbg(i2c->dev, "READ: Send Stop\n");
				s3c24xx_i2c_stop(i2c, 0);
			} else {
				/* go to the next transfer */
				dev_dbg(i2c->dev, "READ: Next Transfer\n");
				i2c->msg_ptr = 0;
				i2c->msg_idx++;
				i2c->msg++;
			}
		}
		break;
	}

	/* acknowledge the IRQ and get back on with the work; clearing
	 * IRQPEND releases SCL and lets the bus proceed */
 out_ack:
	tmp = readl(i2c->regs + S3C2410_IICCON);
	tmp &= ~S3C2410_IICCON_IRQPEND;
	writel(tmp, i2c->regs + S3C2410_IICCON);
 out:
	return ret;
}
/* s3c24xx_i2c_irq
 *
 * top level IRQ servicing routine: sanity-checks the controller state
 * and hands the byte-level work off to i2s_s3c_irq_nextbyte()
 */
static irqreturn_t s3c24xx_i2c_irq(int irqno, void *dev_id)
{
	struct s3c24xx_i2c *i2c = dev_id;
	unsigned long status;
	unsigned long tmp;

	status = readl(i2c->regs + S3C2410_IICSTAT);

	if (status & S3C2410_IICSTAT_ARBITR) {
		/* deal with arbitration loss — currently only reported */
		dev_err(i2c->dev, "deal with arbitration loss\n");
	}

	if (i2c->state == STATE_IDLE) {
		/* spurious interrupt: just clear the pending bit */
		dev_dbg(i2c->dev, "IRQ: error i2c->state == IDLE\n");

		tmp = readl(i2c->regs + S3C2410_IICCON);
		tmp &= ~S3C2410_IICCON_IRQPEND;
		writel(tmp, i2c->regs + S3C2410_IICCON);
		goto out;
	}

	/* pretty much this leaves us with the fact that we've
	 * transmitted or received whatever byte we last sent */
	i2s_s3c_irq_nextbyte(i2c, status);

 out:
	return IRQ_HANDLED;
}
/* s3c24xx_i2c_set_master
 *
 * get the i2c bus for a master transaction: poll IICSTAT until the
 * bus-busy flag clears, for up to ~400ms.  Returns 0 when the bus is
 * free, -ETIMEDOUT otherwise.
 */
static int s3c24xx_i2c_set_master(struct s3c24xx_i2c *i2c)
{
	unsigned long iicstat;
	int timeout = 400;	/* iterations of 1ms sleep */

	while (timeout-- > 0) {
		iicstat = readl(i2c->regs + S3C2410_IICSTAT);

		if (!(iicstat & S3C2410_IICSTAT_BUSBUSY))
			return 0;

		msleep(1);
	}

	/* dump the GPE pad state (SDA/SCL live on GPE14/15) for debugging */
	dev_dbg(i2c->dev, "timeout: GPEDAT is %08x\n",
		__raw_readl(S3C2410_GPEDAT));

	return -ETIMEDOUT;
}
/* s3c24xx_i2c_doxfer
 *
 * this starts an i2c transfer: claims the bus, kicks off the first
 * message and then sleeps until the IRQ state machine signals
 * completion (msg_num == 0) or a 5s timeout expires.  Returns the
 * number of messages transferred, a negative errno, or -EAGAIN when
 * the bus could not be claimed.
 */
static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c, struct i2c_msg *msgs, int num)
{
	unsigned long timeout;
	int ret;

	ret = s3c24xx_i2c_set_master(i2c);
	if (ret != 0) {
		dev_err(i2c->dev, "cannot get bus (error %d)\n", ret);
		ret = -EAGAIN;
		goto out;
	}

	/* set up the transfer state under the lock, then start it */
	spin_lock_irq(&i2c->lock);

	i2c->msg = msgs;
	i2c->msg_num = num;
	i2c->msg_ptr = 0;
	i2c->msg_idx = 0;
	i2c->state = STATE_START;

	s3c24xx_i2c_enable_irq(i2c);
	s3c24xx_i2c_message_start(i2c, msgs);
	spin_unlock_irq(&i2c->lock);

	timeout = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5);

	/* msg_idx holds either the message count or a negative errno,
	 * set by s3c24xx_i2c_master_complete() */
	ret = i2c->msg_idx;

	/* having these next two as dev_err() makes life very
	 * noisy when doing an i2cdetect */
	if (timeout == 0)
		dev_dbg(i2c->dev, "timeout\n");
	else if (ret != num)
		dev_dbg(i2c->dev, "incomplete xfer (%d)\n", ret);

	/* ensure the stop has been through the bus */
	msleep(1);

 out:
	return ret;
}
/* s3c24xx_i2c_xfer
 *
 * first port of call from the i2c bus code when a message needs
 * transferring across the i2c bus.  Retries the transfer while the
 * bus is busy (-EAGAIN), up to the adapter's retry limit.
 */
static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
			struct i2c_msg *msgs, int num)
{
	struct s3c24xx_i2c *i2c = adap->algo_data;
	int attempt;

	for (attempt = 0; attempt < adap->retries; attempt++) {
		int ret = s3c24xx_i2c_doxfer(i2c, msgs, num);

		/* anything other than a busy bus is final */
		if (ret != -EAGAIN)
			return ret;

		dev_dbg(i2c->dev, "Retrying transmission (%d)\n", attempt);

		udelay(100);
	}

	return -EREMOTEIO;
}
/* declare our i2c functionality */
static u32 s3c24xx_i2c_func(struct i2c_adapter *adap)
{
	/* plain I2C plus SMBus emulation; PROTOCOL_MANGLING covers the
	 * I2C_M_* flag handling in message_start()/nextbyte() */
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
}
/* i2c bus registration info */
static const struct i2c_algorithm s3c24xx_i2c_algorithm = {
	.master_xfer = s3c24xx_i2c_xfer,
	.functionality = s3c24xx_i2c_func,
};

/* single static controller instance: this driver supports exactly one
 * IIC block per system */
static struct s3c24xx_i2c s3c24xx_i2c = {
	.lock = __SPIN_LOCK_UNLOCKED(s3c24xx_i2c.lock),
	.wait = __WAIT_QUEUE_HEAD_INITIALIZER(s3c24xx_i2c.wait),
	.tx_setup = 50,	/* ns of data setup time, see message_start() */
	.adap = {
		.name = "s3c2410-i2c",
		.owner = THIS_MODULE,
		.algo = &s3c24xx_i2c_algorithm,
		.retries = 2,
		.class = I2C_CLASS_HWMON,
	},
};
/* s3c24xx_i2c_calcdivisor
 *
 * work out the prescaler (*div1: 16 or 512) and divisor (*divs: 1..17)
 * that best approximate the wanted frequency from the input clock,
 * and return the frequency actually achieved (same units as clkin)
 */
static int s3c24xx_i2c_calcdivisor(unsigned long clkin, unsigned int wanted,
				   unsigned int *div1, unsigned int *divs)
{
	unsigned int divisor = clkin / wanted;
	unsigned int prescale;

	/* the /16 prescaler only reaches ratios up to 16*16; beyond
	 * that the /512 prescaler has to be used */
	prescale = (divisor > 16 * 16) ? 512 : 16;

	/* round up, then clamp into the 1..17 range the hardware allows */
	divisor = (divisor + prescale - 1) / prescale;
	if (divisor < 1)
		divisor = 1;
	else if (divisor > 17)
		divisor = 17;

	*divs = divisor;
	*div1 = prescale;

	return clkin / (divisor * prescale);
}
/* freq_acceptable
 *
 * test whether a frequency is within the acceptable range of error
 * (within +/- 2 units of the wanted value)
 */
static inline int freq_acceptable(unsigned int freq, unsigned int wanted)
{
	int delta = freq - wanted;

	if (delta < 0)
		delta = -delta;

	return delta <= 2;
}
/* s3c24xx_i2c_getdivisor
 *
 * work out a divisor for the user requested frequency setting,
 * either by the requested frequency, or scanning the acceptable
 * range of frequencies until something is found.  On success the
 * divisor bits are ORed into *iicon and the achieved frequency (KHz)
 * is stored in *got; returns -EINVAL when no setting fits.
 */
static int s3c24xx_i2c_getdivisor(struct s3c24xx_i2c *i2c,
				  struct s3c2410_platform_i2c *pdata,
				  unsigned long *iicon,
				  unsigned int *got)
{
	unsigned long clkin = clk_get_rate(i2c->clk);
	unsigned int divs, div1;
	int freq;
	int start, end;

	clkin /= 1000;		/* clkin now in KHz */

	dev_dbg(i2c->dev, "pdata %p, freq %lu %lu..%lu\n",
		pdata, pdata->bus_freq, pdata->min_freq, pdata->max_freq);

	/* first try the exact requested bus frequency */
	if (pdata->bus_freq != 0) {
		freq = s3c24xx_i2c_calcdivisor(clkin, pdata->bus_freq/1000,
					       &div1, &divs);
		if (freq_acceptable(freq, pdata->bus_freq/1000))
			goto found;
	}

	/* ok, we may have to search for something suitable... */
	start = (pdata->max_freq == 0) ? pdata->bus_freq : pdata->max_freq;
	end = pdata->min_freq;

	start /= 1000;
	end /= 1000;

	/* search loop: walk down from the ceiling, 1KHz at a time */
	for (; start > end; start--) {
		freq = s3c24xx_i2c_calcdivisor(clkin, start, &div1, &divs);
		if (freq_acceptable(freq, start))
			goto found;
	}

	/* cannot find frequency spec */
	return -EINVAL;

 found:
	*got = freq;
	*iicon |= (divs-1);
	*iicon |= (div1 == 512) ? S3C2410_IICCON_TXDIV_512 : 0;
	return 0;
}
/* s3c24xx_i2c_init
 *
 * initialise the controller, set the IO lines and frequency
 */
static int s3c24xx_i2c_init(struct s3c24xx_i2c *i2c)
{
	unsigned long iicon = S3C2410_IICCON_IRQEN | S3C2410_IICCON_ACKEN;
	struct s3c2410_platform_i2c *pdata;
	unsigned int freq;

	/* get the platform data */
	pdata = s3c24xx_i2c_get_platformdata(i2c->adap.dev.parent);

	/* initialise the gpio: route GPE14/15 to the IIC function */
	s3c2410_gpio_cfgpin(S3C2410_GPE15, S3C2410_GPE15_IICSDA);
	s3c2410_gpio_cfgpin(S3C2410_GPE14, S3C2410_GPE14_IICSCL);

	/* write slave address */
	writeb(pdata->slave_addr, i2c->regs + S3C2410_IICADD);

	dev_info(i2c->dev, "slave address 0x%02x\n", pdata->slave_addr);

	/* we need to work out the divisors for the clock... */
	if (s3c24xx_i2c_getdivisor(i2c, pdata, &iicon, &freq) != 0) {
		dev_err(i2c->dev, "cannot meet bus frequency required\n");
		return -EINVAL;
	}

	/* todo - check that the i2c lines aren't being dragged anywhere */
	dev_info(i2c->dev, "bus frequency set to %d KHz\n", freq);
	dev_dbg(i2c->dev, "S3C2410_IICCON=0x%02lx\n", iicon);

	writel(iicon, i2c->regs + S3C2410_IICCON);

	/* check for s3c2440 i2c controller: only it has the line-control
	 * (SDA delay / filter) register */
	if (s3c24xx_i2c_is2440(i2c)) {
		dev_dbg(i2c->dev, "S3C2440_IICLC=%08x\n", pdata->sda_delay);
		writel(pdata->sda_delay, i2c->regs + S3C2440_IICLC);
	}

	return 0;
}
/* s3c24xx_i2c_probe
 *
 * called by the bus driver when a suitable device is found
 *
 * Acquires the "i2c" clock, maps the controller's register window,
 * initialises the hardware, claims the IRQ and finally registers the
 * adapter with the i2c core.  Failures unwind through the goto labels
 * below in strict reverse order of acquisition.
 */
static int s3c24xx_i2c_probe(struct platform_device *pdev)
{
struct s3c24xx_i2c *i2c = &s3c24xx_i2c;
struct resource *res;
int ret;
/* find the clock and enable it */
i2c->dev = &pdev->dev;
i2c->clk = clk_get(&pdev->dev, "i2c");
if (IS_ERR(i2c->clk)) {
dev_err(&pdev->dev, "cannot get clock\n");
ret = -ENOENT;
goto err_noclk;
}
dev_dbg(&pdev->dev, "clock source %p\n", i2c->clk);
clk_enable(i2c->clk);
/* map the registers */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(&pdev->dev, "cannot find IO resource\n");
ret = -ENOENT;
goto err_clk;
}
i2c->ioarea = request_mem_region(res->start, (res->end-res->start)+1,
pdev->name);
if (i2c->ioarea == NULL) {
dev_err(&pdev->dev, "cannot request IO\n");
ret = -ENXIO;
goto err_clk;
}
i2c->regs = ioremap(res->start, (res->end-res->start)+1);
if (i2c->regs == NULL) {
dev_err(&pdev->dev, "cannot map IO\n");
ret = -ENXIO;
goto err_ioarea;
}
dev_dbg(&pdev->dev, "registers %p (%p, %p)\n", i2c->regs, i2c->ioarea, res);
/* setup info block for the i2c core */
i2c->adap.algo_data = i2c;
i2c->adap.dev.parent = &pdev->dev;
/* initialise the i2c controller */
ret = s3c24xx_i2c_init(i2c);
if (ret != 0)
goto err_iomap;
/* find the IRQ for this unit (note, this relies on the init call to
 * ensure no current IRQs pending)
 */
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (res == NULL) {
dev_err(&pdev->dev, "cannot find IRQ\n");
ret = -ENOENT;
goto err_iomap;
}
ret = request_irq(res->start, s3c24xx_i2c_irq, IRQF_DISABLED,
pdev->name, i2c);
if (ret != 0) {
dev_err(&pdev->dev, "cannot claim IRQ\n");
goto err_iomap;
}
i2c->irq = res;
dev_dbg(&pdev->dev, "irq resource %p (%lu)\n", res,
(unsigned long)res->start);
ret = i2c_add_adapter(&i2c->adap);
if (ret < 0) {
dev_err(&pdev->dev, "failed to add bus to i2c core\n");
goto err_irq;
}
platform_set_drvdata(pdev, i2c);
dev_info(&pdev->dev, "%s: S3C I2C adapter\n", i2c->adap.dev.bus_id);
return 0;
/* error unwind: each label releases everything acquired before its
 * corresponding failure point, in reverse order */
err_irq:
free_irq(i2c->irq->start, i2c);
err_iomap:
iounmap(i2c->regs);
err_ioarea:
release_resource(i2c->ioarea);
kfree(i2c->ioarea);
err_clk:
clk_disable(i2c->clk);
clk_put(i2c->clk);
err_noclk:
return ret;
}
/* s3c24xx_i2c_remove
 *
 * called when device is removed from the bus
 *
 * Tears down everything probe acquired, in reverse order: adapter
 * registration, IRQ, clock, register mapping and memory region.
 */
static int s3c24xx_i2c_remove(struct platform_device *pdev)
{
struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
i2c_del_adapter(&i2c->adap);
free_irq(i2c->irq->start, i2c);
clk_disable(i2c->clk);
clk_put(i2c->clk);
iounmap(i2c->regs);
release_resource(i2c->ioarea);
kfree(i2c->ioarea);
return 0;
}
#ifdef CONFIG_PM
/* s3c24xx_i2c_resume
 *
 * PM resume hook: re-run the full hardware initialisation, since the
 * controller registers may have been lost across suspend.
 */
static int s3c24xx_i2c_resume(struct platform_device *dev)
{
struct s3c24xx_i2c *i2c = platform_get_drvdata(dev);
if (i2c != NULL)
s3c24xx_i2c_init(i2c);
return 0;
}
#else
/* no PM support configured: no resume callback */
#define s3c24xx_i2c_resume NULL
#endif
/* device driver for platform bus bits */
static struct platform_driver s3c2410_i2c_driver = {
.probe = s3c24xx_i2c_probe,
.remove = s3c24xx_i2c_remove,
.resume = s3c24xx_i2c_resume,
.driver = {
.owner = THIS_MODULE,
.name = "s3c2410-i2c",
},
};
static struct platform_driver s3c2440_i2c_driver = {
.probe = s3c24xx_i2c_probe,
.remove = s3c24xx_i2c_remove,
.resume = s3c24xx_i2c_resume,
.driver = {
.owner = THIS_MODULE,
.name = "s3c2440-i2c",
},
};
/* module entry point: register both the s3c2410 and s3c2440 flavours of
 * the platform driver.  If the second registration fails, the first one
 * is rolled back so the module load fails cleanly.
 */
static int __init i2c_adap_s3c_init(void)
{
	int ret = platform_driver_register(&s3c2410_i2c_driver);

	if (ret != 0)
		return ret;

	ret = platform_driver_register(&s3c2440_i2c_driver);
	if (ret != 0)
		platform_driver_unregister(&s3c2410_i2c_driver);

	return ret;
}
/* module exit point: unregister both platform driver variants */
static void __exit i2c_adap_s3c_exit(void)
{
platform_driver_unregister(&s3c2410_i2c_driver);
platform_driver_unregister(&s3c2440_i2c_driver);
}
module_init(i2c_adap_s3c_init);
module_exit(i2c_adap_s3c_exit);
MODULE_DESCRIPTION("S3C24XX I2C Bus driver");
MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
MODULE_LICENSE("GPL");
| ghmajx/asuswrt-merlin | release/src-rt/linux/linux-2.6/drivers/i2c/busses/i2c-s3c2410.c | C | gpl-2.0 | 20,842 |
/* Copyright (c) 2012, 2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __MSM_MEMORY_DUMP_H
#define __MSM_MEMORY_DUMP_H
#include <linux/types.h>
enum dump_client_type {
MSM_CPU_CTXT = 0,
MSM_L1_CACHE,
MSM_L2_CACHE,
MSM_OCMEM,
MSM_TMC_ETFETB,
MSM_ETM0_REG,
MSM_ETM1_REG,
MSM_ETM2_REG,
MSM_ETM3_REG,
MSM_TMC0_REG, /* TMC_ETR */
MSM_TMC1_REG, /* TMC_ETF */
MSM_LOG_BUF,
MSM_LOG_BUF_FIRST_IDX,
MAX_NUM_CLIENTS,
};
struct msm_client_dump {
enum dump_client_type id;
unsigned long start_addr;
unsigned long end_addr;
};
#ifdef CONFIG_MSM_MEMORY_DUMP
extern int msm_dump_tbl_register(struct msm_client_dump *client_entry);
#else
static inline int msm_dump_tbl_register(struct msm_client_dump *entry)
{
return -EIO;
}
#endif
#if defined(CONFIG_MSM_MEMORY_DUMP) || defined(CONFIG_MSM_MEMORY_DUMP_V2)
extern uint32_t msm_dump_table_version(void);
#else
static inline uint32_t msm_dump_table_version(void)
{
return 0;
}
#endif
#define MSM_DUMP_MAKE_VERSION(ma, mi) ((ma << 20) | mi)
#define MSM_DUMP_MAJOR(val) (val >> 20)
#define MSM_DUMP_MINOR(val) (val & 0xFFFFF)
#define MAX_NUM_ENTRIES 0x120
enum msm_dump_data_ids {
MSM_DUMP_DATA_CPU_CTX = 0x00,
MSM_DUMP_DATA_L1_INST_CACHE = 0x60,
MSM_DUMP_DATA_L1_DATA_CACHE = 0x80,
MSM_DUMP_DATA_ETM_REG = 0xA0,
MSM_DUMP_DATA_L2_CACHE = 0xC0,
MSM_DUMP_DATA_L3_CACHE = 0xD0,
MSM_DUMP_DATA_OCMEM = 0xE0,
MSM_DUMP_DATA_MISC = 0xE8,
MSM_DUMP_DATA_TMC_ETF = 0xF0,
MSM_DUMP_DATA_TMC_REG = 0x100,
MSM_DUMP_DATA_LOG_BUF = 0x110,
MSM_DUMP_DATA_LOG_BUF_FIRST_IDX = 0x111,
MSM_DUMP_DATA_MAX = MAX_NUM_ENTRIES,
};
enum msm_dump_table_ids {
MSM_DUMP_TABLE_APPS,
MSM_DUMP_TABLE_MAX = MAX_NUM_ENTRIES,
};
enum msm_dump_type {
MSM_DUMP_TYPE_DATA,
MSM_DUMP_TYPE_TABLE,
};
/* Descriptor for a single dump data region (dump table v2). */
struct msm_dump_data {
uint32_t version; /* layout version of this descriptor */
uint32_t magic; /* magic value marking a valid/filled entry */
char name[32]; /* human-readable client name */
uint64_t addr; /* address of the region to dump — presumably physical; confirm against dump-table spec */
uint64_t len; /* length of the region in bytes */
uint32_t reserved;
};
/* Entry registered into a dump table via msm_dump_data_register(). */
struct msm_dump_entry {
uint32_t id; /* id from msm_dump_data_ids or msm_dump_table_ids */
char name[32];
uint32_t type; /* enum msm_dump_type: DATA or TABLE */
uint64_t addr; /* address of the associated msm_dump_data/table */
};
#ifdef CONFIG_MSM_MEMORY_DUMP_V2
extern int msm_dump_data_register(enum msm_dump_table_ids id,
struct msm_dump_entry *entry);
#else
static inline int msm_dump_data_register(enum msm_dump_table_ids id,
struct msm_dump_entry *entry)
{
return -ENOSYS;
}
#endif
#endif
| kondors1995/Soviet-kernel | include/soc/qcom/memory_dump.h | C | gpl-2.0 | 2,708 |
/* { dg-do compile } */
/* { dg-options "-mavx512f -O2" } */
/* { dg-final { scan-assembler-times "vpmovzxbq\[ \\t\]+\[^\n\]*%xmm\[0-9\]\[^\n\]*%zmm\[0-9\]\[^\{\]" 1 } } */
/* { dg-final { scan-assembler-times "vpmovzxbq\[ \\t\]+\[^\n\]*%xmm\[0-9\]\[^\n\]*%zmm\[0-9\]\{%k\[1-7\]\}\[^\{\]" 1 } } */
/* { dg-final { scan-assembler-times "vpmovzxbq\[ \\t\]+\[^\n\]*%xmm\[0-9\]\[^\n\]*%zmm\[0-9\]\{%k\[1-7\]\}\{z\}" 1 } } */
#include <immintrin.h>
volatile __m128i s;
volatile __m512i res;
volatile __mmask8 m;
void extern
avx512f_test (void)
{
/* unmasked, merge-masked and zero-masked forms of the byte->qword
 * zero-extension intrinsic; each must emit one vpmovzxbq (see the
 * scan-assembler directives at the top of this file) */
res = _mm512_cvtepu8_epi64 (s);
res = _mm512_mask_cvtepu8_epi64 (res, m, s);
res = _mm512_maskz_cvtepu8_epi64 (m, s);
}
| periclesroalves/gcc-conair | gcc/testsuite/gcc.target/i386/avx512f-vpmovzxbq-1.c | C | gpl-2.0 | 670 |
using System;
using System.Text;
namespace Server.Ethics.Evil
{
/// <summary>
/// The evil-ethic "Unholy Sense" power.  When invoked it counts the
/// opposing (Hero) players within sensing range and tells the invoker how
/// many there are, plus the compass direction of the most powerful one.
/// </summary>
public sealed class UnholySense : Power
{
    public UnholySense()
    {
        this.m_Definition = new PowerDefinition(
            0,
            "Unholy Sense",
            "Drewrok Velgo",
            "");
    }

    public override void BeginInvoke(Player from)
    {
        Ethic opposition = Ethic.Hero;
        int enemyCount = 0;

        // Base radius grows with the invoker's power; each enemy's own
        // power shrinks the radius at which they can be sensed, but never
        // below 18 tiles.
        int maxRange = 18 + from.Power;

        Player primary = null;

        foreach (Player pl in opposition.Players)
        {
            Mobile mob = pl.Mobile;

            // Skip enemies that are offline, on another map, or dead.
            if (mob == null || mob.Map != from.Mobile.Map || !mob.Alive)
                continue;

            if (!mob.InRange(from.Mobile, Math.Max(18, maxRange - pl.Power)))
                continue;

            // Remember the most powerful enemy in range for the direction hint.
            if (primary == null || pl.Power > primary.Power)
                primary = pl;

            ++enemyCount;
        }

        StringBuilder sb = new StringBuilder();

        sb.Append("You sense ");
        sb.Append(enemyCount == 0 ? "no" : enemyCount.ToString());
        sb.Append(enemyCount == 1 ? " enemy" : " enemies");

        if (primary != null)
        {
            // Bug fix: the player-visible message misspelled "presence"
            // as "presense".
            sb.Append(", and a strong presence");

            switch ( from.Mobile.GetDirectionTo(primary.Mobile) )
            {
                case Direction.West:
                    sb.Append(" to the west.");
                    break;
                case Direction.East:
                    sb.Append(" to the east.");
                    break;
                case Direction.North:
                    sb.Append(" to the north.");
                    break;
                case Direction.South:
                    sb.Append(" to the south.");
                    break;
                case Direction.Up:
                    sb.Append(" to the north-west.");
                    break;
                case Direction.Down:
                    sb.Append(" to the south-east.");
                    break;
                case Direction.Left:
                    sb.Append(" to the south-west.");
                    break;
                case Direction.Right:
                    sb.Append(" to the north-east.");
                    break;
            }
        }
        else
        {
            sb.Append('.');
        }

        from.Mobile.LocalOverheadMessage(Server.Network.MessageType.Regular, 0x59, false, sb.ToString());

        this.FinishInvoke(from);
    }
}
} | LeonG-ZA/JustUO-KR | Scripts/Services/Ethics/Evil/Powers/UnholySense.cs | C# | gpl-3.0 | 2,730 |
#!/bin/bash
export PATH=$HOME/.local/bin:/usr/local/bin:$HOME/prefix/bin:$HOME/APM/px4/gcc-arm-none-eabi-4_7-2014q2/bin:$PATH
export PYTHONUNBUFFERED=1
export PYTHONPATH=$HOME/APM
cd $HOME/APM || exit 1
test -n "$FORCEBUILD" || {
(cd APM && git fetch > /dev/null 2>&1)
newtags=$(cd APM && git fetch --tags | wc -l)
oldhash=$(cd APM && git rev-parse origin/master)
newhash=$(cd APM && git rev-parse HEAD)
newtagspx4=$(cd PX4Firmware && git fetch --tags | wc -l)
oldhashpx4=$(cd PX4Firmware && git rev-parse origin/master)
newhashpx4=$(cd PX4Firmware && git rev-parse HEAD)
newtagsnuttx=$(cd PX4NuttX && git fetch --tags | wc -l)
oldhashnuttx=$(cd PX4NuttX && git rev-parse origin/master)
newhashnuttx=$(cd PX4NuttX && git rev-parse HEAD)
newtagsuavcan=$(cd uavcan && git fetch --tags | wc -l)
oldhashuavcan=$(cd uavcan && git rev-parse origin/master)
newhashuavcan=$(cd uavcan && git rev-parse HEAD)
if [ "$oldhash" = "$newhash" -a "$newtags" = "0" -a "$oldhashpx4" = "$newhashpx4" -a "$newtagspx4" = "0" -a "$oldhashnuttx" = "$newhashnuttx" -a "$newtagsnuttx" = "0" -a "$oldhashuavcan" = "$newhashuavcan" -a "$newtagsuavcan" = "0" ]; then
echo "no change $oldhash $newhash `date`" >> build.log
exit 0
fi
}
############################
# grab a lock file. Not atomic, but close :)
# tries to cope with NFS
#
# Usage: lock_file <path>
# Returns 0 (writing our PID into the file) when the lock was taken,
# 1 when another live process still holds it.
lock_file() {
lck="$1"
pid=`cat "$lck" 2> /dev/null`
if test -f "$lck" && kill -0 $pid 2> /dev/null; then
# Bug fix: the staleness check hard-coded "build.lck" instead of
# using the lock path that was passed in, so it only worked for
# that one file name.
LOCKAGE=$(($(date +%s) - $(stat -c '%Y' "$lck")))
test $LOCKAGE -gt 7200 && {
echo "old lock file $lck is valid for $pid with age $LOCKAGE seconds"
}
return 1
fi
/bin/rm -f "$lck"
echo "$$" > "$lck"
return 0
}
lock_file build.lck || {
exit 1
}
#ulimit -m 500000
#ulimit -s 500000
#ulimit -t 1800
#ulimit -v 500000
(
date
report() {
d="$1"
old="$2"
new="$3"
cat <<EOF | mail -s 'build failed' drones-discuss@googlegroups.com
A build of $d failed at `date`
You can view the build logs at http://autotest.diydrones.com/
A log of the commits since the last attempted build is below
`git log $old $new`
EOF
}
report_pull_failure() {
d="$1"
git show origin/master | mail -s 'APM pull failed' drones-discuss@googlegroups.com
exit 1
}
oldhash=$(cd APM && git rev-parse HEAD)
pushd APM
git checkout -f master
git fetch origin
git reset --hard origin/master
git pull || report_pull_failure
git clean -f -f -x -d -d
git tag autotest-$(date '+%Y-%m-%d-%H%M%S') -m "test tag `date`"
cp ../config.mk .
popd
rsync -a APM/Tools/autotest/web-firmware/ buildlogs/binaries/
pushd PX4Firmware
git fetch origin
git reset --hard origin/master
for v in ArduPlane ArduCopter APMrover2; do
git tag -d $v-beta || true
git tag -d $v-stable || true
done
git fetch origin --tags
git show
popd
pushd PX4NuttX
git fetch origin
git reset --hard origin/master
for v in ArduPlane ArduCopter APMrover2; do
git tag -d $v-beta || true
git tag -d $v-stable || true
done
git fetch origin --tags
git show
popd
pushd uavcan
git fetch origin
git reset --hard origin/master
for v in ArduPlane ArduCopter APMrover2; do
git tag -d $v-beta || true
git tag -d $v-stable || true
done
git fetch origin --tags
git show
popd
echo "Updating pymavlink"
pushd mavlink/pymavlink
git fetch origin
git reset --hard origin/master
git show
python setup.py build install --user
popd
echo "Updating MAVProxy"
pushd MAVProxy
git fetch origin
git reset --hard origin/master
git show
python setup.py build install --user
popd
githash=$(cd APM && git rev-parse HEAD)
hdate=$(date +"%Y-%m-%d-%H:%m")
for d in ArduPlane ArduCopter APMrover2 AntennaTracker; do
pushd APM/$d
rm -rf ../../buildlogs/$d.build
(date && TMPDIR=../../buildlogs make) > ../../buildlogs/$d.txt 2>&1
status=$?
if [ $status != 0 ]; then
report $d $oldhash $newhash
fi
popd
APM/Tools/scripts/frame_sizes.py buildlogs/$d.build > buildlogs/$d.framesizes.txt
(
avr-size buildlogs/$d.build/$d.elf
avr-nm --size-sort --print-size -C buildlogs/$d.build/$d.elf
) > buildlogs/$d.sizes.txt
done
mkdir -p "buildlogs/history/$hdate"
(cd buildlogs && cp -f *.txt *.flashlog *.tlog *.km[lz] *.gpx *.html *.png *.bin *.BIN *.elf "history/$hdate/")
echo $githash > "buildlogs/history/$hdate/githash.txt"
(cd APM && Tools/scripts/build_parameters.sh)
(cd APM && Tools/scripts/build_docs.sh)
killall -9 JSBSim || /bin/true
# raise core limit
ulimit -c 10000000
timelimit 12000 APM/Tools/autotest/autotest.py --timeout=11500 > buildlogs/autotest-output.txt 2>&1
) >> build.log 2>&1
| raydtang/ardupilot | Tools/scripts/build_autotest.sh | Shell | gpl-3.0 | 4,629 |
/**
* Appcelerator Titanium Mobile
* Copyright (c) 2010 by Appcelerator, Inc. All Rights Reserved.
* Licensed under the terms of the Apache Public License
* Please see the LICENSE included with this distribution for details.
*/
package org.appcelerator.kroll;
/**
 * Immutable record of a single proxy property change, capturing the
 * property name together with its previous and current values.
 */
public class KrollPropertyChange {
	protected String name;
	protected Object oldValue, newValue;

	/**
	 * @param name the name of the property that changed
	 * @param oldValue the value before the change
	 * @param newValue the value after the change
	 */
	public KrollPropertyChange(String name, Object oldValue, Object newValue) {
		this.name = name;
		this.oldValue = oldValue;
		this.newValue = newValue;
	}

	/**
	 * Notify the given listener of this property change on behalf of the
	 * given proxy. A null listener is silently ignored.
	 */
	public void fireEvent(KrollProxy proxy, KrollProxyListener listener) {
		if (listener == null) {
			return;
		}
		listener.propertyChanged(name, oldValue, newValue, proxy);
	}

	/** @return the name of the changed property */
	public String getName() {
		return name;
	}

	/** @return the value the property held before the change */
	public Object getOldValue() {
		return oldValue;
	}

	/** @return the value the property holds after the change */
	public Object getNewValue() {
		return newValue;
	}
}
| arnaudsj/titanium_mobile | android/titanium/src/org/appcelerator/kroll/KrollPropertyChange.java | Java | apache-2.0 | 846 |
/* Copyright (c) 2011 by The Authors.
* Published under the LGPL 2.1 license.
* See /license-notice.txt for the full text of the license notice.
* See /license.txt for the full text of the license.
*/
/**
* Supplies a set of utility methods for building Geometry objects from lists
* of Coordinates.
*
* Note that the factory constructor methods do <b>not</b> change the input
* coordinates in any way.
*
* In particular, they are not rounded to the supplied <tt>PrecisionModel</tt>.
* It is assumed that input Coordinates meet the given precision.
*/
/**
* @requires jsts/geom/PrecisionModel.js
*/
/**
* Constructs a GeometryFactory that generates Geometries having a floating
* PrecisionModel and a spatial-reference ID of 0.
*
* @constructor
*/
jsts.geom.GeometryFactory = function(precisionModel) {
this.precisionModel = precisionModel || new jsts.geom.PrecisionModel();
};
jsts.geom.GeometryFactory.prototype.precisionModel = null;
jsts.geom.GeometryFactory.prototype.getPrecisionModel = function() {
return this.precisionModel;
};
/**
* Creates a Point using the given Coordinate; a null Coordinate will create an
* empty Geometry.
*
* @param {Coordinate}
* coordinate Coordinate to base this Point on.
* @return {Point} A new Point.
*/
jsts.geom.GeometryFactory.prototype.createPoint = function(coordinate) {
var point = new jsts.geom.Point(coordinate, this);
return point;
};
/**
* Creates a LineString using the given Coordinates; a null or empty array will
* create an empty LineString. Consecutive points must not be equal.
*
* @param {Coordinate[]}
* coordinates an array without null elements, or an empty array, or
* null.
* @return {LineString} A new LineString.
*/
jsts.geom.GeometryFactory.prototype.createLineString = function(coordinates) {
var lineString = new jsts.geom.LineString(coordinates, this);
return lineString;
};
/**
* Creates a LinearRing using the given Coordinates; a null or empty array will
* create an empty LinearRing. The points must form a closed and simple
* linestring. Consecutive points must not be equal.
*
* @param {Coordinate[]}
* coordinates an array without null elements, or an empty array, or
* null.
* @return {LinearRing} A new LinearRing.
*/
jsts.geom.GeometryFactory.prototype.createLinearRing = function(coordinates) {
var linearRing = new jsts.geom.LinearRing(coordinates, this);
return linearRing;
};
/**
* Constructs a <code>Polygon</code> with the given exterior boundary and
* interior boundaries.
*
* @param {LinearRing}
* shell the outer boundary of the new <code>Polygon</code>, or
* <code>null</code> or an empty <code>LinearRing</code> if the
* empty geometry is to be created.
* @param {LinearRing[]}
* holes the inner boundaries of the new <code>Polygon</code>, or
* <code>null</code> or empty <code>LinearRing</code> s if the
* empty geometry is to be created.
* @return {Polygon} A new Polygon.
*/
jsts.geom.GeometryFactory.prototype.createPolygon = function(shell, holes) {
var polygon = new jsts.geom.Polygon(shell, holes, this);
return polygon;
};
/**
 * Creates a MultiPoint. Accepts either an array of Points or an array of
 * Coordinates; in the latter case each Coordinate is first wrapped in a
 * Point via createPoint.
 */
jsts.geom.GeometryFactory.prototype.createMultiPoint = function(points) {
  if (points && points[0] instanceof jsts.geom.Coordinate) {
    var asPoints = [], idx;
    for (idx = 0; idx < points.length; idx++) {
      asPoints[idx] = this.createPoint(points[idx]);
    }
    points = asPoints;
  }

  return new jsts.geom.MultiPoint(points, this);
};
jsts.geom.GeometryFactory.prototype.createMultiLineString = function(
lineStrings) {
return new jsts.geom.MultiLineString(lineStrings, this);
};
jsts.geom.GeometryFactory.prototype.createMultiPolygon = function(polygons) {
return new jsts.geom.MultiPolygon(polygons, this);
};
/**
 * Build an appropriate <code>Geometry</code>, <code>MultiGeometry</code>,
 * or <code>GeometryCollection</code> to contain the <code>Geometry</code>s
 * in it. For example:<br>
 *
 * <ul>
 * <li> If <code>geomList</code> contains a single <code>Polygon</code>,
 * the <code>Polygon</code> is returned.
 * <li> If <code>geomList</code> contains several <code>Polygon</code>s, a
 * <code>MultiPolygon</code> is returned.
 * <li> If <code>geomList</code> contains some <code>Polygon</code>s and
 * some <code>LineString</code>s, a <code>GeometryCollection</code> is
 * returned.
 * <li> If <code>geomList</code> is empty, an empty
 * <code>GeometryCollection</code> is returned
 * </ul>
 *
 * Note that this method does not "flatten" Geometries in the input, and hence
 * if any MultiGeometries are contained in the input a GeometryCollection
 * containing them will be returned.
 *
 * @param geomList
 *          the <code>Geometry</code>s to combine.
 * @return {Geometry} a <code>Geometry</code> of the "smallest", "most
 *         type-specific" class that can contain the elements of
 *         <code>geomList</code> .
 */
jsts.geom.GeometryFactory.prototype.buildGeometry = function(geomList) {
  /**
   * Determine some facts about the geometries in the list
   */
  var geomClass = null;
  var isHeterogeneous = false;
  var hasGeometryCollection = false;
  for (var i = geomList.iterator(); i.hasNext();) {
    var geom = i.next();
    var partClass = geom.CLASS_NAME;
    if (geomClass === null) {
      geomClass = partClass;
    }
    if (!(partClass === geomClass)) {
      isHeterogeneous = true;
    }
    if (geom.isGeometryCollectionBase())
      hasGeometryCollection = true;
  }
  /**
   * Now construct an appropriate geometry to return
   */
  // for the empty geometry, return an empty GeometryCollection
  if (geomClass === null) {
    return this.createGeometryCollection(null);
  }
  if (isHeterogeneous || hasGeometryCollection) {
    return this.createGeometryCollection(geomList.toArray());
  }
  // at this point we know the collection is homogeneous (every element
  // shares the same concrete class, and none is a GeometryCollection).
  // Determine the type of the result from the first Geometry in the list
  // this should always return a geometry, since otherwise an empty collection
  // would have already been returned
  var geom0 = geomList.get(0);
  var isCollection = geomList.size() > 1;
  if (isCollection) {
    if (geom0 instanceof jsts.geom.Polygon) {
      return this.createMultiPolygon(geomList.toArray());
    } else if (geom0 instanceof jsts.geom.LineString) {
      return this.createMultiLineString(geomList.toArray());
    } else if (geom0 instanceof jsts.geom.Point) {
      return this.createMultiPoint(geomList.toArray());
    }
    jsts.util.Assert.shouldNeverReachHere('Unhandled class: ' + geom0);
  }
  // single homogeneous element: return it directly
  return geom0;
};
jsts.geom.GeometryFactory.prototype.createGeometryCollection = function(
geometries) {
return new jsts.geom.GeometryCollection(geometries, this);
};
/**
 * Creates a {@link Geometry} with the same extent as the given envelope. The
 * Geometry returned is guaranteed to be valid. To provide this behaviour, the
 * following cases occur:
 * <p>
 * If the <code>Envelope</code> is:
 * <ul>
 * <li>null : returns an empty {@link Point}
 * <li>a point : returns a non-empty {@link Point}
 * <li>a line : returns a two-point {@link LineString}
 * <li>a rectangle : returns a {@link Polygon}> whose points are (minx, miny),
 * (minx, maxy), (maxx, maxy), (maxx, miny), (minx, miny).
 * </ul>
 *
 * @param {jsts.geom.Envelope}
 *          envelope the <code>Envelope</code> to convert.
 * @return {jsts.geom.Geometry} an empty <code>Point</code> (for null
 *         <code>Envelope</code>s), a <code>Point</code> (when min x = max
 *         x and min y = max y) or a <code>Polygon</code> (in all other cases).
 */
jsts.geom.GeometryFactory.prototype.toGeometry = function(envelope) {
  // null envelope - return empty point geometry
  if (envelope.isNull()) {
    return this.createPoint(null);
  }
  // degenerate envelope collapsed to a single point?
  if (envelope.getMinX() === envelope.getMaxX() &&
      envelope.getMinY() === envelope.getMaxY()) {
    return this.createPoint(new jsts.geom.Coordinate(envelope.getMinX(),
        envelope.getMinY()));
  }
  // vertical or horizontal line (zero width or zero height)?
  if (envelope.getMinX() === envelope.getMaxX() ||
      envelope.getMinY() === envelope.getMaxY()) {
    return this.createLineString([
        new jsts.geom.Coordinate(envelope.getMinX(), envelope.getMinY()),
        new jsts.geom.Coordinate(envelope.getMaxX(), envelope.getMaxY())]);
  }
  // general case: create a CW ring for the polygon, closing it by
  // repeating the first coordinate as the last
  return this.createPolygon(this.createLinearRing([
      new jsts.geom.Coordinate(envelope.getMinX(), envelope.getMinY()),
      new jsts.geom.Coordinate(envelope.getMinX(), envelope.getMaxY()),
      new jsts.geom.Coordinate(envelope.getMaxX(), envelope.getMaxY()),
      new jsts.geom.Coordinate(envelope.getMaxX(), envelope.getMinY()),
      new jsts.geom.Coordinate(envelope.getMinX(), envelope.getMinY())]), null);
};
| mileswwatkins/ABiteBetweenUs | utilities/scripts/jsts/src/jsts/geom/GeometryFactory.js | JavaScript | apache-2.0 | 8,870 |
# Homebrew formula for ipcalc, a command-line IPv4 address/netmask calculator.
class Ipcalc < Formula
homepage "http://jodies.de/ipcalc"
url "http://jodies.de/ipcalc-archive/ipcalc-0.41.tar.gz"
sha256 "dda9c571ce3369e5b6b06e92790434b54bec1f2b03f1c9df054c0988aa4e2e8a"
# ipcalc ships as a single self-contained script; just drop it into bin.
def install
bin.install "ipcalc"
end
# Smoke test: compute a /24 network, suppressing the binary representation.
test do
system "#{bin}/ipcalc", "--nobinary", "192.168.0.1/24"
end
end
| bendemaree/homebrew | Library/Formula/ipcalc.rb | Ruby | bsd-2-clause | 321 |
/****************************************************************************
**
** Copyright (C) 2013 Digia Plc and/or its subsidiary(-ies).
** Contact: http://www.qt-project.org/legal
**
** This file is part of the qmake spec of the Qt Toolkit.
**
** $QT_BEGIN_LICENSE:LGPL$
** Commercial License Usage
** Licensees holding valid commercial Qt licenses may use this file in
** accordance with the commercial license agreement provided with the
** Software or, alternatively, in accordance with the terms contained in
** a written agreement between you and Digia. For licensing terms and
** conditions see http://qt.digia.com/licensing. For further information
** use the contact form at http://qt.digia.com/contact-us.
**
** GNU Lesser General Public License Usage
** Alternatively, this file may be used under the terms of the GNU Lesser
** General Public License version 2.1 as published by the Free Software
** Foundation and appearing in the file LICENSE.LGPL included in the
** packaging of this file. Please review the following information to
** ensure the GNU Lesser General Public License version 2.1 requirements
** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
**
** In addition, as a special exception, Digia gives you certain additional
** rights. These rights are described in the Digia Qt LGPL Exception
** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
**
** GNU General Public License Usage
** Alternatively, this file may be used under the terms of the GNU
** General Public License version 3.0 as published by the Free Software
** Foundation and appearing in the file LICENSE.GPL included in the
** packaging of this file. Please review the following information to
** ensure the GNU General Public License version 3.0 requirements will be
** met: http://www.gnu.org/copyleft/gpl.html.
**
**
** $QT_END_LICENSE$
**
****************************************************************************/
#ifndef Q_POSIX_QPLATFORMDEFS_H
#define Q_POSIX_QPLATFORMDEFS_H
#include <signal.h>
#include <sys/types.h>
#ifndef QT_NO_SOCKET_H
# include <sys/socket.h>
#endif
#include <sys/stat.h>
#if defined(QT_USE_XOPEN_LFS_EXTENSIONS) && defined(QT_LARGEFILE_SUPPORT)
#define QT_STATBUF struct stat64
#define QT_FPOS_T fpos64_t
#define QT_OFF_T off64_t
#define QT_STAT ::stat64
#define QT_LSTAT ::lstat64
#define QT_TRUNCATE ::truncate64
// File I/O
#define QT_OPEN ::open64
#define QT_LSEEK ::lseek64
#define QT_FSTAT ::fstat64
#define QT_FTRUNCATE ::ftruncate64
// Standard C89
#define QT_FOPEN ::fopen64
#define QT_FSEEK ::fseeko64
#define QT_FTELL ::ftello64
#define QT_FGETPOS ::fgetpos64
#define QT_FSETPOS ::fsetpos64
#define QT_MMAP ::mmap64
#else // !defined(QT_USE_XOPEN_LFS_EXTENSIONS) || !defined(QT_LARGEFILE_SUPPORT)
#include "../c89/qplatformdefs.h"
#define QT_STATBUF struct stat
#define QT_STAT ::stat
#define QT_LSTAT ::lstat
#define QT_TRUNCATE ::truncate
// File I/O
#define QT_OPEN ::open
#define QT_LSEEK ::lseek
#define QT_FSTAT ::fstat
#define QT_FTRUNCATE ::ftruncate
// Posix extensions to C89
#if !defined(QT_USE_XOPEN_LFS_EXTENSIONS) && !defined(QT_NO_USE_FSEEKO)
#undef QT_OFF_T
#undef QT_FSEEK
#undef QT_FTELL
#define QT_OFF_T off_t
#define QT_FSEEK ::fseeko
#define QT_FTELL ::ftello
#endif
#define QT_MMAP ::mmap
#endif // !defined (QT_USE_XOPEN_LFS_EXTENSIONS) || !defined(QT_LARGEFILE_SUPPORT)
#define QT_STAT_MASK S_IFMT
#define QT_STAT_REG S_IFREG
#define QT_STAT_DIR S_IFDIR
#define QT_STAT_LNK S_IFLNK
#define QT_ACCESS ::access
#define QT_GETCWD ::getcwd
#define QT_CHDIR ::chdir
#define QT_MKDIR ::mkdir
#define QT_RMDIR ::rmdir
// File I/O
#define QT_CLOSE ::close
#define QT_READ ::read
#define QT_WRITE ::write
#define QT_OPEN_LARGEFILE O_LARGEFILE
#define QT_OPEN_RDONLY O_RDONLY
#define QT_OPEN_WRONLY O_WRONLY
#define QT_OPEN_RDWR O_RDWR
#define QT_OPEN_CREAT O_CREAT
#define QT_OPEN_TRUNC O_TRUNC
#define QT_OPEN_APPEND O_APPEND
// Posix extensions to C89
#define QT_FILENO fileno
// Directory iteration
#define QT_DIR DIR
#define QT_OPENDIR ::opendir
#define QT_CLOSEDIR ::closedir
#if defined(QT_LARGEFILE_SUPPORT) \
&& defined(QT_USE_XOPEN_LFS_EXTENSIONS) \
&& !defined(QT_NO_READDIR64)
#define QT_DIRENT struct dirent64
#define QT_READDIR ::readdir64
#define QT_READDIR_R ::readdir64_r
#else
#define QT_DIRENT struct dirent
#define QT_READDIR ::readdir
#define QT_READDIR_R ::readdir_r
#endif
#define QT_SOCKLEN_T socklen_t
#define QT_SOCKET_CONNECT ::connect
#define QT_SOCKET_BIND ::bind
#define QT_SIGNAL_RETTYPE void
#define QT_SIGNAL_ARGS int
#define QT_SIGNAL_IGNORE SIG_IGN
#endif // include guard
| you21979/phantomjs | src/qt/qtbase/mkspecs/common/posix/qplatformdefs.h | C | bsd-3-clause | 5,472 |
using System.Reflection;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
// General Information about an assembly is controlled through the following
// set of attributes. Change these attribute values to modify the information
// associated with an assembly.
[assembly: AssemblyTitle("cs_test")]
[assembly: AssemblyDescription("")]
[assembly: AssemblyConfiguration("")]
[assembly: AssemblyCompany("MSIT")]
[assembly: AssemblyProduct("cs_test")]
[assembly: AssemblyCopyright("Copyright © MSIT 2012")]
[assembly: AssemblyTrademark("")]
[assembly: AssemblyCulture("")]
// Setting ComVisible to false makes the types in this assembly not visible
// to COM components. If you need to access a type in this assembly from
// COM, set the ComVisible attribute to true on that type.
[assembly: ComVisible(false)]
// The following GUID is for the ID of the typelib if this project is exposed to COM
[assembly: Guid("74451adb-817c-45fa-af74-71fd22936907")]
// Version information for an assembly consists of the following four values:
//
// Major Version
// Minor Version
// Build Number
// Revision
//
// You can specify all the values or you can default the Build and Revision Numbers
// by using the '*' as shown below:
// [assembly: AssemblyVersion("1.0.*")]
[assembly: AssemblyVersion("1.0.0.0")]
[assembly: AssemblyFileVersion("1.0.0.0")]
| chriszeng8/vowpal_wabbit | cs_test/Properties/AssemblyInfo.cs | C# | bsd-3-clause | 1,434 |
#!/usr/bin/env python
"""Sample Input Reader for map job."""
import random
import string
import time
from mapreduce import context
from mapreduce import errors
from mapreduce import operation
from mapreduce.api import map_job
# pylint: disable=invalid-name
# Counter name for number of bytes read.
COUNTER_IO_READ_BYTES = "io-read-bytes"
# Counter name for milliseconds spent reading data.
COUNTER_IO_READ_MSEC = "io-read-msec"
class SampleInputReader(map_job.InputReader):
  """A sample InputReader that generates random strings as output.

  Primary usage is as an example InputReader that can be used for test
  purposes.
  """

  # Parameter key: total number of entries this reader should generate.
  COUNT = "count"
  # Parameter key: length of the generated strings.
  STRING_LENGTH = "string_length"
  # The default string length if one is not specified.
  _DEFAULT_STRING_LENGTH = 10

  def __init__(self, count, string_length):
    """Initialize input reader.

    Args:
      count: number of entries this shard should generate.
      string_length: the length of generated random strings.
    """
    self._count = count
    self._string_length = string_length

  def __iter__(self):
    """Yield `self._count` random lowercase strings, updating counters.

    Decrements self._count as it goes, so the reader can be checkpointed
    via to_json()/from_json() mid-iteration.
    """
    ctx = context.get()
    while self._count:
      self._count -= 1
      start_time = time.time()
      content = "".join(random.choice(string.ascii_lowercase)
                        for _ in range(self._string_length))
      if ctx:
        # Record time spent and bytes produced for the mapreduce counters.
        operation.counters.Increment(
            COUNTER_IO_READ_MSEC, int((time.time() - start_time) * 1000))(ctx)
        operation.counters.Increment(COUNTER_IO_READ_BYTES, len(content))(ctx)
      yield content

  @classmethod
  def from_json(cls, state):
    """Inherit docs."""
    return cls(state[cls.COUNT], state[cls.STRING_LENGTH])

  def to_json(self):
    """Inherit docs."""
    return {self.COUNT: self._count, self.STRING_LENGTH: self._string_length}

  @classmethod
  def split_input(cls, job_config):
    """Inherit docs."""
    params = job_config.input_reader_params
    count = params[cls.COUNT]
    string_length = params.get(cls.STRING_LENGTH, cls._DEFAULT_STRING_LENGTH)
    shard_count = job_config.shard_count
    count_per_shard = count // shard_count
    mr_input_readers = [
        cls(count_per_shard, string_length) for _ in range(shard_count)]
    # Any remainder entries go to one extra reader so the total matches.
    left = count - count_per_shard * shard_count
    if left > 0:
      mr_input_readers.append(cls(left, string_length))
    return mr_input_readers

  @classmethod
  def validate(cls, job_config):
    """Inherit docs."""
    super(SampleInputReader, cls).validate(job_config)
    params = job_config.input_reader_params
    # Validate count.
    if cls.COUNT not in params:
      raise errors.BadReaderParamsError("Must specify %s" % cls.COUNT)
    if not isinstance(params[cls.COUNT], int):
      raise errors.BadReaderParamsError("%s should be an int but is %s" %
                                        (cls.COUNT, type(params[cls.COUNT])))
    if params[cls.COUNT] <= 0:
      # Bug fix: the original raised "%s should be a positive int" without
      # supplying the format argument, leaking a literal "%s" into the
      # error message.
      raise errors.BadReaderParamsError("%s should be a positive int" %
                                        cls.COUNT)
    # Validate string length.
    if cls.STRING_LENGTH in params and not (
        isinstance(params[cls.STRING_LENGTH], int) and
        params[cls.STRING_LENGTH] > 0):
      raise errors.BadReaderParamsError("%s should be a positive int "
                                        "but is %s" %
                                        (cls.STRING_LENGTH,
                                         params[cls.STRING_LENGTH]))
<!DOCTYPE html>
<!--
Copyright (c) 2003-2015, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or http://ckeditor.com/license
-->
<html>
<head>
<meta charset="utf-8">
<title>Append To Page Element Using JavaScript Code — CKEditor Sample</title>
<script src="../../ckeditor.js"></script>
<link rel="stylesheet" href="sample.css">
</head>
<body>
<h1 class="samples">
<a href="index.html">CKEditor Samples</a> » Append To Page Element Using JavaScript Code
</h1>
<div class="warning deprecated">
This sample is not maintained anymore. Check out the <a href="http://sdk.ckeditor.com/">brand new samples in CKEditor SDK</a>.
</div>
<div id="section1">
<div class="description">
<p>
The <code><a class="samples" href="http://docs.ckeditor.com/#!/api/CKEDITOR-method-appendTo">CKEDITOR.appendTo()</a></code> method serves to place editors inside existing DOM elements. Unlike <code><a class="samples" href="http://docs.ckeditor.com/#!/api/CKEDITOR-method-replace">CKEDITOR.replace()</a></code>,
a target container to be replaced is no longer necessary. A new editor
instance is inserted directly wherever it is desired.
</p>
<pre class="samples">CKEDITOR.appendTo( '<em>container_id</em>',
	{ /* Configuration options to be used. */ },
	'Editor content to be used.'
);</pre>
</div>
<script>
// This call can be placed at any point after the
// DOM element to append CKEditor to or inside the <head><script>
// in a window.onload event handler.
// Append a CKEditor instance using the default configuration and the
// provided content to the <div> element of ID "section1".
CKEDITOR.appendTo( 'section1',
null,
'<p>This is some <strong>sample text</strong>. You are using <a href="http://ckeditor.com/">CKEditor</a>.</p>'
);
</script>
</div>
<br>
<div id="footer">
<hr>
<p>
CKEditor - The text editor for the Internet - <a class="samples" href="http://ckeditor.com/">http://ckeditor.com</a>
</p>
<p id="copy">
Copyright © 2003-2015, <a class="samples" href="http://cksource.com/">CKSource</a> - Frederico
Knabben. All rights reserved.
</p>
</div>
</body>
</html>
| x-meta/xworker | xworker_explorer/xworker/webroot/fckeditor/4_1/samples/old/appendto.html | HTML | apache-2.0 | 2,259 |
/**
* angular-strap
* @version v2.1.6 - 2015-01-11
* @link http://mgcrea.github.io/angular-strap
* @author Olivier Louvignes (olivier@mg-crea.com)
* @license MIT License, http://www.opensource.org/licenses/MIT
*/
"use strict";angular.module("mgcrea.ngStrap.helpers.dateFormatter",[]).service("$dateFormatter",["$locale","dateFilter",function(t,e){function r(t){return/(h+)([:\.])?(m+)[ ]?(a?)/i.exec(t).slice(1)}this.getDefaultLocale=function(){return t.id},this.getDatetimeFormat=function(e){return t.DATETIME_FORMATS[e]||e},this.weekdaysShort=function(){return t.DATETIME_FORMATS.SHORTDAY},this.hoursFormat=function(t){return r(t)[0]},this.minutesFormat=function(t){return r(t)[2]},this.timeSeparator=function(t){return r(t)[1]},this.showAM=function(t){return!!r(t)[3]},this.formatDate=function(t,r){return e(t,r)}}]);
//# sourceMappingURL=date-formatter.min.js.map | spawashe/poc-mango-cad | www/lib/bower_components/angular-strap/dist/modules/date-formatter.min.js | JavaScript | mit | 873 |
/* AFS superblock handling
*
* Copyright (c) 2002, 2007 Red Hat, Inc. All rights reserved.
*
* This software may be freely redistributed under the terms of the
* GNU General Public License.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Authors: David Howells <dhowells@redhat.com>
* David Woodhouse <dwmw2@infradead.org>
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/parser.h>
#include <linux/statfs.h>
#include <linux/sched.h>
#include "internal.h"
#define AFS_FS_MAGIC 0x6B414653 /* 'kAFS' */
static void afs_i_init_once(void *foo);
static int afs_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name,
void *data, struct vfsmount *mnt);
static struct inode *afs_alloc_inode(struct super_block *sb);
static void afs_put_super(struct super_block *sb);
static void afs_destroy_inode(struct inode *inode);
static int afs_statfs(struct dentry *dentry, struct kstatfs *buf);
struct file_system_type afs_fs_type = {
.owner = THIS_MODULE,
.name = "afs",
.get_sb = afs_get_sb,
.kill_sb = kill_anon_super,
.fs_flags = 0,
};
static const struct super_operations afs_super_ops = {
.statfs = afs_statfs,
.alloc_inode = afs_alloc_inode,
.destroy_inode = afs_destroy_inode,
.clear_inode = afs_clear_inode,
.put_super = afs_put_super,
.show_options = generic_show_options,
};
static struct kmem_cache *afs_inode_cachep;
static atomic_t afs_count_active_inodes;
enum {
afs_no_opt,
afs_opt_cell,
afs_opt_rwpath,
afs_opt_vol,
};
static const match_table_t afs_options_list = {
{ afs_opt_cell, "cell=%s" },
{ afs_opt_rwpath, "rwpath" },
{ afs_opt_vol, "vol=%s" },
{ afs_no_opt, NULL },
};
/*
 * initialise the filesystem
 * - called at module load; creates the vnode slab cache and registers
 *   the "afs" filesystem type.  Returns 0 or -errno.
 */
int __init afs_fs_init(void)
{
	int ret;

	_enter("");

	/* create ourselves an inode cache */
	atomic_set(&afs_count_active_inodes, 0);

	ret = -ENOMEM;
	afs_inode_cachep = kmem_cache_create("afs_inode_cache",
					     sizeof(struct afs_vnode),
					     0,
					     SLAB_HWCACHE_ALIGN,
					     afs_i_init_once);
	if (!afs_inode_cachep) {
		printk(KERN_NOTICE "kAFS: Failed to allocate inode cache\n");
		return ret;
	}

	/* now export our filesystem to lesser mortals */
	ret = register_filesystem(&afs_fs_type);
	if (ret < 0) {
		/* undo the cache creation so a failed load leaks nothing */
		kmem_cache_destroy(afs_inode_cachep);
		_leave(" = %d", ret);
		return ret;
	}

	_leave(" = 0");
	return 0;
}
/*
 * clean up the filesystem
 * - called at module unload; all superblocks must be gone by now, so any
 *   remaining vnode object indicates a refcount leak and is fatal (BUG).
 */
void __exit afs_fs_exit(void)
{
	_enter("");

	afs_mntpt_kill_timer();
	unregister_filesystem(&afs_fs_type);

	if (atomic_read(&afs_count_active_inodes) != 0) {
		printk("kAFS: %d active inode objects still present\n",
		       atomic_read(&afs_count_active_inodes));
		BUG();
	}

	kmem_cache_destroy(afs_inode_cachep);
	_leave("");
}
/*
 * parse the mount options
 * - this function has been shamelessly adapted from the ext3 fs which
 *   shamelessly adapted it from the msdos fs
 *
 * Recognised options: "cell=<name>", "rwpath" and "vol=<name>"; the last
 * one redirects *devname at the vol= argument.  Returns 0 or -errno.
 */
static int afs_parse_options(struct afs_mount_params *params,
			     char *options, const char **devname)
{
	struct afs_cell *cell;
	substring_t args[MAX_OPT_ARGS];
	char *p;
	int token;

	_enter("%s", options);

	/* force NUL termination at the end of the options buffer
	 * (NOTE(review): assumes the VFS passed a full page — confirm) */
	options[PAGE_SIZE - 1] = 0;

	while ((p = strsep(&options, ","))) {
		if (!*p)
			continue;

		token = match_token(p, afs_options_list, args);
		switch (token) {
		case afs_opt_cell:
			cell = afs_cell_lookup(args[0].from,
					       args[0].to - args[0].from);
			if (IS_ERR(cell))
				return PTR_ERR(cell);
			/* drop any cell picked up by an earlier cell= option */
			afs_put_cell(params->cell);
			params->cell = cell;
			break;

		case afs_opt_rwpath:
			params->rwpath = 1;
			break;

		case afs_opt_vol:
			*devname = args[0].from;
			break;

		default:
			printk(KERN_ERR "kAFS:"
			       " Unknown or invalid mount option: '%s'\n", p);
			return -EINVAL;
		}
	}

	_leave(" = 0");
	return 0;
}
/*
 * parse a device name to get cell name, volume name, volume type and R/W
 * selector
 * - this can be one of the following:
 *	"%[cell:]volume[.]"		R/W volume
 *	"#[cell:]volume[.]"		R/O or R/W volume (rwpath=0),
 *					 or R/W (rwpath=1) volume
 *	"%[cell:]volume.readonly"	R/O volume
 *	"#[cell:]volume.readonly"	R/O volume
 *	"%[cell:]volume.backup"		Backup volume
 *	"#[cell:]volume.backup"		Backup volume
 *
 * Fills in params->type/force/cell/volname/volnamesz; returns 0 or -errno.
 */
static int afs_parse_device_name(struct afs_mount_params *params,
				 const char *name)
{
	struct afs_cell *cell;
	const char *cellname, *suffix;
	int cellnamesz;

	_enter(",%s", name);

	if (!name) {
		printk(KERN_ERR "kAFS: no volume name specified\n");
		return -EINVAL;
	}

	/* the name must start with '%' or '#' and have at least one more char */
	if ((name[0] != '%' && name[0] != '#') || !name[1]) {
		printk(KERN_ERR "kAFS: unparsable volume name\n");
		return -EINVAL;
	}

	/* determine the type of volume we're looking for */
	params->type = AFSVL_ROVOL;
	params->force = false;
	if (params->rwpath || name[0] == '%') {
		params->type = AFSVL_RWVOL;
		params->force = true;
	}
	name++;

	/* split the cell name out if there is one */
	params->volname = strchr(name, ':');
	if (params->volname) {
		cellname = name;
		cellnamesz = params->volname - name;
		params->volname++;
	} else {
		params->volname = name;
		cellname = NULL;
		cellnamesz = 0;
	}

	/* the volume type is further affected by a possible suffix */
	suffix = strrchr(params->volname, '.');
	if (suffix) {
		if (strcmp(suffix, ".readonly") == 0) {
			params->type = AFSVL_ROVOL;
			params->force = true;
		} else if (strcmp(suffix, ".backup") == 0) {
			params->type = AFSVL_BACKVOL;
			params->force = true;
		} else if (suffix[1] == 0) {
			/* a bare trailing dot is allowed and stripped */
		} else {
			/* unknown suffix: treat the dot as part of the name */
			suffix = NULL;
		}
	}

	params->volnamesz = suffix ?
		suffix - params->volname : strlen(params->volname);

	_debug("cell %*.*s [%p]",
	       cellnamesz, cellnamesz, cellname ?: "", params->cell);

	/* lookup the cell record; an explicit cell overrides any default */
	if (cellname || !params->cell) {
		cell = afs_cell_lookup(cellname, cellnamesz);
		if (IS_ERR(cell)) {
			printk(KERN_ERR "kAFS: unable to lookup cell '%s'\n",
			       cellname ?: "");
			return PTR_ERR(cell);
		}
		afs_put_cell(params->cell);
		params->cell = cell;
	}

	_debug("CELL:%s [%p] VOLUME:%*.*s SUFFIX:%s TYPE:%d%s",
	       params->cell->name, params->cell,
	       params->volnamesz, params->volnamesz, params->volname,
	       suffix ?: "-", params->type, params->force ? " FORCE" : "");

	return 0;
}
/*
* check a superblock to see if it's the one we're looking for
*/
static int afs_test_super(struct super_block *sb, void *data)
{
struct afs_mount_params *params = data;
struct afs_super_info *as = sb->s_fs_info;
return as->volume == params->volume;
}
/*
 * fill in the superblock
 * - called once per new superblock from afs_get_sb(); allocates the
 *   per-sb info, then instantiates the root inode {vid, 1, 1} and dentry.
 */
static int afs_fill_super(struct super_block *sb, void *data)
{
	struct afs_mount_params *params = data;
	struct afs_super_info *as = NULL;
	struct afs_fid fid;
	struct dentry *root = NULL;
	struct inode *inode = NULL;
	int ret;

	_enter("");

	/* allocate a superblock info record */
	as = kzalloc(sizeof(struct afs_super_info), GFP_KERNEL);
	if (!as) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	/* the sb holds its own reference on the volume */
	afs_get_volume(params->volume);
	as->volume = params->volume;

	/* fill in the superblock */
	sb->s_blocksize		= PAGE_CACHE_SIZE;
	sb->s_blocksize_bits	= PAGE_CACHE_SHIFT;
	sb->s_magic		= AFS_FS_MAGIC;
	sb->s_op		= &afs_super_ops;
	sb->s_fs_info		= as;
	sb->s_bdi		= &as->volume->bdi;

	/* allocate the root inode and dentry; the root is addressed here
	 * as fid {volume id, vnode 1, unique 1} */
	fid.vid		= as->volume->vid;
	fid.vnode	= 1;
	fid.unique	= 1;
	inode = afs_iget(sb, params->key, &fid, NULL, NULL);
	if (IS_ERR(inode))
		goto error_inode;

	ret = -ENOMEM;
	root = d_alloc_root(inode);
	if (!root)
		goto error;

	sb->s_root = root;

	_leave(" = 0");
	return 0;

error_inode:
	ret = PTR_ERR(inode);
	inode = NULL;	/* iput(NULL) below is then a no-op */
error:
	iput(inode);
	afs_put_volume(as->volume);
	kfree(as);
	sb->s_fs_info = NULL;

	_leave(" = %d", ret);
	return ret;
}
/*
 * get an AFS superblock
 * - parses options and the device name, looks up cell/volume/key, then
 *   finds or creates a (deviceless) superblock for the volume via sget()
 */
static int afs_get_sb(struct file_system_type *fs_type,
		      int flags,
		      const char *dev_name,
		      void *options,
		      struct vfsmount *mnt)
{
	struct afs_mount_params params;
	struct super_block *sb;
	struct afs_volume *vol;
	struct key *key;
	char *new_opts = kstrdup(options, GFP_KERNEL);
	int ret;

	_enter(",,%s,%p", dev_name, options);

	memset(&params, 0, sizeof(params));

	/* parse the options and device name */
	if (options) {
		ret = afs_parse_options(&params, options, &dev_name);
		if (ret < 0)
			goto error;
	}

	ret = afs_parse_device_name(&params, dev_name);
	if (ret < 0)
		goto error;

	/* try and do the mount securely */
	key = afs_request_key(params.cell);
	if (IS_ERR(key)) {
		_leave(" = %ld [key]", PTR_ERR(key));
		ret = PTR_ERR(key);
		goto error;
	}
	params.key = key;

	/* parse the device name */
	vol = afs_volume_lookup(&params);
	if (IS_ERR(vol)) {
		ret = PTR_ERR(vol);
		goto error;
	}
	params.volume = vol;

	/* allocate a deviceless superblock; afs_test_super matches on the
	 * volume, so concurrent mounts of one volume share a superblock */
	sb = sget(fs_type, afs_test_super, set_anon_super, &params);
	if (IS_ERR(sb)) {
		ret = PTR_ERR(sb);
		goto error;
	}

	if (!sb->s_root) {
		/* initial superblock/root creation */
		_debug("create");
		sb->s_flags = flags;
		ret = afs_fill_super(sb, &params);
		if (ret < 0) {
			deactivate_locked_super(sb);
			goto error;
		}
		save_mount_options(sb, new_opts);
		sb->s_flags |= MS_ACTIVE;
	} else {
		_debug("reuse");
		ASSERTCMP(sb->s_flags, &, MS_ACTIVE);
	}

	simple_set_mnt(mnt, sb);
	afs_put_volume(params.volume);
	afs_put_cell(params.cell);
	kfree(new_opts);
	/* NOTE(review): unlike the error path, the success path does not
	 * key_put(params.key) — verify the key reference is consumed or
	 * retained deliberately elsewhere */
	_leave(" = 0 [%p]", sb);
	return 0;

error:
	afs_put_volume(params.volume);
	afs_put_cell(params.cell);
	key_put(params.key);
	kfree(new_opts);
	_leave(" = %d", ret);
	return ret;
}
/*
 * finish the unmounting process on the superblock
 * - drops the volume reference taken by afs_fill_super(), under the BKL
 */
static void afs_put_super(struct super_block *sb)
{
	struct afs_super_info *as = sb->s_fs_info;

	_enter("");

	lock_kernel();
	afs_put_volume(as->volume);
	unlock_kernel();

	_leave("");
}
/*
 * initialise an inode cache slab element prior to any use
 * - slab constructor: only sets up state that survives object reuse;
 *   per-inode fields are (re)set in afs_alloc_inode()
 */
static void afs_i_init_once(void *_vnode)
{
	struct afs_vnode *vnode = _vnode;

	memset(vnode, 0, sizeof(*vnode));
	inode_init_once(&vnode->vfs_inode);
	init_waitqueue_head(&vnode->update_waitq);
	mutex_init(&vnode->permits_lock);
	mutex_init(&vnode->validate_lock);
	spin_lock_init(&vnode->writeback_lock);
	spin_lock_init(&vnode->lock);
	INIT_LIST_HEAD(&vnode->writebacks);
	INIT_LIST_HEAD(&vnode->pending_locks);
	INIT_LIST_HEAD(&vnode->granted_locks);
	INIT_DELAYED_WORK(&vnode->lock_work, afs_lock_work);
	INIT_WORK(&vnode->cb_broken_work, afs_broken_callback_work);
}
/*
 * allocate an AFS inode struct from our slab cache
 * - clears the per-inode fields left over from any previous user of the
 *   slab object and bumps the active-inode count used at module exit
 */
static struct inode *afs_alloc_inode(struct super_block *sb)
{
	struct afs_vnode *vnode;

	vnode = kmem_cache_alloc(afs_inode_cachep, GFP_KERNEL);
	if (!vnode)
		return NULL;

	atomic_inc(&afs_count_active_inodes);

	memset(&vnode->fid, 0, sizeof(vnode->fid));
	memset(&vnode->status, 0, sizeof(vnode->status));

	vnode->volume		= NULL;
	vnode->update_cnt	= 0;
	vnode->flags		= 1 << AFS_VNODE_UNSET;
	vnode->cb_promised	= false;

	_leave(" = %p", &vnode->vfs_inode);
	return &vnode->vfs_inode;
}
/*
 * destroy an AFS inode struct
 * - the vnode must already be detached from its server (asserted below)
 */
static void afs_destroy_inode(struct inode *inode)
{
	struct afs_vnode *vnode = AFS_FS_I(inode);

	_enter("%p{%x:%u}", inode, vnode->fid.vid, vnode->fid.vnode);

	_debug("DESTROY INODE %p", inode);

	ASSERTCMP(vnode->server, ==, NULL);

	kmem_cache_free(afs_inode_cachep, vnode);
	atomic_dec(&afs_count_active_inodes);
}
/*
 * return information about an AFS volume
 * - fetches the live volume status from the server using a key for the
 *   volume's cell; a max_quota of 0 means "no quota", so the partition
 *   size is reported as the block total instead
 */
static int afs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct afs_volume_status vs;
	struct afs_vnode *vnode = AFS_FS_I(dentry->d_inode);
	struct key *key;
	int ret;

	key = afs_request_key(vnode->volume->cell);
	if (IS_ERR(key))
		return PTR_ERR(key);

	ret = afs_vnode_get_volume_status(vnode, key, &vs);
	key_put(key);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}

	buf->f_type	= dentry->d_sb->s_magic;
	buf->f_bsize	= AFS_BLOCK_SIZE;
	buf->f_namelen	= AFSNAMEMAX - 1;

	if (vs.max_quota == 0)
		buf->f_blocks = vs.part_max_blocks;
	else
		buf->f_blocks = vs.max_quota;
	buf->f_bavail = buf->f_bfree = buf->f_blocks - vs.blocks_in_use;
	return 0;
}
| KOala888/GB_kernel | linux_kernel_galaxyplayer-master/fs/afs/super.c | C | gpl-2.0 | 12,261 |
/**
* @author Richard Davey <rich@photonstorm.com>
* @copyright 2016 Photon Storm Ltd.
* @license {@link https://github.com/photonstorm/phaser/blob/master/license.txt|MIT License}
*/
/**
* An Image is a light-weight object you can use to display anything that doesn't need physics or animation.
* It can still rotate, scale, crop and receive input events. This makes it perfect for logos, backgrounds, simple buttons and other non-Sprite graphics.
*
* @class Phaser.Image
* @extends PIXI.Sprite
* @extends Phaser.Component.Core
* @extends Phaser.Component.Angle
* @extends Phaser.Component.Animation
* @extends Phaser.Component.AutoCull
* @extends Phaser.Component.Bounds
* @extends Phaser.Component.BringToTop
* @extends Phaser.Component.Crop
* @extends Phaser.Component.Destroy
* @extends Phaser.Component.FixedToCamera
* @extends Phaser.Component.InputEnabled
* @extends Phaser.Component.LifeSpan
* @extends Phaser.Component.LoadTexture
* @extends Phaser.Component.Overlap
* @extends Phaser.Component.Reset
* @extends Phaser.Component.ScaleMinMax
* @extends Phaser.Component.Smoothed
* @constructor
* @param {Phaser.Game} game - A reference to the currently running game.
* @param {number} [x=0] - The x coordinate of the Image. The coordinate is relative to any parent container this Image may be in.
* @param {number} [y=0] - The y coordinate of the Image. The coordinate is relative to any parent container this Image may be in.
* @param {string|Phaser.RenderTexture|Phaser.BitmapData|PIXI.Texture} [key] - The texture used by the Image during rendering. It can be a string which is a reference to the Cache entry, or an instance of a RenderTexture, BitmapData or PIXI.Texture.
* @param {string|number} [frame] - If this Image is using part of a sprite sheet or texture atlas you can specify the exact frame to use by giving a string or numeric index.
*/
Phaser.Image = function (game, x, y, key, frame) {

    // Default the optional arguments: position (0,0), no texture key/frame.
    x = x || 0;
    y = y || 0;
    key = key || null;
    frame = frame || null;

    /**
    * @property {number} type - The const type of this object.
    * @readonly
    */
    this.type = Phaser.IMAGE;

    // Start from the shared default texture; Core.init (below) is handed
    // `key`/`frame` to set up the real texture and components.
    PIXI.Sprite.call(this, Phaser.Cache.DEFAULT);

    Phaser.Component.Core.init.call(this, game, x, y, key, frame);

};
// Inherit from PIXI.Sprite and restore the constructor reference.
Phaser.Image.prototype = Object.create(PIXI.Sprite.prototype);
Phaser.Image.prototype.constructor = Phaser.Image;

// Mix the shared game-object components into the prototype.
Phaser.Component.Core.install.call(Phaser.Image.prototype, [
    'Angle',
    'Animation',
    'AutoCull',
    'Bounds',
    'BringToTop',
    'Crop',
    'Destroy',
    'FixedToCamera',
    'InputEnabled',
    'LifeSpan',
    'LoadTexture',
    'Overlap',
    'Reset',
    'ScaleMinMax',
    'Smoothed'
]);

// Keep direct references to the component preUpdate handlers so the
// combined Phaser.Image#preUpdate can invoke both in order.
Phaser.Image.prototype.preUpdateInWorld = Phaser.Component.InWorld.preUpdate;
Phaser.Image.prototype.preUpdateCore = Phaser.Component.Core.preUpdate;
/**
 * Automatically called by World.preUpdate.
 *
 * Runs the InWorld component check first and only proceeds to the core
 * component update when it passes.
 *
 * @method Phaser.Image#preUpdate
 * @memberof Phaser.Image
 */
Phaser.Image.prototype.preUpdate = function () {

    return this.preUpdateInWorld() ? this.preUpdateCore() : false;

};
#ifndef _ASM_X86_PGTABLE_64_H
#define _ASM_X86_PGTABLE_64_H
#include <linux/const.h>
#include <asm/pgtable_64_types.h>
#ifndef __ASSEMBLY__
/*
* This file contains the functions and defines necessary to modify and use
* the x86-64 page table tree.
*/
#include <asm/processor.h>
#include <linux/bitops.h>
#include <linux/threads.h>
extern pud_t level3_kernel_pgt[512];
extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512];
extern pmd_t level2_ident_pgt[512];
extern pte_t level1_fixmap_pgt[512];
extern pgd_t init_level4_pgt[];
#define swapper_pg_dir init_level4_pgt
extern void paging_init(void);
#define pte_ERROR(e) \
pr_err("%s:%d: bad pte %p(%016lx)\n", \
__FILE__, __LINE__, &(e), pte_val(e))
#define pmd_ERROR(e) \
pr_err("%s:%d: bad pmd %p(%016lx)\n", \
__FILE__, __LINE__, &(e), pmd_val(e))
#define pud_ERROR(e) \
pr_err("%s:%d: bad pud %p(%016lx)\n", \
__FILE__, __LINE__, &(e), pud_val(e))
#define pgd_ERROR(e) \
pr_err("%s:%d: bad pgd %p(%016lx)\n", \
__FILE__, __LINE__, &(e), pgd_val(e))
struct mm_struct;
void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
/*
 * Native (non-paravirt) page-table accessors: one set/clear pair per
 * table level, plus atomic get-and-clear helpers for PTE/PMD.
 */
static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	*ptep = native_make_pte(0);
}

static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	/* a PTE is a single word on x86-64, so the plain store suffices */
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	native_set_pmd(pmd, native_make_pmd(0));
}

static inline pte_t native_ptep_get_and_clear(pte_t *xp)
{
#ifdef CONFIG_SMP
	/* xchg makes the read-and-zero atomic against other CPUs */
	return native_make_pte(xchg(&xp->pte, 0));
#else
	/* native_local_ptep_get_and_clear,
	   but duplicated because of cyclic dependency */
	pte_t ret = *xp;
	native_pte_clear(NULL, 0, xp);
	return ret;
#endif
}

static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
{
#ifdef CONFIG_SMP
	/* xchg makes the read-and-zero atomic against other CPUs */
	return native_make_pmd(xchg(&xp->pmd, 0));
#else
	/* native_local_pmdp_get_and_clear,
	   but duplicated because of cyclic dependency */
	pmd_t ret = *xp;
	native_pmd_clear(xp);
	return ret;
#endif
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
}

static inline void native_pud_clear(pud_t *pud)
{
	native_set_pud(pud, native_make_pud(0));
}

static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	*pgdp = pgd;
}

static inline void native_pgd_clear(pgd_t *pgd)
{
	native_set_pgd(pgd, native_make_pgd(0));
}
extern void sync_global_pgds(unsigned long start, unsigned long end);
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
/*
* Level 4 access.
*/
static inline int pgd_large(pgd_t pgd) { return 0; }
#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)
/* PUD - Level3 access */
/* PMD - Level 2 access */
/* PTE - Level 1 access. */
/* x86-64 always has all page tables mapped. */
#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
#define pte_unmap(pte) ((void)(pte))/* NOP */
/*
* Encode and de-code a swap entry
*
* | ... | 11| 10| 9|8|7|6|5| 4| 3|2|1|0| <- bit number
* | ... |SW3|SW2|SW1|G|L|D|A|CD|WT|U|W|P| <- bit names
* | OFFSET (14->63) | TYPE (9-13) |0|X|X|X| X| X|X|X|0| <- swp entry
*
* G (8) is aliased and used as a PROT_NONE indicator for
* !present ptes. We need to start storing swap entries above
* there. We also need to avoid using A and D because of an
* erratum where they can be incorrectly set by hardware on
* non-present PTEs.
*/
#define SWP_TYPE_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
#define SWP_TYPE_BITS 5
/* Place the offset above the type: */
#define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS)
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
#define __swp_type(x) (((x).val >> (SWP_TYPE_FIRST_BIT)) \
& ((1U << SWP_TYPE_BITS) - 1))
#define __swp_offset(x) ((x).val >> SWP_OFFSET_FIRST_BIT)
#define __swp_entry(type, offset) ((swp_entry_t) { \
((type) << (SWP_TYPE_FIRST_BIT)) \
| ((offset) << SWP_OFFSET_FIRST_BIT) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) })
#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
extern int kern_addr_valid(unsigned long addr);
extern void cleanup_highmap(void);
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#define pgtable_cache_init() do { } while (0)
#define check_pgt_cache() do { } while (0)
#define PAGE_AGP PAGE_KERNEL_NOCACHE
#define HAVE_PAGE_AGP 1
/* fs/proc/kcore.c */
#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
#define kc_offset_to_vaddr(o) ((o) | ~__VIRTUAL_MASK)
#define __HAVE_ARCH_PTE_SAME
#define vmemmap ((struct page *)VMEMMAP_START)
extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PGTABLE_64_H */
| bas-t/linux_media | arch/x86/include/asm/pgtable_64.h | C | gpl-2.0 | 5,183 |
/******************************************************************
*
* Copyright 2015 Samsung Electronics All Rights Reserved.
*
*
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
******************************************************************/
/**
* @file simulator_remote_resource.h
*
* @brief This file provides a class for handling discovered resources.
*
*/
#ifndef SIMULATOR_REMOTE_RESOURCE_H_
#define SIMULATOR_REMOTE_RESOURCE_H_
#include "simulator_client_types.h"
#include "simulator_resource_model.h"
#include "simulator_request_model.h"
#include "simulator_uncopyable.h"
#include "simulator_exceptions.h"
/**
 * @class   SimulatorRemoteResource
 * @brief   This class provides an API for handling discovered resources.
 */
class SimulatorRemoteResource : private UnCopyable
{
    public:
        /**
         * Callback method for receiving response for GET, PUT and POST requests.
         *
         * @param uid - Identifier of remote resource.
         * @param result - Result of the request mapped to one of the enum value
         * in @SimulatorResult.
         * @param resModel - Resource representation model.
         */
        typedef std::function<void (const std::string &uid, SimulatorResult result,
                                    const SimulatorResourceModel &resModel)>
        ResponseCallback;

        typedef ResponseCallback GetResponseCallback;
        typedef ResponseCallback PutResponseCallback;
        typedef ResponseCallback PostResponseCallback;
        typedef ResponseCallback DeleteResponseCallback;

        /**
         * Callback method for receiving representation change notifications from remote resource.
         *
         * @param uid - Identifier of remote resource.
         * @param result - Result of the request mapped to one of the enum value
         * in @SimulatorResult.
         * @param resModel - Resource representation model.
         * @param id - Notification sequence id.
         */
        typedef std::function<void (const std::string &uid, SimulatorResult result,
                                    const SimulatorResourceModel &resModel, int id)>
        ObserveNotificationCallback;

        /**
         * Callback method for receiving auto request generation and verification progress state.
         *
         * @param uid - Identifier of remote resource.
         * @param id - Auto request generation id.
         * @param state - Auto request generation and validation state.
         */
        typedef std::function<void(const std::string &uid, int id, OperationState state)>
        AutoRequestGenerationCallback;

        /**
         * API for getting URI of resource.
         *
         * @return URI of resource.
         */
        virtual std::string getURI() const = 0;

        /**
         * API for getting host address of resource.
         *
         * @return Host address of resource.
         */
        virtual std::string getHost() const = 0;

        /**
         * API for getting unique id of resource.
         *
         * @return ID of resource.
         */
        virtual std::string getID() const = 0;

        /**
         * API for getting connectivity type of resource.
         *
         * @return enum SimulatorConnectivityType value
         */
        virtual SimulatorConnectivityType getConnectivityType() const = 0;

        /**
         * API for getting resource types bound with the resource.
         *
         * @return vector of strings representing resource types.
         */
        virtual std::vector < std::string > getResourceTypes() const = 0;

        /**
         * API for getting interface types bound with the resource.
         *
         * @return vector of strings representing interface types.
         */
        virtual std::vector < std::string > getInterface() const = 0;

        /**
         * API to check whether resource can be observed or not.
         *
         * @return true if resource is observable, otherwise false.
         */
        virtual bool isObservable() const = 0;

        /**
         * API to send observe request to remote resource.
         *
         * @param type - Observe request type.
         * @param callback - callback for receiving notifications from remote resource
         * asynchronously.
         */
        virtual void observe(ObserveType type, ObserveNotificationCallback callback) = 0;

        /**
         * API to send cancel observe request to remote resource.
         *
         */
        virtual void cancelObserve() = 0;

        /**
         * API to send GET request to remote resource.
         *
         * @param callback - callback for receiving response from remote resource
         * asynchronously.
         */
        virtual void get(const GetResponseCallback &callback) = 0;

        /**
         * API to send GET request to remote resource.
         *
         * @param queryParams - Query parameters string.
         * @param callback - callback for receiving response from remote resource
         * asynchronously.
         */
        virtual void get(const std::map<std::string, std::string> &queryParams,
                         const GetResponseCallback &) = 0;

        /**
         * API to send GET request to remote resource.
         *
         * @param interfaceType - Interface type on which request to be sent.
         * @param queryParams - Query parameters string.
         * @param callback - callback for receiving response from remote resource
         * asynchronously.
         */
        virtual void get(const std::string &interfaceType,
                         const std::map<std::string, std::string> &queryParams,
                         const GetResponseCallback &) = 0;

        /**
         * API to send PUT request to remote resource.
         *
         * @param representation - Resource representation to be sent with request.
         * @param callback - callback for receiving response from remote resource
         * asynchronously.
         */
        virtual void put(const SimulatorResourceModel &representation,
                         const PutResponseCallback &callback) = 0;

        /**
         * API to send PUT request to remote resource.
         *
         * @param queryParams - Query parameters string.
         * @param representation - Resource representation to be sent with request.
         * @param callback - callback for receiving response from remote resource
         * asynchronously.
         */
        virtual void put(const std::map<std::string, std::string> &queryParams,
                         const SimulatorResourceModel &representation,
                         const PutResponseCallback &callback) = 0;

        /**
         * API to send PUT request to remote resource.
         *
         * @param interfaceType - Interface type on which request to be sent.
         * @param queryParams - Query parameters string.
         * @param representation - Resource representation to be sent with request.
         * @param callback - callback for receiving response from remote resource
         * asynchronously.
         */
        virtual void put(const std::string &interfaceType,
                         const std::map<std::string, std::string> &queryParams,
                         const SimulatorResourceModel &representation,
                         const PutResponseCallback &callback) = 0;

        /**
         * API to send POST request to remote resource.
         *
         * @param representation - Resource representation to be sent with request.
         * @param callback - callback for receiving response from remote resource
         * asynchronously.
         */
        virtual void post(const SimulatorResourceModel &representation,
                          const PostResponseCallback &callback) = 0;

        /**
         * API to send POST request to remote resource.
         *
         * @param queryParams - Query parameters string.
         * @param representation - Resource representation to be sent with request.
         * @param callback - callback for receiving response from remote resource
         * asynchronously.
         */
        virtual void post(const std::map<std::string, std::string> &queryParams,
                          const SimulatorResourceModel &representation,
                          const PostResponseCallback &callback) = 0;

        /**
         * API to send POST request to remote resource.
         *
         * @param interfaceType - Interface type on which request to be sent.
         * @param queryParams - Query parameters string.
         * @param representation - Resource representation to be sent with request.
         * @param callback - callback for receiving response from remote resource
         * asynchronously.
         */
        virtual void post(const std::string &interfaceType,
                          const std::map<std::string, std::string> &queryParams,
                          const SimulatorResourceModel &representation,
                          const PostResponseCallback &callback) = 0;

        /**
         * API to configure resource from RAML file.
         *
         * @param path - Path to RAML file.
         *
         * @return Map of request models representing the format of requests.
         */
        virtual std::map<RequestType, SimulatorRequestModel> configure(
            const std::string &path) = 0;

        /**
         * API to start generating requests and send to remote resource.
         *
         * @param type - Request type.
         * @param callback - callback for receiving progress state of auto request
         * generation process.
         *
         * @return Identifier of auto request generating session. This id should be used
         * for stopping the same.
         */
        virtual int startAutoRequesting(RequestType type,
                                        AutoRequestGenerationCallback callback) = 0;

        /**
         * API to stop generating requests and send to remote resource.
         *
         * @param id - Identifier of auto request generating session.
         */
        virtual void stopAutoRequesting(int id) = 0;
};
#endif
| kadasaikumar/iotivity-1.2.1 | service/simulator/inc/simulator_remote_resource.h | C | gpl-3.0 | 10,829 |
<?php
// This file is part of Moodle - http://moodle.org/
//
// Moodle is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Moodle is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Moodle. If not, see <http://www.gnu.org/licenses/>.
/**
* Generic exporter to take a stdClass and prepare it for return by webservice.
*
* @package core
* @copyright 2015 Damyon Wiese
* @license http://www.gnu.org/copyleft/gpl.html GNU GPL v3 or later
*/
namespace core\external;
defined('MOODLE_INTERNAL') || die();
require_once($CFG->libdir . '/externallib.php');
use stdClass;
use renderer_base;
use context;
use context_system;
use coding_exception;
use external_single_structure;
use external_multiple_structure;
use external_value;
use external_format_value;
/**
* Generic exporter to take a stdClass and prepare it for return by webservice, or as the context for a template.
*
* templatable classes implementing export_for_template, should always use a standard exporter if it exists.
* External functions should always use a standard exporter if it exists.
*
* @copyright 2015 Damyon Wiese
* @license http://www.gnu.org/copyleft/gpl.html GNU GPL v3 or later
*/
abstract class exporter {
/** @var array $related List of related objects used to avoid DB queries. */
protected $related = array();
/** @var stdClass|array The data of this exporter. */
protected $data = null;
/**
 * Constructor - saves the persistent object, and the related objects.
 *
 * Each entry returned by {@link self::define_related()} is validated against
 * the supplied $related array. A class name may be suffixed with '?' (null
 * allowed) and/or '[]' (array of values), and may also be one of the scalar
 * type names 'string', 'int', 'bool' or 'float'.
 *
 * @param mixed $data - Either an stdClass or an array of values.
 * @param array $related - An optional list of pre-loaded objects related to this object.
 * @throws coding_exception If a required related object is missing or of the wrong type.
 */
public function __construct($data, $related = array()) {
    $this->data = $data;
    // Cache the valid related objects.
    foreach (static::define_related() as $key => $classname) {
        $isarray = false;
        $nullallowed = false;
        // Allow ? to mean null is allowed.
        if (substr($classname, -1) === '?') {
            $classname = substr($classname, 0, -1);
            $nullallowed = true;
        }
        // Allow [] to mean an array of values.
        if (substr($classname, -2) === '[]') {
            $classname = substr($classname, 0, -2);
            $isarray = true;
        }
        $missingdataerr = 'Exporter class is missing required related data: (' . get_called_class() . ') ';
        $scalartypes = ['string', 'int', 'bool', 'float'];
        // Only call the is_<type>() check when the declared type really is a
        // scalar: for class types (e.g. 'context') the function 'is_context'
        // does not exist and calling it would be a fatal error instead of the
        // intended coding_exception.
        $isscalartype = in_array($classname, $scalartypes);
        $scalarcheck = 'is_' . $classname;
        if ($nullallowed && (!array_key_exists($key, $related) || $related[$key] === null)) {
            $this->related[$key] = null;
        } else if ($isarray) {
            if (array_key_exists($key, $related) && is_array($related[$key])) {
                foreach ($related[$key] as $index => $value) {
                    if (!($value instanceof $classname) && !($isscalartype && $scalarcheck($value))) {
                        throw new coding_exception($missingdataerr . $key . ' => ' . $classname . '[]');
                    }
                }
                $this->related[$key] = $related[$key];
            } else {
                throw new coding_exception($missingdataerr . $key . ' => ' . $classname . '[]');
            }
        } else {
            if (array_key_exists($key, $related) &&
                    (($isscalartype && $scalarcheck($related[$key])) ||
                    ($related[$key] instanceof $classname))) {
                $this->related[$key] = $related[$key];
            } else {
                throw new coding_exception($missingdataerr . $key . ' => ' . $classname);
            }
        }
    }
}
/**
 * Function to export the renderer data in a format that is suitable for a
 * mustache template. This means raw records are generated as in to_record,
 * but all strings are correctly passed through external_format_text (or external_format_string).
 *
 * @param renderer_base $output Used to do a final render of any components that need to be rendered for export.
 * @return stdClass
 * @throws coding_exception If an "other" value collides with a standard property,
 *         or a non-optional property (or its companion format property) is missing.
 */
final public function export(renderer_base $output) {
    $data = new stdClass();
    $properties = self::read_properties_definition();
    $values = (array) $this->data;
    // Merge the generated values in; they must not collide with the raw data.
    $othervalues = $this->get_other_values($output);
    if (array_intersect_key($values, $othervalues)) {
        // Attempt to replace a standard property.
        throw new coding_exception('Cannot override a standard property value.');
    }
    $values += $othervalues;
    $record = (object) $values;
    foreach ($properties as $property => $definition) {
        if (isset($data->$property)) {
            // This happens when we have already defined the format properties.
            continue;
        } else if (!property_exists($record, $property) && array_key_exists('default', $definition)) {
            // We have a default value for this property.
            $record->$property = $definition['default'];
        } else if (!property_exists($record, $property) && !empty($definition['optional'])) {
            // Fine, this property can be omitted.
            continue;
        } else if (!property_exists($record, $property)) {
            // Whoops, we got something that wasn't defined.
            throw new coding_exception('Unexpected property ' . $property);
        }
        $data->$property = $record->$property;
        // If the field is PARAM_RAW and has a format field.
        if ($propertyformat = self::get_format_field($properties, $property)) {
            if (!property_exists($record, $propertyformat)) {
                // Whoops, we got something that wasn't defined.
                throw new coding_exception('Unexpected property ' . $propertyformat);
            }
            // Export the value and its format property together; the format may
            // be converted by external_format_text, so write both back.
            $formatparams = $this->get_format_parameters($property);
            $format = $record->$propertyformat;
            list($text, $format) = external_format_text($data->$property, $format, $formatparams['context'],
                $formatparams['component'], $formatparams['filearea'], $formatparams['itemid'], $formatparams['options']);
            $data->$property = $text;
            $data->$propertyformat = $format;
        } else if ($definition['type'] === PARAM_TEXT) {
            $formatparams = $this->get_format_parameters($property);
            if (!empty($definition['multiple'])) {
                // Multi-valued PARAM_TEXT: format each element in place.
                foreach ($data->$property as $key => $value) {
                    $data->{$property}[$key] = external_format_string($value, $formatparams['context'],
                        $formatparams['striplinks'], $formatparams['options']);
                }
            } else {
                $data->$property = external_format_string($data->$property, $formatparams['context'],
                    $formatparams['striplinks'], $formatparams['options']);
            }
        }
    }
    return $data;
}
/**
 * Get the format parameters.
 *
 * This method returns the parameters to use with the functions external_format_text(), and
 * external_format_string(). To override the default parameters, you can define a protected method
 * called 'get_format_parameters_for_<propertyName>'. For example, 'get_format_parameters_for_description',
 * if your property is 'description'.
 *
 * Your method must return an array containing any of the following keys:
 * - context: The context to use. Defaults to $this->related['context'] if defined, else throws an exception.
 * - component: The component to use with external_format_text(). Defaults to null.
 * - filearea: The filearea to use with external_format_text(). Defaults to null.
 * - itemid: The itemid to use with external_format_text(). Defaults to null.
 * - options: An array of options accepted by external_format_text() or external_format_string(). Defaults to [].
 * - striplinks: Whether to strip the links with external_format_string(). Defaults to true.
 *
 * @param string $property The property to get the parameters for.
 * @return array
 * @throws coding_exception When no valid context can be determined for the property.
 */
final protected function get_format_parameters($property) {
    // Defaults applied when the per-property override does not supply a key.
    $parameters = [
        'component' => null,
        'filearea' => null,
        'itemid' => null,
        'options' => [],
        'striplinks' => true,
    ];
    // Allow subclasses to override via get_format_parameters_for_<property>().
    $candidate = 'get_format_parameters_for_' . $property;
    if (method_exists($this, $candidate)) {
        $parameters = array_merge($parameters, $this->{$candidate}());
    }
    if (!isset($parameters['context'])) {
        // No context supplied by the override: fall back to the 'context' related
        // object, which must then exist and be a real context instance.
        if (!isset($this->related['context']) || !($this->related['context'] instanceof context)) {
            throw new coding_exception("Unknown context to use for formatting the property '$property' in the " .
                "exporter '" . get_class($this) . "'. You either need to add 'context' to your related objects, " .
                "or create the method '$candidate' and return the context from there.");
        }
        $parameters['context'] = $this->related['context'];
    } else if (!($parameters['context'] instanceof context)) {
        throw new coding_exception("The context given to format the property '$property' in the exporter '" .
            get_class($this) . "' is invalid.");
    }
    return $parameters;
}
/**
 * Get the additional values to inject while exporting.
 *
 * These are generated values that are not part of the $data passed to the
 * exporter (for a persistent exporter, values that do not exist on the
 * persistent class). PARAM_TEXT fields returned here do not need
 * format_text/format_string applied; that happens automatically during export.
 *
 * These values are only used by {@link self::export()}; they are not used when
 * generating any of the external structures, and every key returned here must
 * be declared in {@link self::define_other_properties()}.
 *
 * @param renderer_base $output The renderer.
 * @return array Keys are the property names, values are their values.
 */
protected function get_other_values(renderer_base $output) {
    // Nothing extra by default; exporters override this when needed.
    $values = array();
    return $values;
}
/**
 * Get the read properties definition of this exporter.
 *
 * Read properties combine the default properties from the model
 * ({@link self::properties_definition()}) with the display-only properties
 * declared by {@link self::define_other_properties()}, the latter normalised
 * via {@link self::format_properties()}. On key collision the model
 * properties win.
 *
 * @return array Keys are the property names, and value their definition.
 */
final public static function read_properties_definition() {
    $extras = static::format_properties(static::define_other_properties());
    return static::properties_definition() + $extras;
}
/**
 * Recursively formats a given property definition with the default fields required.
 *
 * Fills in 'null' (NULL_NOT_ALLOWED) and 'description' (the property name)
 * when missing, and recurses into nested array types so that deeply nested
 * definitions are normalised too.
 *
 * @param array $properties List of properties to format
 * @return array Formatted array
 */
final public static function format_properties($properties) {
    foreach (array_keys($properties) as $name) {
        $definition =& $properties[$name];
        // Ensure the mandatory defaults are present.
        if (!isset($definition['null'])) {
            $definition['null'] = NULL_NOT_ALLOWED;
        }
        if (!isset($definition['description'])) {
            $definition['description'] = $name;
        }
        // A nested array type may itself be unformatted, so rinse and repeat.
        if (is_array($definition['type'])) {
            $definition['type'] = static::format_properties($definition['type']);
        }
        unset($definition);
    }
    return $properties;
}
/**
 * Get the properties definition of this exporter used for create, and update structures.
 *
 * Unlike {@link self::read_properties_definition()} this only contains the real
 * model properties, each normalised with a default 'null' (NULL_NOT_ALLOWED)
 * and 'description' (the property name) when the definition omits them.
 *
 * @return array Keys are the property names, and value their definition.
 */
final public static function properties_definition() {
    $properties = static::define_properties();
    foreach (array_keys($properties) as $name) {
        $definition =& $properties[$name];
        // Ensure the mandatory defaults are present.
        if (!isset($definition['null'])) {
            $definition['null'] = NULL_NOT_ALLOWED;
        }
        if (!isset($definition['description'])) {
            $definition['description'] = $name;
        }
        unset($definition);
    }
    return $properties;
}
/**
 * Return the list of additional properties used only for display.
 *
 * These properties only appear in the read structure and in the data returned
 * by {@link self::export()}; they are never part of the create or update
 * structures.
 *
 * Each entry follows the format described in
 * {@link \core\persistent::define_properties()}, with some extensions:
 *
 * - 'multiple' => true wraps the value in an external_multiple_structure:
 *   'arrayofbools' => array(
 *       'type' => PARAM_BOOL,
 *       'multiple' => true
 *   ),
 *
 * - a nested array as 'type' produces a nested external_single_structure:
 *   'competency' => array(
 *       'type' => competency_exporter::read_properties_definition()
 *   ),
 *
 * - 'optional' => true marks a property that does not need to be included in
 *   the export by {@link self::get_other_values()}. This is useful when
 *   exporting a substructure which cannot be set as null due to webservices
 *   protocol constraints:
 *   'competency' => array(
 *       'type' => competency_exporter::read_properties_definition(),
 *       'optional' => true
 *   ),
 *
 * @return array
 */
protected static function define_other_properties() {
    // No display-only properties by default.
    $properties = array();
    return $properties;
}
/**
 * Return the list of properties.
 *
 * The format of the array returned by this method has to match the structure
 * defined in {@link \core\persistent::define_properties()}. However you can
 * add a new attribute "description" to describe the parameter for documenting the API.
 *
 * Note that the type PARAM_TEXT should ONLY be used for strings which need to
 * go through filters (multilang, etc...) and do not have a FORMAT_* associated
 * to them. Typically strings passed through to format_string().
 *
 * Other filtered strings which use a FORMAT_* constant (here used with format_text)
 * must be defined as PARAM_RAW.
 *
 * @return array Keys are the property names, and value their definition.
 */
protected static function define_properties() {
    return array();
}
/**
 * Returns a list of objects that are related to this persistent.
 *
 * Only objects listed here can be cached in this object.
 *
 * The class name can be suffixed:
 * - with [] to indicate an array of values.
 * - with ? to indicate that 'null' is allowed.
 *
 * @return array Keys are the property names, values are the class names
 *         (or scalar type names), optionally suffixed as described above,
 *         e.g. array('context' => 'context', 'tags' => 'string[]').
 */
protected static function define_related() {
    return array();
}
/**
 * Get the context structure.
 *
 * Returns the trio of external values (contextid, contextlevel, instanceid)
 * that replaces a 'contextid' property in the create/update structures.
 *
 * @return array Property name => external_value.
 */
final protected static function get_context_structure() {
    $structure = array();
    $structure['contextid'] = new external_value(PARAM_INT, 'The context id', VALUE_OPTIONAL);
    $structure['contextlevel'] = new external_value(PARAM_ALPHA, 'The context level', VALUE_OPTIONAL);
    $structure['instanceid'] = new external_value(PARAM_INT, 'The Instance id', VALUE_OPTIONAL);
    return $structure;
}
/**
 * Get the format field name.
 *
 * A property '<name>' has a format field when it is PARAM_RAW or
 * PARAM_CLEANHTML and a companion '<name>format' definition of type
 * PARAM_INT exists.
 *
 * @param array $definitions List of properties definitions.
 * @param string $property The name of the property that may have a format field.
 * @return bool|string False, or the name of the format property.
 */
final protected static function get_format_field($definitions, $property) {
    $formatproperty = $property . 'format';
    $type = $definitions[$property]['type'];
    $israw = ($type == PARAM_RAW || $type == PARAM_CLEANHTML);
    if ($israw && isset($definitions[$formatproperty])
            && $definitions[$formatproperty]['type'] == PARAM_INT) {
        return $formatproperty;
    }
    return false;
}
/**
 * Get the format structure.
 *
 * @param string $property The name of the property on which the format applies.
 * @param array $definition The definition of the format property.
 * @param int $required Constant VALUE_*, downgraded to VALUE_DEFAULT when the
 *            definition declares a default value.
 * @return external_format_value
 */
final protected static function get_format_structure($property, $definition, $required = VALUE_REQUIRED) {
    $hasdefault = array_key_exists('default', $definition);
    return new external_format_value($property, $hasdefault ? VALUE_DEFAULT : $required);
}
/**
 * Returns the create structure.
 *
 * The 'id' property is excluded (it cannot be set on create), a 'contextid'
 * property is expanded into the context structure, and each PARAM_RAW
 * property with a companion '<name>format' field gets a format structure.
 *
 * @return external_single_structure
 * @throws coding_exception When both 'context' and 'contextid' are defined,
 *         or a format property is defined twice.
 */
final public static function get_create_structure() {
    $properties = self::properties_definition();
    $returns = array();
    foreach ($properties as $property => $definition) {
        if ($property == 'id') {
            // The id can not be set on create.
            continue;
        } else if (isset($returns[$property]) && substr($property, -6) === 'format') {
            // We've already treated the format.
            continue;
        }
        $required = VALUE_REQUIRED;
        $default = null;
        // We cannot use isset here because we want to detect nulls.
        if (array_key_exists('default', $definition)) {
            $required = VALUE_DEFAULT;
            $default = $definition['default'];
        }
        // Magically treat the contextid fields.
        if ($property == 'contextid') {
            if (isset($properties['context'])) {
                throw new coding_exception('There cannot be a context and a contextid column');
            }
            $returns += self::get_context_structure();
        } else {
            $returns[$property] = new external_value($definition['type'], $definition['description'], $required, $default,
                $definition['null']);
            // Magically treat the format properties.
            if ($formatproperty = self::get_format_field($properties, $property)) {
                if (isset($returns[$formatproperty])) {
                    throw new coding_exception('The format for \'' . $property . '\' is already defined.');
                }
                $returns[$formatproperty] = self::get_format_structure($property,
                    $properties[$formatproperty], VALUE_REQUIRED);
            }
        }
    }
    return new external_single_structure($returns);
}
/**
 * Returns the read structure.
 *
 * Convenience wrapper building the recursive read structure from the full
 * read properties definition (model + display-only properties).
 *
 * @return external_single_structure
 */
final public static function get_read_structure() {
    return self::get_read_structure_from_properties(self::read_properties_definition());
}
/**
 * Returns the read structure from a set of properties (recursive).
 *
 * @param array $properties The properties.
 * @param int $required Whether is required.
 * @param mixed $default The default value.
 * @return external_single_structure
 * @throws coding_exception When a format property is defined twice.
 */
final protected static function get_read_structure_from_properties($properties, $required = VALUE_REQUIRED, $default = null) {
    $returns = array();
    foreach ($properties as $property => $definition) {
        if (isset($returns[$property]) && substr($property, -6) === 'format') {
            // We've already treated the format.
            continue;
        }
        $thisvalue = null;
        $type = $definition['type'];
        $proprequired = VALUE_REQUIRED;
        $propdefault = null;
        if (array_key_exists('default', $definition)) {
            $propdefault = $definition['default'];
        }
        if (array_key_exists('optional', $definition)) {
            // Mark as optional. Note that this should only apply to "reading" "other" properties.
            $proprequired = VALUE_OPTIONAL;
        }
        if (is_array($type)) {
            // This is a nested array of more properties.
            $thisvalue = self::get_read_structure_from_properties($type, $proprequired, $propdefault);
        } else {
            if ($definition['type'] == PARAM_TEXT || $definition['type'] == PARAM_CLEANHTML) {
                // PARAM_TEXT always becomes PARAM_RAW because filters may be applied.
                $type = PARAM_RAW;
            }
            $thisvalue = new external_value($type, $definition['description'], $proprequired, $propdefault, $definition['null']);
        }
        if (!empty($definition['multiple'])) {
            // Multi-valued: wrap the single value in a multiple structure.
            $returns[$property] = new external_multiple_structure($thisvalue, $definition['description'], $proprequired,
                $propdefault);
        } else {
            $returns[$property] = $thisvalue;
            // Magically treat the format properties (not possible for arrays).
            if ($formatproperty = self::get_format_field($properties, $property)) {
                if (isset($returns[$formatproperty])) {
                    throw new coding_exception('The format for \'' . $property . '\' is already defined.');
                }
                $returns[$formatproperty] = self::get_format_structure($property, $properties[$formatproperty]);
            }
        }
    }
    return new external_single_structure($returns, '', $required, $default);
}
/**
 * Returns the update structure.
 *
 * This structure can never be included at the top level for an external function signature
 * because it contains optional parameters.
 *
 * @return external_single_structure
 * @throws coding_exception When both 'context' and 'contextid' are defined,
 *         or a format property is defined twice.
 */
final public static function get_update_structure() {
    $properties = self::properties_definition();
    $returns = array();
    foreach ($properties as $property => $definition) {
        if (isset($returns[$property]) && substr($property, -6) === 'format') {
            // We've already treated the format.
            continue;
        }
        // Everything is optional on update except 'id', which identifies the record.
        $default = null;
        $required = VALUE_OPTIONAL;
        if ($property == 'id') {
            $required = VALUE_REQUIRED;
        }
        // Magically treat the contextid fields.
        if ($property == 'contextid') {
            if (isset($properties['context'])) {
                throw new coding_exception('There cannot be a context and a contextid column');
            }
            $returns += self::get_context_structure();
        } else {
            $returns[$property] = new external_value($definition['type'], $definition['description'], $required, $default,
                $definition['null']);
            // Magically treat the format properties.
            if ($formatproperty = self::get_format_field($properties, $property)) {
                if (isset($returns[$formatproperty])) {
                    throw new coding_exception('The format for \'' . $property . '\' is already defined.');
                }
                $returns[$formatproperty] = self::get_format_structure($property,
                    $properties[$formatproperty], VALUE_OPTIONAL);
            }
        }
    }
    return new external_single_structure($returns);
}
}
| marcusgreen/moodle | lib/classes/external/exporter.php | PHP | gpl-3.0 | 25,453 |
class Foo {
    public void foo() {
        // NOTE(review): 'i' is declared but never used. The file path suggests
        // this is deliberate inspection test data for an unused-variable
        // ("def-use") check — confirm before removing the variable.
        int i;
    }
}
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build windows
package windows_test
import (
"fmt"
"internal/syscall/windows"
"os"
"os/exec"
"syscall"
"testing"
"unsafe"
)
// TestRunAtLowIntegrity re-executes the test binary as a child process with a
// duplicated token whose mandatory integrity label has been lowered to "Low"
// (sidWilLow), then checks that the child reports that level for itself.
func TestRunAtLowIntegrity(t *testing.T) {
	if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
		// Helper-process mode: print our own integrity-level SID and exit.
		wil, err := getProcessIntegrityLevel()
		if err != nil {
			fmt.Fprintf(os.Stderr, "error: %s\n", err.Error())
			os.Exit(9)
			return
		}
		fmt.Printf("%s", wil)
		os.Exit(0)
		return
	}
	cmd := exec.Command(os.Args[0], "-test.run=TestRunAtLowIntegrity", "--")
	cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
	token, err := getIntegrityLevelToken(sidWilLow)
	if err != nil {
		t.Fatal(err)
	}
	defer token.Close()
	// Start the child with the lowered primary token attached.
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Token: token,
	}
	out, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatal(err)
	}
	if string(out) != sidWilLow {
		t.Fatalf("Child process did not run as low integrity level: %s", string(out))
	}
}
const (
sidWilLow = `S-1-16-4096`
)
// getProcessIntegrityLevel returns the string form of the SID stored in the
// current process token's mandatory integrity label (TokenIntegrityLevel),
// e.g. "S-1-16-4096" for low integrity.
func getProcessIntegrityLevel() (string, error) {
	procToken, err := syscall.OpenCurrentProcessToken()
	if err != nil {
		return "", err
	}
	defer procToken.Close()
	p, err := tokenGetInfo(procToken, syscall.TokenIntegrityLevel, 64)
	if err != nil {
		return "", err
	}
	// The returned buffer is a TOKEN_MANDATORY_LABEL whose Label.Sid points at
	// the integrity-level SID.
	tml := (*windows.TOKEN_MANDATORY_LABEL)(p)
	sid := (*syscall.SID)(unsafe.Pointer(tml.Label.Sid))
	return sid.String()
}
// tokenGetInfo queries token information of the given class, growing the
// buffer (starting at initSize bytes) until the call succeeds. It returns a
// pointer to the start of the filled buffer.
func tokenGetInfo(t syscall.Token, class uint32, initSize int) (unsafe.Pointer, error) {
	size := uint32(initSize)
	for {
		buf := make([]byte, size)
		err := syscall.GetTokenInformation(t, class, &buf[0], uint32(len(buf)), &size)
		if err == nil {
			return unsafe.Pointer(&buf[0]), nil
		}
		// Retry only when the OS asked for a strictly larger buffer.
		if err != syscall.ERROR_INSUFFICIENT_BUFFER || size <= uint32(len(buf)) {
			return nil, err
		}
	}
}
// getIntegrityLevelToken duplicates the current process token as a primary
// token and sets its mandatory integrity label to the SID given by wns
// (e.g. sidWilLow). The caller owns the returned token and must Close it.
func getIntegrityLevelToken(wns string) (syscall.Token, error) {
	var procToken, token syscall.Token
	proc, err := syscall.GetCurrentProcess()
	if err != nil {
		return 0, err
	}
	defer syscall.CloseHandle(proc)
	// Open our own token with enough rights to duplicate and re-label it.
	err = syscall.OpenProcessToken(proc,
		syscall.TOKEN_DUPLICATE|
			syscall.TOKEN_ADJUST_DEFAULT|
			syscall.TOKEN_QUERY|
			syscall.TOKEN_ASSIGN_PRIMARY,
		&procToken)
	if err != nil {
		return 0, err
	}
	defer procToken.Close()
	sid, err := syscall.StringToSid(wns)
	if err != nil {
		return 0, err
	}
	// Build the mandatory label pointing at the requested integrity SID.
	tml := &windows.TOKEN_MANDATORY_LABEL{}
	tml.Label.Attributes = windows.SE_GROUP_INTEGRITY
	tml.Label.Sid = sid
	err = windows.DuplicateTokenEx(procToken, 0, nil, windows.SecurityImpersonation,
		windows.TokenPrimary, &token)
	if err != nil {
		return 0, err
	}
	err = windows.SetTokenInformation(token,
		syscall.TokenIntegrityLevel,
		uintptr(unsafe.Pointer(tml)),
		tml.Size())
	if err != nil {
		// Don't leak the duplicated token on failure.
		token.Close()
		return 0, err
	}
	return token, nil
}
| Cofyc/go | src/internal/syscall/windows/exec_windows_test.go | GO | bsd-3-clause | 2,860 |
<head>
<!-- Jekyll/Liquid template for the document head. -->
<meta charset="utf-8" />
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0">
<!-- The page title falls back to the site title when the page defines none. -->
<title>{% if page.title %}{{ page.title }}{% else %}{{ site.title }}{% endif %}</title>
<meta name="author" content="{{ site.author.name }}" />
{% if page.subtitle %}
<meta name="description" content="{{ page.subtitle }}">
{% endif %}
<link rel="alternate" type="application/rss+xml" title="{{ site.title }} - {{ site.description }}" href="{{ site.baseurl }}/feed.xml" />
<!-- Layout-wide stylesheets: external URLs, site-local files, then Google fonts. -->
{% if layout.common-ext-css %}
{% for css in layout.common-ext-css %}
<link rel="stylesheet" href="{{ css }}" />
{% endfor %}
{% endif %}
{% if layout.common-css %}
{% for css in layout.common-css %}
<link rel="stylesheet" href="{{ css | prepend: site.baseurl | replace: '//', '/' }}" />
{% endfor %}
{% endif %}
{% if layout.common-googlefonts %}
{% for font in layout.common-googlefonts %}
<link rel="stylesheet" href="//fonts.googleapis.com/css?family={{ font }}" />
{% endfor %}
{% endif %}
<!-- Page-specific stylesheets, same three flavours as above. -->
{% if page.ext-css %}
{% for css in page.ext-css %}
<link rel="stylesheet" href="{{ css }}" />
{% endfor %}
{% endif %}
{% if page.css %}
{% for css in page.css %}
<link rel="stylesheet" href="{{ css | prepend: site.baseurl | replace: '//', '/' }}" />
{% endfor %}
{% endif %}
{% if page.googlefonts %}
{% for font in page.googlefonts %}
<link rel="stylesheet" href="//fonts.googleapis.com/css?family={{ font }}" />
{% endfor %}
{% endif %}
<!-- Facebook OpenGraph tags -->
<meta property="og:title" content="{% if page.title %}{{ page.title }}{% else %}{{ site.title }}{% endif %}" />
<meta property="og:type" content="website" />
{% if page.id %}
<meta property="og:url" content="{{ site.url }}{{ page.url }}/" />
{% else %}
<meta property="og:url" content="{{ site.url }}{{ page.url | remove: '/index.html' | remove: '.html' }}" />
{% endif %}
<!-- Preview image: page override, then site avatar, then empty. -->
{% if page.fb-img %}
<meta property="og:image" content="{{ page.fb-img }}" />
{% elsif site.avatar %}
<meta property="og:image" content="{{ site.url }}{{ site.avatar }}" />
{% else %}
<meta property="og:image" content="" />
{% endif %}
</head>
/* Filter widget styling: remove cell borders inside the filter forms. */
#datatable-filter-form td,
#summary-filter-form td,
#datalist-filter-form td {
    border: none;
}
.filter-controls {
    clear: both;
}
/*button.btn.btn-primary.btn-filter,*/
/* Default sizing of the filter submit button. */
input.btn.btn-primary.filter-submit {
    width: 90%;
    padding-right: 5px;
    padding-left: 5px;
}
/* AJAX variant: full width, tucked against the field above via negative margins. */
input.btn.btn-primary.filter-submit.filter-ajax {
    width: 100%;
    margin-top: 15px;
    margin-bottom: -15px;
    margin-left: -1px;
}
/* Responsive styling */
input.filter-search{
    width: 50%;
    text-align: left;
}
/* Large desktop */
@media (min-width: 1200px) {
    input.filter-search{
        width: 66%;
    }
}
/* Portrait tablet to landscape and desktop */
@media (min-width: 768px) and (max-width: 979px) {
    input.filter-search{
        width: 33%;
    }
    input.btn.btn-primary.filter-submit.filter-ajax {
        width: 100%;
        margin-left: -1px;
    }
}
/* Landscape phone to portrait tablet */
@media (max-width: 767px) {
    /* !important overrides the base 50% width at narrow viewports. */
    input.filter-search{
        width: 100% !important;
    }
    input.btn.btn-primary.filter-submit.filter-ajax {
        width: 100%;
        margin-left: -1px;
    }
}
/* Landscape phones and down */
@media (max-width: 480px) {
    input.filter-search{
        width: 100% !important;
    }
}
<script>
  // HACK: This is not an ideal way to deliver chrome messages
  // to the inner frame content, but it seems to be the only way that
  // makes `event.source` this (outer frame) window.
  window.onmessage = function(event) {
    var frame = document.querySelector("iframe");
    var content = frame.contentWindow;
    // If message is posted from chrome it has no `event.source`.
    if (event.source === null)
      content.postMessage(event.data, "*");
  };
  // Hack: Ideally we would have used srcdoc on iframe, but in
  // that case origin of document is either content which is unable
  // to load add-on resources or a chrome to which add-on resource
  // can not send messages back.
  document.documentElement.style.overflow = "hidden";
  // The document body is delivered base64-encoded in the location hash.
  document.documentElement.innerHTML = atob(location.hash.substr(1));
</script>
/*
* linux/fs/buffer.c
*
* Copyright (C) 1991, 1992, 2002 Linus Torvalds
*/
/*
* Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
*
* Removed a lot of unnecessary code and simplified things now that
* the buffer cache isn't our primary cache - Andrew Tridgell 12/96
*
* Speed up hash, lru, and free list operations. Use gfp() for allocating
* hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
*
* Added 32k buffer block sizes - these are required older ARM systems. - RMK
*
* async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
*/
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
/* Forward declaration; used by the sync paths further down this file. */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
/* Map a b_assoc_buffers list entry back to its buffer_head. */
#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

/* Attach an I/O completion handler and private cookie to a buffer_head. */
inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
        bh->b_end_io = handler;
        bh->b_private = private;
}
EXPORT_SYMBOL(init_buffer);

/* wait_on_bit() action routine: let outstanding I/O make progress. */
static int sleep_on_buffer(void *word)
{
        io_schedule();
        return 0;
}

/* Slow path of lock_buffer(): sleep until BH_Lock can be acquired. */
void __lock_buffer(struct buffer_head *bh)
{
        wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
                                                        TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

/* Clear BH_Lock and wake anyone sleeping in __wait_on_buffer(). */
void unlock_buffer(struct buffer_head *bh)
{
        clear_bit_unlock(BH_Lock, &bh->b_state);
        smp_mb__after_clear_bit();      /* make the clear visible before waking */
        wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);
/*
 * Block until a buffer comes unlocked. This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
        wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);

/* Detach buffer bookkeeping from a page and drop the page reference it held. */
static void
__clear_page_buffers(struct page *page)
{
        ClearPagePrivate(page);
        set_page_private(page, 0);
        page_cache_release(page);
}

/*
 * Returns nonzero when an I/O error on this bh should NOT be reported:
 * either BH_Quiet is set, or the printk ratelimit has been exceeded.
 */
static int quiet_error(struct buffer_head *bh)
{
        if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
                return 0;
        return 1;
}

/* Log an I/O error with the device name and logical block number. */
static void buffer_io_error(struct buffer_head *bh)
{
        char b[BDEVNAME_SIZE];
        printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
                        bdevname(bh->b_bdev, b),
                        (unsigned long long)bh->b_blocknr);
}
/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only use the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                /* This happens, due to failed READA attempts. */
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
 * unlock the buffer. This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
        __end_buffer_read_notouch(bh, uptodate);
        put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_read_sync);

/*
 * Write-side counterpart: report lost writes (unless quiet/ratelimited),
 * record the error on the bh, then unlock it and drop a reference.
 */
void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
        char b[BDEVNAME_SIZE];
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                if (!quiet_error(bh)) {
                        buffer_io_error(bh);
                        printk(KERN_WARNING "lost page write due to "
                                        "I/O error on %s\n",
                                       bdevname(bh->b_bdev, b));
                }
                set_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
        put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_write_sync);
/*
* Various filesystems appear to want __find_get_block to be non-blocking.
* But it's the page lock which protects the buffers. To get around this,
* we get exclusion from try_to_free_buffers with the blockdev mapping's
* private_lock.
*
* Hack idea: for the blockdev mapping, i_bufferlist_lock contention
* may be quite high. This code could TryLock the page, and if that
* succeeds, there is no need to take private_lock. (But if
* private_lock is contended then so is mapping->tree_lock).
*/
/*
 * Look up the pagecache page backing @block on @bdev and return the
 * matching buffer_head with an elevated refcount, or NULL if not present.
 * Non-blocking: takes only the blockdev mapping's private_lock (to exclude
 * try_to_free_buffers()), never the page lock.
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct page *page;
	int all_mapped = 1;

	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
	page = find_get_page(bd_mapping, index);
	if (!page)
		goto out;

	spin_lock(&bd_mapping->private_lock);
	if (!page_has_buffers(page))
		goto out_unlock;
	head = page_buffers(page);
	bh = head;
	do {
		if (!buffer_mapped(bh))
			all_mapped = 0;
		else if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		bh = bh->b_this_page;
	} while (bh != head);

	/* we might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between
	 * file io on the block device and getblk.  It gets dealt with
	 * elsewhere, don't buffer_error if we had some unmapped buffers
	 */
	if (all_mapped) {
		char b[BDEVNAME_SIZE];

		/*
		 * Use an explicit severity: these printks were unleveled,
		 * which defaults to the console's default level and cannot
		 * be filtered consistently with other buffer-layer errors.
		 */
		printk(KERN_ERR "__find_get_block_slow() failed. "
			"block=%llu, b_blocknr=%llu\n",
			(unsigned long long)block,
			(unsigned long long)bh->b_blocknr);
		printk(KERN_ERR "b_state=0x%08lx, b_size=%zu\n",
			bh->b_state, bh->b_size);
		printk(KERN_ERR "device %s blocksize: %d\n", bdevname(bdev, b),
			1 << bd_inode->i_blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->private_lock);
	page_cache_release(page);
out:
	return ret;
}
/*
* Kick the writeback threads then try to free up some ZONE_NORMAL memory.
*/
static void free_more_memory(void)
{
	struct zone *zone;
	int nid;

	/* Kick writeback to start cleaning pages... */
	wakeup_flusher_threads(1024, WB_REASON_FREE_MORE_MEM);
	/* ...and give the flusher threads a chance to run. */
	yield();

	for_each_online_node(nid) {
		/* Find the first usable zone on this node for GFP_NOFS. */
		(void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
						gfp_zone(GFP_NOFS), NULL,
						&zone);
		if (zone)
			try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
						GFP_NOFS, NULL);
	}
}
/*
* I/O completion handler for block_read_full_page() - pages
* which come unlocked at the end of I/O.
*/
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;
	int page_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		if (!quiet_error(bh))
			buffer_io_error(bh);
		SetPageError(page);
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = page_buffers(page);
	/* BH_Uptodate_Lock on the first buffer serialises the ring walk
	 * against concurrent completions; irqs must be off around it. */
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			page_uptodate = 0;
		if (buffer_async_read(tmp)) {
			/* Another buffer on this page still has I/O pending. */
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);

	/*
	 * If none of the buffers had errors and they are all
	 * uptodate then we can set the page uptodate.
	 */
	if (page_uptodate && !PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}
/*
* Completion handler for block_write_full_page() - pages which are unlocked
* during I/O, and which have PageWriteback cleared upon I/O completion.
*/
void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;

	BUG_ON(!buffer_async_write(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!quiet_error(bh)) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
			       bdevname(bh->b_bdev, b));
		}
		/* Record the failure on the mapping, buffer and page. */
		set_bit(AS_EIO, &page->mapping->flags);
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
		SetPageError(page);
	}

	/* BH_Uptodate_Lock on the first buffer serialises the ring walk
	 * against concurrent completions; irqs must be off around it. */
	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			/* Another buffer on this page is still in flight. */
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	/* All async writes on this page have completed. */
	end_page_writeback(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}
EXPORT_SYMBOL(end_buffer_async_write);
/*
* If a page's buffers are under async readin (end_buffer_async_read
* completion) then there is a possibility that another thread of
* control could lock one of the buffers after it has completed
* but while some of the other buffers have not completed. This
* locked buffer would confuse end_buffer_async_read() into not unlocking
* the page. So the absence of BH_Async_Read tells end_buffer_async_read()
* that this buffer is not under async I/O.
*
* The page comes unlocked when it has no locked buffer_async buffers
* left.
*
* PageLocked prevents anyone starting new async I/O reads any of
* the buffers.
*
* PageWriteback is used to prevent simultaneous writeout of the same
* page.
*
* PageLocked prevents anyone from starting writeback of a page which is
* under read I/O (PageWriteback is only ever set against a locked page).
*/
/* Arm @bh for async read completion via end_buffer_async_read(). */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read;
	set_buffer_async_read(bh);
}
/* Arm @bh for async write completion using the caller-chosen @handler. */
static void mark_buffer_async_write_endio(struct buffer_head *bh,
					  bh_end_io_t *handler)
{
	bh->b_end_io = handler;
	set_buffer_async_write(bh);
}
/* Arm @bh for async write completion with the default end_io handler. */
void mark_buffer_async_write(struct buffer_head *bh)
{
	mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);
/*
* fs/buffer.c contains helper functions for buffer-backed address space's
* fsync functions. A common requirement for buffer-based filesystems is
* that certain data from the backing blockdev needs to be written out for
* a successful fsync(). For example, ext2 indirect blocks need to be
* written back and waited upon before fsync() returns.
*
* The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
* inode_has_buffers() and invalidate_inode_buffers() are provided for the
* management of a list of dependent buffers at ->i_mapping->private_list.
*
* Locking is a little subtle: try_to_free_buffers() will remove buffers
* from their controlling inode's queue when they are being freed. But
* try_to_free_buffers() will be operating against the *blockdev* mapping
* at the time, not against the S_ISREG file which depends on those buffers.
* So the locking for private_list is via the private_lock in the address_space
* which backs the buffers. Which is different from the address_space
* against which the buffers are listed. So for a particular address_space,
* mapping->private_lock does *not* protect mapping->private_list! In fact,
* mapping->private_list will always be protected by the backing blockdev's
* ->private_lock.
*
* Which introduces a requirement: all buffers on an address_space's
* ->private_list must be from the same address_space: the blockdev's.
*
* address_spaces which do not place buffers at ->private_list via these
* utility functions are free to use private_lock and private_list for
* whatever they want. The only requirement is that list_empty(private_list)
* be true at clear_inode() time.
*
* FIXME: clear_inode should not call invalidate_inode_buffers(). The
* filesystems should do that. invalidate_inode_buffers() should just go
* BUG_ON(!list_empty).
*
* FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
* take an address_space, not an inode. And it should be called
* mark_buffer_dirty_fsync() to clearly define why those buffers are being
* queued up.
*
* FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
* list if it is already on a list. Because if the buffer is on a list,
* it *must* already be on the right one. If not, the filesystem is being
* silly. This will save a ton of locking. But first we have to ensure
* that buffers are taken *off* the old inode's list when they are freed
* (presumably in truncate). That requires careful auditing of all
* filesystems (do it inside bforget()). It could also be done by bringing
* b_inode back.
*/
/*
* The buffer's backing address_space's private_lock must be held
*/
static void __remove_assoc_queue(struct buffer_head *bh)
{
list_del_init(&bh->b_assoc_buffers);
WARN_ON(!bh->b_assoc_map);
if (buffer_write_io_error(bh))
set_bit(AS_EIO, &bh->b_assoc_map->flags);
bh->b_assoc_map = NULL;
}
/* Return non-zero if the inode has buffers on its associated list. */
int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.private_list);
}
/*
* osync is designed to support O_SYNC io. It waits synchronously for
* all already-submitted IO to complete, but does not queue any new
* writes to the disk.
*
* To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
* you dirty the buffers, and then use osync_inode_buffers to wait for
* completion. Any other dirty buffers which are not yet queued for
* write will not be flushed to disk by the osync.
*/
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			/* Pin the bh, drop the lock, wait for its I/O. */
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			/* List may have changed while unlocked: restart. */
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}
static void do_thaw_one(struct super_block *sb, void *unused)
{
	char name[BDEVNAME_SIZE];

	/* Keep thawing until thaw_bdev() reports the bdev is unfrozen. */
	while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
		printk(KERN_WARNING "Emergency Thaw on %s\n",
		       bdevname(sb->s_bdev, name));
}
/* Workqueue callback for emergency_thaw_all(): thaw every super_block. */
static void do_thaw_all(struct work_struct *work)
{
	iterate_supers(do_thaw_one, NULL);
	/* The work item was kmalloc'ed by emergency_thaw_all(); free it. */
	kfree(work);
	printk(KERN_WARNING "Emergency Thaw complete\n");
}
/**
* emergency_thaw_all -- forcibly thaw every frozen filesystem
*
* Used for emergency unfreeze of all filesystems via SysRq
*/
void emergency_thaw_all(void)
{
	struct work_struct *work = kmalloc(sizeof(*work), GFP_ATOMIC);

	/* Best effort: silently do nothing if the allocation fails. */
	if (!work)
		return;

	INIT_WORK(work, do_thaw_all);
	schedule_work(work);
}
/**
* sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
* @mapping: the mapping which wants those buffers written
*
* Starts I/O against the buffers at mapping->private_list, and waits upon
* that I/O.
*
* Basically, this is a convenience function for fsync().
* @mapping is a file or directory which needs those buffers to be written for
* a successful fsync().
*/
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->assoc_mapping;

	/* Nothing to do if no associated buffers were ever attached. */
	if (!buffer_mapping || list_empty(&mapping->private_list))
		return 0;

	/* private_list is protected by the *backing* mapping's lock. */
	return fsync_buffers_list(&buffer_mapping->private_lock,
					&mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);
/*
* Called when we've recently written block `bblock', and it is known that
* `bblock' was for a buffer_boundary() buffer. This means that the block at
* `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
* dirty, schedule it for IO. So that indirects merge nicely with their data.
*/
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh;

	/* The suspected dirty indirect lives at bblock + 1. */
	bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (!bh)
		return;

	if (buffer_dirty(bh))
		ll_rw_block(WRITE, 1, &bh);
	put_bh(bh);
}
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_page->mapping;

	mark_buffer_dirty(bh);
	/* The first association fixes the backing mapping; all later
	 * buffers queued here must come from that same mapping. */
	if (!mapping->assoc_mapping) {
		mapping->assoc_mapping = buffer_mapping;
	} else {
		BUG_ON(mapping->assoc_mapping != buffer_mapping);
	}
	if (!bh->b_assoc_map) {
		/* private_list is protected by the *backing* blockdev
		 * mapping's private_lock (see the locking notes above). */
		spin_lock(&buffer_mapping->private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
/*
* Mark the page dirty, and set it dirty in the radix tree, and mark the inode
* dirty.
*
* If warn is true, then emit a warning if the page is not uptodate and has
* not been truncated.
*/
static void __set_page_dirty(struct page *page,
			struct address_space *mapping, int warn)
{
	spin_lock_irq(&mapping->tree_lock);
	if (page->mapping) {	/* Race with truncate? */
		WARN_ON_ONCE(warn && !PageUptodate(page));
		account_page_dirtied(page, mapping);
		/* Tag the page dirty in the radix tree for writeback. */
		radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
	}
	spin_unlock_irq(&mapping->tree_lock);
	/* Queue the owning inode on its superblock's dirty list. */
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
}
/*
* Add a page to the dirty page list.
*
* It is a sad fact of life that this function is called from several places
* deeply under spinlocking. It may not sleep.
*
* If the page has buffers, the uptodate buffers are set dirty, to preserve
* dirty-state coherency between the page and the buffers. It the page does
* not have buffers then when they are later attached they will all be set
* dirty.
*
* The buffers are dirtied before the page is dirtied. There's a small race
* window in which a writepage caller may see the page cleanness but not the
* buffer dirtiness. That's fine. If this code were to set the page dirty
* before the buffers, a concurrent writepage caller could clear the page dirty
* bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
* page on the dirty page list.
*
* We use private_lock to lock against try_to_free_buffers while using the
* page's buffer list. Also use this to protect against clean buffers being
* added to the page after it was set dirty.
*
* FIXME: may need to call ->reservepage here as well. That's rather up to the
* address_space though.
*/
int __set_page_dirty_buffers(struct page *page)
{
	int newly_dirty;
	struct address_space *mapping = page_mapping(page);

	/* No mapping (e.g. truncated): just set the page bit. */
	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	/* private_lock keeps the buffers attached while we dirty them. */
	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	/* Buffers are dirtied before the page; the comment above this
	 * function explains why that ordering matters. */
	newly_dirty = !TestSetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty)
		__set_page_dirty(page, mapping, 1);
	return newly_dirty;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);
/*
* Write out and wait upon a list of buffers.
*
* We have conflicting pressures: we want to make sure that all
* initially dirty buffers get waited on, but that any subsequently
* dirtied buffers don't. After all, we don't want fsync to last
* forever if somebody is actively writing to the file.
*
* Do this in two main stages: first we copy dirty buffers to a
* temporary inode list, queueing the writes as we go. Then we clean
* up, waiting for those writes to complete.
*
* During this second stage, any subsequent updates to the file may end
* up refiling the buffer on the original inode's dirty list again, so
* there is a chance we will end up with a buffer queued for write but
* not yet completed on that list. So, as a final cleanup we go through
* the osync code to catch these locked, dirty buffers without requeuing
* any newly dirty buffers for write.
*/
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	struct address_space *mapping;
	int err = 0, err2;
	struct blk_plug plug;

	INIT_LIST_HEAD(&tmp);
	blk_start_plug(&plug);

	spin_lock(lock);
	/* Stage 1: move dirty/locked buffers onto tmp, queueing writes. */
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			bh->b_assoc_map = mapping;
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * write_dirty_buffer() actually writes the
				 * current contents - it is a noop if I/O is
				 * still in flight on potentially older
				 * contents.
				 */
				write_dirty_buffer(bh, WRITE_SYNC);

				/*
				 * Kick off IO for the previous mapping. Note
				 * that we will not run the very last mapping,
				 * wait_on_buffer() will do that for us
				 * through sync_buffer().
				 */
				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	spin_unlock(lock);
	/* Flush the plugged writes before we start waiting on them. */
	blk_finish_plug(&plug);
	spin_lock(lock);

	/* Stage 2: wait for the queued writes, refiling any buffers that
	 * were redirtied in the meantime back onto the inode's list. */
	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		get_bh(bh);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh)) {
			list_add(&bh->b_assoc_buffers,
				 &mapping->private_list);
			bh->b_assoc_map = mapping;
		}
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	/* Final pass: catch locked, dirty buffers requeued during stage 2. */
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}
/*
* Invalidate any and all dirty buffers on a given inode. We are
* probably unmounting the fs, but that doesn't mean we have already
* done a sync(). Just drop the buffers from the inode list.
*
* NOTE: we take the inode's blockdev's mapping's private_lock. Which
* assumes that all the buffers are against the blockdev. Not true
* for reiserfs.
*/
void invalidate_inode_buffers(struct inode *inode)
{
	struct address_space *mapping = &inode->i_data;
	struct list_head *list = &mapping->private_list;
	struct address_space *buffer_mapping = mapping->assoc_mapping;

	if (!inode_has_buffers(inode))
		return;

	/* The backing blockdev mapping's lock protects private_list. */
	spin_lock(&buffer_mapping->private_lock);
	while (!list_empty(list))
		__remove_assoc_queue(BH_ENTRY(list->next));
	spin_unlock(&buffer_mapping->private_lock);
}
EXPORT_SYMBOL(invalidate_inode_buffers);
/*
* Remove any clean buffers from the inode's buffer list. This is called
* when we're trying to free the inode itself. Those buffers can pin it.
*
* Returns true if all buffers were removed.
*/
int remove_inode_buffers(struct inode *inode)
{
	struct address_space *mapping = &inode->i_data;
	struct list_head *list = &mapping->private_list;
	struct address_space *buffer_mapping = mapping->assoc_mapping;
	int ret = 1;

	if (!inode_has_buffers(inode))
		return ret;

	spin_lock(&buffer_mapping->private_lock);
	while (!list_empty(list)) {
		struct buffer_head *bh = BH_ENTRY(list->next);

		/* A dirty buffer still pins the inode: give up. */
		if (buffer_dirty(bh)) {
			ret = 0;
			break;
		}
		__remove_assoc_queue(bh);
	}
	spin_unlock(&buffer_mapping->private_lock);

	return ret;
}
/*
* Create the appropriate buffers when given a page for data area and
* the size of each buffer.. Use the bh->b_this_page linked list to
* follow the buffers created. Return NULL if unable to create more
* buffers.
*
* The retry flag is used to differentiate async IO (paging, swapping)
* which may not fail from ordinary buffer allocations.
*/
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		int retry)
{
	struct buffer_head *bh, *head;
	long offset;

try_again:
	head = NULL;
	offset = PAGE_SIZE;
	/* Build the chain back-to-front: the final 'head' is the buffer
	 * at the lowest page offset. */
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(GFP_NOFS);
		if (!bh)
			goto no_grow;

		bh->b_bdev = NULL;
		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_state = 0;
		atomic_set(&bh->b_count, 0);
		bh->b_size = size;

		/* Link the buffer to its page */
		set_bh_page(bh, page, offset);

		init_buffer(bh, NULL, NULL);
	}
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	/*
	 * Return failure for non-async IO requests.  Async IO requests
	 * are not allowed to fail, so we have to wait until buffer heads
	 * become available.  But we don't want tasks sleeping with
	 * partially complete buffers, so all were released above.
	 */
	if (!retry)
		return NULL;

	/* We're _really_ low on memory. Now we just
	 * wait for old buffer heads to become free due to
	 * finishing IO.  Since this is an async request and
	 * the reserve list is empty, we're sure there are
	 * async buffer heads in use.
	 */
	free_more_memory();
	goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);
static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
	struct buffer_head *bh = head;
	struct buffer_head *tail;

	/* Walk to the end of the NULL-terminated chain... */
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	/* ...close it into a ring and attach it to the page. */
	tail->b_this_page = head;
	attach_page_buffers(page, head);
}
/*
* Initialise the state of a blockdev page's buffers.
*/
static sector_t
init_page_buffers(struct page *page, struct block_device *bdev,
			sector_t block, int size)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh = head;
	int uptodate = PageUptodate(page);
	sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode));

	do {
		/* Already-mapped buffers keep their existing state. */
		if (!buffer_mapped(bh)) {
			init_buffer(bh, NULL, NULL);
			bh->b_bdev = bdev;
			bh->b_blocknr = block;
			if (uptodate)
				set_buffer_uptodate(bh);
			/* Only map blocks that exist on the device. */
			if (block < end_block)
				set_buffer_mapped(bh);
		}
		block++;
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * Caller needs to validate requested block against end of device.
	 */
	return end_block;
}
/*
* Create the page-cache page that contains the requested block.
*
* This is used purely for blockdev mappings.
*/
/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.  Returns 1 when the page now
 * maps @block, -ENXIO when @block is beyond the end of the device, and 0
 * on allocation failure (caller should free_more_memory() and retry).
 */
static int
grow_dev_page(struct block_device *bdev, sector_t block,
		pgoff_t index, int size, int sizebits)
{
	struct inode *inode = bdev->bd_inode;
	struct page *page;
	struct buffer_head *bh;
	sector_t end_block;
	int ret = 0;		/* Will call free_more_memory() */

	page = find_or_create_page(inode->i_mapping, index,
		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
	if (!page)
		return ret;

	BUG_ON(!PageLocked(page));

	if (page_has_buffers(page)) {
		bh = page_buffers(page);
		if (bh->b_size == size) {
			/*
			 * Widen before shifting: index is pgoff_t (unsigned
			 * long), so "index << sizebits" can overflow on
			 * 32-bit hosts with large devices and hand a bogus
			 * starting block to init_page_buffers().
			 */
			end_block = init_page_buffers(page, bdev,
					(sector_t)index << sizebits, size);
			goto done;
		}
		if (!try_to_free_buffers(page))
			goto failed;
	}

	/*
	 * Allocate some buffers for this page
	 */
	bh = alloc_page_buffers(page, size, 0);
	if (!bh)
		goto failed;

	/*
	 * Link the page to the buffers and initialise them.  Take the
	 * lock to be atomic wrt __find_get_block(), which does not
	 * run under the page lock.
	 */
	spin_lock(&inode->i_mapping->private_lock);
	link_dev_buffers(page, bh);
	end_block = init_page_buffers(page, bdev,
			(sector_t)index << sizebits, size);
	spin_unlock(&inode->i_mapping->private_lock);
done:
	ret = (block < end_block) ? 1 : -ENXIO;
failed:
	unlock_page(page);
	page_cache_release(page);
	return ret;
}
/*
* Create buffers for the specified block device block's page. If
* that page was dirty, the buffers are set dirty also.
*/
static int
grow_buffers(struct block_device *bdev, sector_t block, int size)
{
	pgoff_t index;
	int sizebits = -1;

	/* sizebits: shift converting a block number to a page index. */
	do {
		sizebits++;
	} while ((size << sizebits) < PAGE_SIZE);

	index = block >> sizebits;

	/*
	 * Check for a block which wants to lie outside our maximum possible
	 * pagecache index. (this comparison is done using sector_t types).
	 */
	if (unlikely(index != block >> sizebits)) {
		char b[BDEVNAME_SIZE];

		printk(KERN_ERR "%s: requested out-of-range block %llu for "
			"device %s\n",
			__func__, (unsigned long long)block,
			bdevname(bdev, b));
		return -EIO;
	}

	/* Create a page with the proper size buffers.. */
	return grow_dev_page(bdev, block, index, size, sizebits);
}
static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
{
	int logical = bdev_logical_block_size(bdev);

	/* Size must be a multiple of the hard sector size, and sane. */
	if (unlikely((size & (logical - 1)) || size < 512 ||
			size > PAGE_SIZE)) {
		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
					size);
		printk(KERN_ERR "logical block size: %d\n", logical);
		dump_stack();
		return NULL;
	}

	/* Loop: look up, and if absent, grow the blockdev pagecache. */
	for (;;) {
		struct buffer_head *bh = __find_get_block(bdev, block, size);
		int ret;

		if (bh)
			return bh;

		ret = grow_buffers(bdev, block, size);
		if (ret < 0)
			return NULL;
		if (ret == 0)
			free_more_memory();
	}
}
/*
* The relationship between dirty buffers and dirty pages:
*
* Whenever a page has any dirty buffers, the page's dirty bit is set, and
* the page is tagged dirty in its radix tree.
*
* At all times, the dirtiness of the buffers represents the dirtiness of
* subsections of the page. If the page has buffers, the page dirty bit is
* merely a hint about the true dirty state.
*
* When a page is set dirty in its entirety, all its buffers are marked dirty
* (if the page has buffers).
*
* When a buffer is marked dirty, its page is dirtied, but the page's other
* buffers are not.
*
* Also. When blockdev buffers are explicitly read with bread(), they
* individually become uptodate. But their backing page remains not
* uptodate - even if all of its buffers are uptodate. A subsequent
* block_read_full_page() against that page will discover all the uptodate
* buffers, will set the page uptodate and will perform no I/O.
*/
/**
* mark_buffer_dirty - mark a buffer_head as needing writeout
* @bh: the buffer_head to mark dirty
*
* mark_buffer_dirty() will set the dirty bit against the buffer, then set its
* backing page dirty, then tag the page as dirty in its address_space's radix
* tree and then attach the address_space's inode to its superblock's dirty
* inode list.
*
* mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
* mapping->tree_lock and mapping->host->i_lock.
*/
void mark_buffer_dirty(struct buffer_head *bh)
{
	WARN_ON_ONCE(!buffer_uptodate(bh));

	/*
	 * Very *carefully* optimize the it-is-already-dirty case.
	 *
	 * Don't let the final "is it dirty" escape to before we
	 * perhaps modified the buffer.
	 */
	if (buffer_dirty(bh)) {
		smp_mb();
		if (buffer_dirty(bh))
			return;
	}

	if (!test_set_buffer_dirty(bh)) {
		struct page *page = bh->b_page;
		/* Only account/tag the page on a clean->dirty transition. */
		if (!TestSetPageDirty(page)) {
			struct address_space *mapping = page_mapping(page);
			if (mapping)
				__set_page_dirty(page, mapping, 0);
		}
	}
}
EXPORT_SYMBOL(mark_buffer_dirty);
/*
* Decrement a buffer_head's reference count. If all buffers against a page
* have zero reference count, are clean and unlocked, and if the page is clean
* and unlocked then try_to_free_buffers() may strip the buffers from the page
* in preparation for freeing it (sometimes, rarely, buffers are removed from
* a page but it ends up not being freed, and buffers may later be reattached).
*/
void __brelse(struct buffer_head * buf)
{
	/* Releasing an already-free buffer is a caller bug. */
	if (!atomic_read(&buf->b_count)) {
		WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
		return;
	}
	put_bh(buf);
}
EXPORT_SYMBOL(__brelse);
/*
* bforget() is like brelse(), except it discards any
* potentially dirty data.
*/
void __bforget(struct buffer_head *bh)
{
	/* The data is being discarded: forget that it was ever dirty. */
	clear_buffer_dirty(bh);
	if (bh->b_assoc_map) {
		struct address_space *buffer_mapping = bh->b_page->mapping;

		/* Also unhook it from its inode's associated-buffer list. */
		spin_lock(&buffer_mapping->private_lock);
		list_del_init(&bh->b_assoc_buffers);
		bh->b_assoc_map = NULL;
		spin_unlock(&buffer_mapping->private_lock);
	}
	__brelse(bh);
}
EXPORT_SYMBOL(__bforget);
static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
	lock_buffer(bh);
	/* Someone may have read it in while we waited for the lock. */
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	}

	/* Submit a read and wait for it synchronously. */
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;

	brelse(bh);
	return NULL;
}
/*
* Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
* The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
* refcount elevated by one when they're in an LRU. A buffer can only appear
* once in a particular CPU's LRU. A single buffer can be present in multiple
* CPU's LRUs at the same time.
*
* This is a transparent caching front-end to sb_bread(), sb_getblk() and
* sb_find_get_block().
*
* The LRUs themselves only need locking against invalidate_bh_lrus. We use
* a local interrupt disable for that.
*/
/* Number of entries in each per-CPU buffer-head LRU. */
#define BH_LRU_SIZE	8

struct bh_lru {
	struct buffer_head *bhs[BH_LRU_SIZE];	/* bhs[0] is the newest */
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

/*
 * On SMP the LRUs are protected against invalidate_bh_lrus() by disabling
 * local interrupts (see the comment above); on UP disabling preemption
 * is sufficient.
 */
#ifdef CONFIG_SMP
#define bh_lru_lock()	local_irq_disable()
#define bh_lru_unlock()	local_irq_enable()
#else
#define bh_lru_lock()	preempt_disable()
#define bh_lru_unlock()	preempt_enable()
#endif

/* Sanity check: LRU code must not be entered with interrupts disabled. */
static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
	BUG_ON(irqs_disabled());
#endif
}
/*
* The LRU management algorithm is dopey-but-simple. Sorry.
*/
static void bh_lru_install(struct buffer_head *bh)
{
	struct buffer_head *evictee = NULL;

	check_irqs_on();
	bh_lru_lock();
	/* Nothing to do if @bh is already at the front of this CPU's LRU. */
	if (__this_cpu_read(bh_lrus.bhs[0]) != bh) {
		struct buffer_head *bhs[BH_LRU_SIZE];
		int in;
		int out = 0;

		/* Build a new array: @bh first, then the old entries. */
		get_bh(bh);
		bhs[out++] = bh;
		for (in = 0; in < BH_LRU_SIZE; in++) {
			struct buffer_head *bh2 =
				__this_cpu_read(bh_lrus.bhs[in]);

			if (bh2 == bh) {
				/* Old duplicate of @bh: drop its LRU ref. */
				__brelse(bh2);
			} else {
				if (out >= BH_LRU_SIZE) {
					BUG_ON(evictee != NULL);
					/* No room left: this one falls off. */
					evictee = bh2;
				} else {
					bhs[out++] = bh2;
				}
			}
		}
		while (out < BH_LRU_SIZE)
			bhs[out++] = NULL;
		memcpy(__this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
	}
	bh_lru_unlock();

	/* Release the evicted buffer outside the LRU lock. */
	if (evictee)
		__brelse(evictee);
}
/*
* Look up the bh in this cpu's LRU. If it's there, move it to the head.
*/
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *ret = NULL;
	unsigned int i;

	check_irqs_on();
	bh_lru_lock();
	for (i = 0; i < BH_LRU_SIZE; i++) {
		struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);

		if (bh && bh->b_bdev == bdev &&
				bh->b_blocknr == block && bh->b_size == size) {
			/* Hit: shift entries down and move @bh to front. */
			if (i) {
				while (i) {
					__this_cpu_write(bh_lrus.bhs[i],
						__this_cpu_read(bh_lrus.bhs[i - 1]));
					i--;
				}
				__this_cpu_write(bh_lrus.bhs[0], bh);
			}
			/* Take a reference for the caller. */
			get_bh(bh);
			ret = bh;
			break;
		}
	}
	bh_lru_unlock();
	return ret;
}
/*
* Perform a pagecache lookup for the matching buffer. If it's there, refresh
* it in the LRU and mark it as accessed. If it is not present then return
* NULL
*/
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh;

	/* Fast path: per-cpu LRU.  Slow path: pagecache lookup. */
	bh = lookup_bh_lru(bdev, block, size);
	if (!bh) {
		bh = __find_get_block_slow(bdev, block);
		if (bh)
			bh_lru_install(bh);
	}

	if (bh)
		touch_buffer(bh);
	return bh;
}
EXPORT_SYMBOL(__find_get_block);
/*
* __getblk will locate (and, if necessary, create) the buffer_head
* which corresponds to the passed block_device, block and size. The
* returned buffer has its reference count incremented.
*
* __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
* attempt is failing. FIXME, perhaps?
*/
struct buffer_head *
__getblk(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __find_get_block(bdev, block, size);

	/* The slow path below may sleep; callers must not be atomic. */
	might_sleep();
	if (bh == NULL)
		bh = __getblk_slow(bdev, block, size);
	return bh;
}
EXPORT_SYMBOL(__getblk);
/*
* Do async read-ahead on a buffer..
*/
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	if (unlikely(!bh))
		return;

	/* Fire-and-forget read-ahead; drop our reference immediately. */
	ll_rw_block(READA, 1, &bh);
	brelse(bh);
}
EXPORT_SYMBOL(__breadahead);
/**
* __bread() - reads a specified block and returns the bh
* @bdev: the block_device to read from
* @block: number of block
* @size: size (in bytes) to read
*
* Reads a specified block, and returns buffer head that contains it.
* It returns NULL if the block was unreadable.
*/
struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	/* Only do real I/O if the cached buffer isn't already valid. */
	if (likely(bh) && !buffer_uptodate(bh))
		bh = __bread_slow(bh);
	return bh;
}
EXPORT_SYMBOL(__bread);
/*
* invalidate_bh_lrus() is called rarely - but not only at unmount.
* This doesn't race because it runs in each cpu either in irq
* or with preempt disabled.
*/
/* Per-CPU callback: drop every buffer cached in this CPU's LRU. */
static void invalidate_bh_lru(void *arg)
{
	struct bh_lru *b = &get_cpu_var(bh_lrus);
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
	put_cpu_var(bh_lrus);
}
static bool has_bh_in_lru(int cpu, void *dummy)
{
struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
int i;
for (i = 0; i < BH_LRU_SIZE; i++) {
if (b->bhs[i])
return 1;
}
return 0;
}
/* Invalidate the buffer-head LRUs, but only on CPUs that have entries. */
void invalidate_bh_lrus(void)
{
	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
/*
 * Point @bh's data at @offset bytes into @page.  Highmem pages have no
 * permanent kernel mapping, so only the offset is stored in b_data there.
 */
void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset)
{
	bh->b_page = page;
	BUG_ON(offset >= PAGE_SIZE);
	if (PageHighMem(page))
		/*
		 * This catches illegal uses and preserves the offset:
		 */
		bh->b_data = (char *)(0 + offset);
	else
		bh->b_data = page_address(page) + offset;
}
EXPORT_SYMBOL(set_bh_page);
/*
 * Called when truncating a buffer on a page completely.
 *
 * All buffer state flags that tie the buffer to on-disk storage are
 * cleared under the buffer lock, so concurrent I/O completion or
 * dirtying cannot observe a half-discarded buffer.
 */
static void discard_buffer(struct buffer_head * bh)
{
lock_buffer(bh);
clear_buffer_dirty(bh);
/* Detach from the device before clearing the mapped state. */
bh->b_bdev = NULL;
clear_buffer_mapped(bh);
clear_buffer_req(bh);
clear_buffer_new(bh);
clear_buffer_delay(bh);
clear_buffer_unwritten(bh);
unlock_buffer(bh);
}
/**
 * block_invalidatepage - invalidate part or all of a buffer-backed page
 *
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * block_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * block_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point. Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void block_invalidatepage(struct page *page, unsigned long offset)
{
struct buffer_head *head, *bh, *next;
unsigned int curr_off = 0;
BUG_ON(!PageLocked(page));
if (!page_has_buffers(page))
goto out;
head = page_buffers(page);
bh = head;
do {
unsigned int next_off = curr_off + bh->b_size;
/* Remember the successor: discard_buffer() must not walk the ring. */
next = bh->b_this_page;
/*
 * is this block fully invalidated?
 * Only buffers that start at or after @offset are discarded;
 * a buffer straddling @offset is left alone.
 */
if (offset <= curr_off)
discard_buffer(bh);
curr_off = next_off;
bh = next;
} while (bh != head);
/*
 * We release buffers only if the entire page is being invalidated.
 * The get_block cached value has been unconditionally invalidated,
 * so real IO is not possible anymore.
 */
if (offset == 0)
try_to_release_page(page, 0);
out:
return;
}
EXPORT_SYMBOL(block_invalidatepage);
/*
 * We attach and possibly dirty the buffers atomically wrt
 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
 * is already excluded via the page lock.
 *
 * @page: locked page to attach buffers to
 * @blocksize: size of each buffer
 * @b_state: initial buffer state bits OR'd into every buffer
 */
void create_empty_buffers(struct page *page,
unsigned long blocksize, unsigned long b_state)
{
struct buffer_head *bh, *head, *tail;
/* retry=1: alloc_page_buffers loops until it succeeds. */
head = alloc_page_buffers(page, blocksize, 1);
bh = head;
do {
bh->b_state |= b_state;
tail = bh;
bh = bh->b_this_page;
} while (bh);
/* Close the singly-linked chain into the circular per-page ring. */
tail->b_this_page = head;
spin_lock(&page->mapping->private_lock);
if (PageUptodate(page) || PageDirty(page)) {
/* Mirror the page-level state onto each new buffer. */
bh = head;
do {
if (PageDirty(page))
set_buffer_dirty(bh);
if (PageUptodate(page))
set_buffer_uptodate(bh);
bh = bh->b_this_page;
} while (bh != head);
}
attach_page_buffers(page, head);
spin_unlock(&page->mapping->private_lock);
}
EXPORT_SYMBOL(create_empty_buffers);
/*
 * We are taking a block for data and we don't want any output from any
 * buffer-cache aliases starting from return from that function and
 * until the moment when something will explicitly mark the buffer
 * dirty (hopefully that will not happen until we will free that block ;-)
 * We don't even need to mark it not-uptodate - nobody can expect
 * anything from a newly allocated buffer anyway. We used to used
 * unmap_buffer() for such invalidation, but that was wrong. We definitely
 * don't want to mark the alias unmapped, for example - it would confuse
 * anyone who might pick it with bread() afterwards...
 *
 * Also.. Note that bforget() doesn't lock the buffer. So there can
 * be writeout I/O going on against recently-freed buffers. We don't
 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
 * only if we really need to. That happens here.
 */
void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
{
struct buffer_head *old_bh;
might_sleep();
old_bh = __find_get_block_slow(bdev, block);
if (old_bh) {
/*
 * Clear dirty first so writeback will not re-issue the alias;
 * then wait out any I/O that was already in flight.
 */
clear_buffer_dirty(old_bh);
wait_on_buffer(old_bh);
clear_buffer_req(old_bh);
__brelse(old_bh);
}
}
EXPORT_SYMBOL(unmap_underlying_metadata);
/*
* NOTE! All mapped/uptodate combinations are valid:
*
* Mapped Uptodate Meaning
*
* No No "unknown" - must do get_block()
* No Yes "hole" - zero-filled
* Yes No "allocated" - allocated on disk, not read in
* Yes Yes "valid" - allocated and up-to-date in memory.
*
* "Dirty" is valid only with the last case (mapped+uptodate).
*/
/*
* While block_write_full_page is writing back the dirty buffers under
* the page lock, whoever dirtied the buffers may decide to clean them
* again at any time. We handle that by only looking at the buffer
* state inside lock_buffer().
*
* If block_write_full_page() is called for regular writeback
* (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
* locked buffer. This only can happen if someone has written the buffer
* directly, with submit_bh(). At the address_space level PageWriteback
* prevents this contention from occurring.
*
* If block_write_full_page() is called with wbc->sync_mode ==
* WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
* causes the writes to be flagged as synchronous writes.
*/
/*
 * __block_write_full_page - write out all dirty buffers of one page.
 * @inode: owning inode (for blocksize and i_size)
 * @page: locked page to write
 * @get_block: fs callback used to map delalloc/unmapped dirty buffers
 * @wbc: writeback control (sync mode decides WRITE vs WRITE_SYNC)
 * @handler: end_io handler installed on each async-write buffer
 *
 * Returns 0 on success; on get_block() failure jumps to the recovery
 * path, which still writes out already-mapped dirty buffers to avoid
 * exposing stale data, marks the page in error, and returns the error.
 */
static int __block_write_full_page(struct inode *inode, struct page *page,
get_block_t *get_block, struct writeback_control *wbc,
bh_end_io_t *handler)
{
int err;
sector_t block;
sector_t last_block;
struct buffer_head *bh, *head;
const unsigned blocksize = 1 << inode->i_blkbits;
int nr_underway = 0;
int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
WRITE_SYNC : WRITE);
BUG_ON(!PageLocked(page));
last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
if (!page_has_buffers(page)) {
create_empty_buffers(page, blocksize,
(1 << BH_Dirty)|(1 << BH_Uptodate));
}
/*
 * Be very careful. We have no exclusion from __set_page_dirty_buffers
 * here, and the (potentially unmapped) buffers may become dirty at
 * any time. If a buffer becomes dirty here after we've inspected it
 * then we just miss that fact, and the page stays dirty.
 *
 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
 * handle that here by just cleaning them.
 */
block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
head = page_buffers(page);
bh = head;
/*
 * Get all the dirty buffers mapped to disk addresses and
 * handle any aliases from the underlying blockdev's mapping.
 */
do {
if (block > last_block) {
/*
 * mapped buffers outside i_size will occur, because
 * this page can be outside i_size when there is a
 * truncate in progress.
 */
/*
 * The buffer was zeroed by block_write_full_page()
 */
clear_buffer_dirty(bh);
set_buffer_uptodate(bh);
} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
buffer_dirty(bh)) {
WARN_ON(bh->b_size != blocksize);
/* create=1: allocate on-disk space for delalloc/holes. */
err = get_block(inode, block, bh, 1);
if (err)
goto recover;
clear_buffer_delay(bh);
if (buffer_new(bh)) {
/* blockdev mappings never come here */
clear_buffer_new(bh);
unmap_underlying_metadata(bh->b_bdev,
bh->b_blocknr);
}
}
bh = bh->b_this_page;
block++;
} while (bh != head);
/* Second pass: lock each mapped dirty buffer and flag it for async write. */
do {
if (!buffer_mapped(bh))
continue;
/*
 * If it's a fully non-blocking write attempt and we cannot
 * lock the buffer then redirty the page. Note that this can
 * potentially cause a busy-wait loop from writeback threads
 * and kswapd activity, but those code paths have their own
 * higher-level throttling.
 */
if (wbc->sync_mode != WB_SYNC_NONE) {
lock_buffer(bh);
} else if (!trylock_buffer(bh)) {
redirty_page_for_writepage(wbc, page);
continue;
}
if (test_clear_buffer_dirty(bh)) {
mark_buffer_async_write_endio(bh, handler);
} else {
unlock_buffer(bh);
}
} while ((bh = bh->b_this_page) != head);
/*
 * The page and its buffers are protected by PageWriteback(), so we can
 * drop the bh refcounts early.
 */
BUG_ON(PageWriteback(page));
set_page_writeback(page);
/* Third pass: submit the I/O only after every buffer is flagged. */
do {
struct buffer_head *next = bh->b_this_page;
if (buffer_async_write(bh)) {
submit_bh(write_op, bh);
nr_underway++;
}
bh = next;
} while (bh != head);
unlock_page(page);
err = 0;
done:
if (nr_underway == 0) {
/*
 * The page was marked dirty, but the buffers were
 * clean. Someone wrote them back by hand with
 * ll_rw_block/submit_bh. A rare case.
 */
end_page_writeback(page);
/*
 * The page and buffer_heads can be released at any time from
 * here on.
 */
}
return err;
recover:
/*
 * ENOSPC, or some other error. We may already have added some
 * blocks to the file, so we need to write these out to avoid
 * exposing stale data.
 * The page is currently locked and not marked for writeback
 */
bh = head;
/* Recovery: lock and submit the mapped buffers */
do {
if (buffer_mapped(bh) && buffer_dirty(bh) &&
!buffer_delay(bh)) {
lock_buffer(bh);
mark_buffer_async_write_endio(bh, handler);
} else {
/*
 * The buffer may have been set dirty during
 * attachment to a dirty page.
 */
clear_buffer_dirty(bh);
}
} while ((bh = bh->b_this_page) != head);
SetPageError(page);
BUG_ON(PageWriteback(page));
mapping_set_error(page->mapping, err);
set_page_writeback(page);
do {
struct buffer_head *next = bh->b_this_page;
if (buffer_async_write(bh)) {
clear_buffer_dirty(bh);
submit_bh(write_op, bh);
nr_underway++;
}
bh = next;
} while (bh != head);
unlock_page(page);
goto done;
}
/*
 * If a page has any new buffers, zero them out here, and mark them uptodate
 * and dirty so they'll be written out (in order to prevent uninitialised
 * block data from leaking). And clear the new bit.
 *
 * @page: locked page whose buffers are inspected
 * @from: start byte offset of the affected region within the page
 * @to: end byte offset (exclusive) of the affected region
 */
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
{
unsigned int block_start, block_end;
struct buffer_head *head, *bh;
BUG_ON(!PageLocked(page));
if (!page_has_buffers(page))
return;
bh = head = page_buffers(page);
block_start = 0;
do {
block_end = block_start + bh->b_size;
if (buffer_new(bh)) {
/* Only buffers overlapping [from, to) are touched. */
if (block_end > from && block_start < to) {
if (!PageUptodate(page)) {
unsigned start, size;
/* Zero only the overlap of the buffer with [from, to). */
start = max(from, block_start);
size = min(to, block_end) - start;
zero_user(page, start, size);
set_buffer_uptodate(bh);
}
clear_buffer_new(bh);
/* Dirty so the zeroes reach disk and stale data never leaks. */
mark_buffer_dirty(bh);
}
}
block_start = block_end;
bh = bh->b_this_page;
} while (bh != head);
}
EXPORT_SYMBOL(page_zero_new_buffers);
/*
 * __block_write_begin - prepare a locked page for a write of @len bytes
 * at @pos: map the affected blocks (allocating via @get_block where
 * needed), zero newly-allocated parts outside the write range, and read
 * in any partially-overwritten blocks so the write cannot expose stale
 * data. On error, new buffers are zeroed/dirtied via
 * page_zero_new_buffers(); the caller handles block truncation.
 *
 * Returns 0 on success or a negative errno (e.g. -EIO on read failure).
 */
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
get_block_t *get_block)
{
unsigned from = pos & (PAGE_CACHE_SIZE - 1);
unsigned to = from + len;
struct inode *inode = page->mapping->host;
unsigned block_start, block_end;
sector_t block;
int err = 0;
unsigned blocksize, bbits;
/* At most 2 buffers (first and last block) can need reading. */
struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
BUG_ON(!PageLocked(page));
BUG_ON(from > PAGE_CACHE_SIZE);
BUG_ON(to > PAGE_CACHE_SIZE);
BUG_ON(from > to);
blocksize = 1 << inode->i_blkbits;
if (!page_has_buffers(page))
create_empty_buffers(page, blocksize, 0);
head = page_buffers(page);
bbits = inode->i_blkbits;
block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
for(bh = head, block_start = 0; bh != head || !block_start;
block++, block_start=block_end, bh = bh->b_this_page) {
block_end = block_start + blocksize;
if (block_end <= from || block_start >= to) {
/* Buffer entirely outside the write range. */
if (PageUptodate(page)) {
if (!buffer_uptodate(bh))
set_buffer_uptodate(bh);
}
continue;
}
if (buffer_new(bh))
clear_buffer_new(bh);
if (!buffer_mapped(bh)) {
WARN_ON(bh->b_size != blocksize);
/* create=1: allocate backing store for this block. */
err = get_block(inode, block, bh, 1);
if (err)
break;
if (buffer_new(bh)) {
/* Kill any stale metadata alias of the fresh block. */
unmap_underlying_metadata(bh->b_bdev,
bh->b_blocknr);
if (PageUptodate(page)) {
clear_buffer_new(bh);
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
continue;
}
/* Zero the parts of a new block the write won't cover. */
if (block_end > to || block_start < from)
zero_user_segments(page,
to, block_end,
block_start, from);
continue;
}
}
if (PageUptodate(page)) {
if (!buffer_uptodate(bh))
set_buffer_uptodate(bh);
continue;
}
/* Partially-overwritten, not uptodate: must read it in first. */
if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
!buffer_unwritten(bh) &&
(block_start < from || block_end > to)) {
ll_rw_block(READ, 1, &bh);
*wait_bh++=bh;
}
}
/*
 * If we issued read requests - let them complete.
 */
while(wait_bh > wait) {
wait_on_buffer(*--wait_bh);
if (!buffer_uptodate(*wait_bh))
err = -EIO;
}
if (unlikely(err))
page_zero_new_buffers(page, from, to);
return err;
}
EXPORT_SYMBOL(__block_write_begin);
/*
 * __block_commit_write - mark the written range [from, to) dirty and
 * uptodate, and detect whether the whole page became uptodate.
 * Always returns 0.
 */
static int __block_commit_write(struct inode *inode, struct page *page,
unsigned from, unsigned to)
{
unsigned block_start, block_end;
/* Set if any buffer outside the written range is not uptodate. */
int partial = 0;
unsigned blocksize;
struct buffer_head *bh, *head;
blocksize = 1 << inode->i_blkbits;
for(bh = head = page_buffers(page), block_start = 0;
bh != head || !block_start;
block_start=block_end, bh = bh->b_this_page) {
block_end = block_start + blocksize;
if (block_end <= from || block_start >= to) {
if (!buffer_uptodate(bh))
partial = 1;
} else {
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
}
clear_buffer_new(bh);
}
/*
 * If this is a partial write which happened to make all buffers
 * uptodate then we can optimize away a bogus readpage() for
 * the next read(). Here we 'discover' whether the page went
 * uptodate as a result of this (potentially partial) write.
 */
if (!partial)
SetPageUptodate(page);
return 0;
}
/*
 * block_write_begin takes care of the basic task of block allocation and
 * bringing partial write blocks uptodate first.
 *
 * On success *pagep holds the locked, prepared page; on failure *pagep
 * is NULL and a negative errno is returned.
 *
 * The filesystem needs to handle block truncation upon failure.
 */
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
unsigned flags, struct page **pagep, get_block_t *get_block)
{
struct page *page;
int err;

page = grab_cache_page_write_begin(mapping, pos >> PAGE_CACHE_SHIFT, flags);
if (!page)
return -ENOMEM;

err = __block_write_begin(page, pos, len, get_block);
if (likely(!err)) {
*pagep = page;
return 0;
}

/* Preparation failed: release the page and report NULL to the caller. */
unlock_page(page);
page_cache_release(page);
*pagep = NULL;
return err;
}
EXPORT_SYMBOL(block_write_begin);
/*
 * block_write_end - commit the bytes actually copied into the page.
 * Returns the number of bytes committed (possibly 0 after a short copy
 * into a non-uptodate page). Does not update i_size; see
 * generic_write_end() for that.
 */
int block_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
struct inode *inode = mapping->host;
unsigned start;
start = pos & (PAGE_CACHE_SIZE - 1);
if (unlikely(copied < len)) {
/*
 * The buffers that were written will now be uptodate, so we
 * don't have to worry about a readpage reading them and
 * overwriting a partial write. However if we have encountered
 * a short write and only partially written into a buffer, it
 * will not be marked uptodate, so a readpage might come in and
 * destroy our partial write.
 *
 * Do the simplest thing, and just treat any short write to a
 * non uptodate page as a zero-length write, and force the
 * caller to redo the whole thing.
 */
if (!PageUptodate(page))
copied = 0;
/* Zero/clean any new buffers in the range we are abandoning. */
page_zero_new_buffers(page, start+copied, start+len);
}
flush_dcache_page(page);
/* This could be a short (even 0-length) commit */
__block_commit_write(inode, page, start, start+copied);
return copied;
}
EXPORT_SYMBOL(block_write_end);
/*
 * generic_write_end - commit a write and update i_size, then unlock and
 * release the page. Returns the number of bytes committed.
 */
int generic_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
struct inode *inode = mapping->host;
int i_size_changed = 0;
copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
/*
 * No need to use i_size_read() here, the i_size
 * cannot change under us because we hold i_mutex.
 *
 * But it's important to update i_size while still holding page lock:
 * page writeout could otherwise come in and zero beyond i_size.
 */
if (pos+copied > inode->i_size) {
i_size_write(inode, pos+copied);
i_size_changed = 1;
}
unlock_page(page);
page_cache_release(page);
/*
 * Don't mark the inode dirty under page lock. First, it unnecessarily
 * makes the holding time of page lock longer. Second, it forces lock
 * ordering of page lock and transaction start for journaling
 * filesystems.
 */
if (i_size_changed)
mark_inode_dirty(inode);
return copied;
}
EXPORT_SYMBOL(generic_write_end);
/*
 * block_is_partially_uptodate checks whether buffers within a page are
 * uptodate or not.
 *
 * Returns true if all buffers which correspond to a file portion
 * we want to read are uptodate.
 *
 * @page: the page to inspect
 * @desc: read descriptor supplying the remaining byte count
 * @from: byte offset within the page where the read starts
 */
int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
unsigned long from)
{
struct inode *inode = page->mapping->host;
unsigned block_start, block_end, blocksize;
unsigned to;
struct buffer_head *bh, *head;
int ret = 1;
if (!page_has_buffers(page))
return 0;
blocksize = 1 << inode->i_blkbits;
/* Clamp the wanted range to this page. */
to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
to = from + to;
/* Whole-page reads are handled by PageUptodate(); bail out here. */
if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
return 0;
head = page_buffers(page);
bh = head;
block_start = 0;
do {
block_end = block_start + blocksize;
if (block_end > from && block_start < to) {
if (!buffer_uptodate(bh)) {
ret = 0;
break;
}
/* Past the end of the wanted range: all overlapping buffers OK. */
if (block_end >= to)
break;
}
block_start = block_end;
bh = bh->b_this_page;
} while (bh != head);
return ret;
}
EXPORT_SYMBOL(block_is_partially_uptodate);
/*
 * Generic "read page" function for block devices that have the normal
 * get_block functionality. This is most of the block device filesystems.
 * Reads the page asynchronously --- the unlock_buffer() and
 * set/clear_buffer_uptodate() functions propagate buffer state into the
 * page struct once IO has completed.
 *
 * Holes and blocks beyond EOF are zero-filled in place; only buffers
 * that are mapped but stale are actually submitted for I/O.
 */
int block_read_full_page(struct page *page, get_block_t *get_block)
{
struct inode *inode = page->mapping->host;
sector_t iblock, lblock;
struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
unsigned int blocksize;
int nr, i;
int fully_mapped = 1;
BUG_ON(!PageLocked(page));
blocksize = 1 << inode->i_blkbits;
if (!page_has_buffers(page))
create_empty_buffers(page, blocksize, 0);
head = page_buffers(page);
iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
/* lblock = first block index past EOF (rounded up). */
lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
bh = head;
nr = 0;
i = 0;
/* Stage one: collect the buffers that need reading into arr[]. */
do {
if (buffer_uptodate(bh))
continue;
if (!buffer_mapped(bh)) {
int err = 0;
fully_mapped = 0;
if (iblock < lblock) {
WARN_ON(bh->b_size != blocksize);
/* create=0: read path never allocates blocks. */
err = get_block(inode, iblock, bh, 0);
if (err)
SetPageError(page);
}
if (!buffer_mapped(bh)) {
/* Hole (or beyond EOF): present it as zeroes. */
zero_user(page, i * blocksize, blocksize);
if (!err)
set_buffer_uptodate(bh);
continue;
}
/*
 * get_block() might have updated the buffer
 * synchronously
 */
if (buffer_uptodate(bh))
continue;
}
arr[nr++] = bh;
} while (i++, iblock++, (bh = bh->b_this_page) != head);
if (fully_mapped)
SetPageMappedToDisk(page);
if (!nr) {
/*
 * All buffers are uptodate - we can set the page uptodate
 * as well. But not if get_block() returned an error.
 */
if (!PageError(page))
SetPageUptodate(page);
unlock_page(page);
return 0;
}
/* Stage two: lock the buffers */
for (i = 0; i < nr; i++) {
bh = arr[i];
lock_buffer(bh);
mark_buffer_async_read(bh);
}
/*
 * Stage 3: start the IO. Check for uptodateness
 * inside the buffer lock in case another process reading
 * the underlying blockdev brought it uptodate (the sct fix).
 */
for (i = 0; i < nr; i++) {
bh = arr[i];
if (buffer_uptodate(bh))
end_buffer_async_read(bh, 1);
else
submit_bh(READ, bh);
}
return 0;
}
EXPORT_SYMBOL(block_read_full_page);
/*
 * generic_cont_expand_simple - expand a file to @size via a zero-length
 * pagecache write at the new EOF, letting the filesystem's
 * write_begin/write_end pair deal with the hole.
 *
 * Returns 0 on success or a negative errno.
 */
int generic_cont_expand_simple(struct inode *inode, loff_t size)
{
struct address_space *mapping = inode->i_mapping;
struct page *page;
void *fsdata;
int err;

err = inode_newsize_ok(inode, size);
if (err)
return err;

err = pagecache_write_begin(NULL, mapping, size, 0,
AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
&page, &fsdata);
if (err)
return err;

err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
/* A zero-length commit can never report bytes written. */
BUG_ON(err > 0);
return err;
}
EXPORT_SYMBOL(generic_cont_expand_simple);
/*
 * cont_expand_zero - zero-fill the file tail between *bytes and @pos.
 * @file/@mapping: target file
 * @pos: position the caller is about to write at
 * @bytes: filesystem's "last byte written" marker; advanced (and
 * block-aligned upward) as pages are zeroed.
 *
 * Works page by page through the pagecache so ordinary write_begin/
 * write_end paths allocate and dirty the blocks.
 */
static int cont_expand_zero(struct file *file, struct address_space *mapping,
loff_t pos, loff_t *bytes)
{
struct inode *inode = mapping->host;
unsigned blocksize = 1 << inode->i_blkbits;
struct page *page;
void *fsdata;
pgoff_t index, curidx;
loff_t curpos;
unsigned zerofrom, offset, len;
int err = 0;
index = pos >> PAGE_CACHE_SHIFT;
offset = pos & ~PAGE_CACHE_MASK;
/* Zero whole pages until we reach the page containing @pos. */
while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
zerofrom = curpos & ~PAGE_CACHE_MASK;
if (zerofrom & (blocksize-1)) {
/* Round *bytes up to the next block boundary. */
*bytes |= (blocksize-1);
(*bytes)++;
}
len = PAGE_CACHE_SIZE - zerofrom;
err = pagecache_write_begin(file, mapping, curpos, len,
AOP_FLAG_UNINTERRUPTIBLE,
&page, &fsdata);
if (err)
goto out;
zero_user(page, zerofrom, len);
err = pagecache_write_end(file, mapping, curpos, len, len,
page, fsdata);
if (err < 0)
goto out;
BUG_ON(err != len);
err = 0;
balance_dirty_pages_ratelimited(mapping);
}
/* page covers the boundary, find the boundary offset */
if (index == curidx) {
zerofrom = curpos & ~PAGE_CACHE_MASK;
/* if we will expand the thing last block will be filled */
if (offset <= zerofrom) {
goto out;
}
if (zerofrom & (blocksize-1)) {
/* Same block-alignment round-up as above. */
*bytes |= (blocksize-1);
(*bytes)++;
}
len = offset - zerofrom;
err = pagecache_write_begin(file, mapping, curpos, len,
AOP_FLAG_UNINTERRUPTIBLE,
&page, &fsdata);
if (err)
goto out;
zero_user(page, zerofrom, len);
err = pagecache_write_end(file, mapping, curpos, len, len,
page, fsdata);
if (err < 0)
goto out;
BUG_ON(err != len);
err = 0;
}
out:
return err;
}
/*
 * For moronic filesystems that do not allow holes in file.
 * We may have to extend the file.
 *
 * Zero-fills from the current end-of-data marker (*bytes) up to @pos
 * via cont_expand_zero(), rounds *bytes up to a block boundary when the
 * write extends past it, then performs a normal block_write_begin().
 */
int cont_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata,
get_block_t *get_block, loff_t *bytes)
{
struct inode *inode = mapping->host;
unsigned blocksize = 1 << inode->i_blkbits;
unsigned zerofrom;
int err;
err = cont_expand_zero(file, mapping, pos, bytes);
if (err)
return err;
zerofrom = *bytes & ~PAGE_CACHE_MASK;
if (pos+len > *bytes && zerofrom & (blocksize-1)) {
/* Round *bytes up to the next block boundary. */
*bytes |= (blocksize-1);
(*bytes)++;
}
return block_write_begin(mapping, pos, len, flags, pagep, get_block);
}
EXPORT_SYMBOL(cont_write_begin);
/*
 * block_commit_write - public wrapper committing bytes [from, to) of a
 * locked, buffer-backed page. Always returns 0.
 */
int block_commit_write(struct page *page, unsigned from, unsigned to)
{
__block_commit_write(page->mapping->host, page, from, to);
return 0;
}
EXPORT_SYMBOL(block_commit_write);
/*
 * block_page_mkwrite() is not allowed to change the file size as it gets
 * called from a page fault handler when a page is first dirtied. Hence we must
 * be careful to check for EOF conditions here. We set the page up correctly
 * for a written page which means we get ENOSPC checking when writing into
 * holes and correct delalloc and unwritten extent mapping on filesystems that
 * support these features.
 *
 * We are not allowed to take the i_mutex here so we have to play games to
 * protect against truncate races as the page could now be beyond EOF. Because
 * truncate writes the inode size before removing pages, once we have the
 * page lock we can determine safely if the page is beyond EOF. If it is not
 * beyond EOF, then the page is guaranteed safe against truncation until we
 * unlock the page.
 *
 * Direct callers of this function should protect against filesystem freezing
 * using sb_start_write() - sb_end_write() functions.
 *
 * Returns 0 with the page locked on success, or a negative errno
 * (-EFAULT means the page was truncated) with the page unlocked.
 */
int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
get_block_t get_block)
{
struct page *page = vmf->page;
struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
unsigned long end;
loff_t size;
int ret;
/*
 * Update file times before taking page lock. We may end up failing the
 * fault so this update may be superfluous but who really cares...
 */
file_update_time(vma->vm_file);
lock_page(page);
size = i_size_read(inode);
if ((page->mapping != inode->i_mapping) ||
(page_offset(page) > size)) {
/* We overload EFAULT to mean page got truncated */
ret = -EFAULT;
goto out_unlock;
}
/* page is wholly or partially inside EOF */
if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
end = size & ~PAGE_CACHE_MASK;
else
end = PAGE_CACHE_SIZE;
/* Map/allocate blocks for the in-EOF part and dirty them. */
ret = __block_write_begin(page, 0, end, get_block);
if (!ret)
ret = block_commit_write(page, 0, end);
if (unlikely(ret < 0))
goto out_unlock;
set_page_dirty(page);
/* Stabilize the page contents against concurrent writeback. */
wait_on_page_writeback(page);
return 0;
out_unlock:
unlock_page(page);
return ret;
}
EXPORT_SYMBOL(__block_page_mkwrite);
/*
 * block_page_mkwrite - page-fault entry point wrapping
 * __block_page_mkwrite() in freeze protection and translating the errno
 * result into a VM fault code.
 */
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
get_block_t get_block)
{
struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb;
int err;

sb_start_pagefault(sb);
err = __block_page_mkwrite(vma, vmf, get_block);
sb_end_pagefault(sb);
return block_page_mkwrite_return(err);
}
EXPORT_SYMBOL(block_page_mkwrite);
/*
 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
 * immediately, while under the page lock. So it needs a special end_io
 * handler which does not touch the bh after unlocking it.
 */
static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
{
/* Delegate; the no-touch variant must not reference bh after unlock. */
__end_buffer_read_notouch(bh, uptodate);
}
/*
 * Attach the singly-linked list of buffers created by nobh_write_begin, to
 * the page (converting it to circular linked list and taking care of page
 * dirty races).
 */
static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
{
struct buffer_head *bh;
BUG_ON(!PageLocked(page));
spin_lock(&page->mapping->private_lock);
bh = head;
do {
if (PageDirty(page))
set_buffer_dirty(bh);
/* Close the NULL-terminated chain into the per-page ring. */
if (!bh->b_this_page)
bh->b_this_page = head;
bh = bh->b_this_page;
} while (bh != head);
attach_page_buffers(page, head);
spin_unlock(&page->mapping->private_lock);
}
/*
 * On entry, the page is fully not uptodate.
 * On exit the page is fully uptodate in the areas outside (from,to)
 * The filesystem needs to handle block truncation upon failure.
 *
 * Unlike block_write_begin(), the buffer heads allocated here are
 * normally transient bookkeeping: they are handed to the caller via
 * *fsdata (to be freed by nobh_write_end) rather than attached to the
 * page, except on error or when the page already has buffers.
 */
int nobh_write_begin(struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata,
get_block_t *get_block)
{
struct inode *inode = mapping->host;
const unsigned blkbits = inode->i_blkbits;
const unsigned blocksize = 1 << blkbits;
struct buffer_head *head, *bh;
struct page *page;
pgoff_t index;
unsigned from, to;
unsigned block_in_page;
unsigned block_start, block_end;
sector_t block_in_file;
int nr_reads = 0;
int ret = 0;
int is_mapped_to_disk = 1;
index = pos >> PAGE_CACHE_SHIFT;
from = pos & (PAGE_CACHE_SIZE - 1);
to = from + len;
page = grab_cache_page_write_begin(mapping, index, flags);
if (!page)
return -ENOMEM;
*pagep = page;
*fsdata = NULL;
if (page_has_buffers(page)) {
/* Page already went the buffered route: fall back to it. */
ret = __block_write_begin(page, pos, len, get_block);
if (unlikely(ret))
goto out_release;
return ret;
}
if (PageMappedToDisk(page))
return 0;
/*
 * Allocate buffers so that we can keep track of state, and potentially
 * attach them to the page if an error occurs. In the common case of
 * no error, they will just be freed again without ever being attached
 * to the page (which is all OK, because we're under the page lock).
 *
 * Be careful: the buffer linked list is a NULL terminated one, rather
 * than the circular one we're used to.
 */
head = alloc_page_buffers(page, blocksize, 0);
if (!head) {
ret = -ENOMEM;
goto out_release;
}
block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
/*
 * We loop across all blocks in the page, whether or not they are
 * part of the affected region. This is so we can discover if the
 * page is fully mapped-to-disk.
 */
for (block_start = 0, block_in_page = 0, bh = head;
block_start < PAGE_CACHE_SIZE;
block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
int create;
block_end = block_start + blocksize;
bh->b_state = 0;
create = 1;
/* Blocks wholly past the write range are only probed, not allocated. */
if (block_start >= to)
create = 0;
ret = get_block(inode, block_in_file + block_in_page,
bh, create);
if (ret)
goto failed;
if (!buffer_mapped(bh))
is_mapped_to_disk = 0;
if (buffer_new(bh))
unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
if (PageUptodate(page)) {
set_buffer_uptodate(bh);
continue;
}
if (buffer_new(bh) || !buffer_mapped(bh)) {
/* New block or hole: zero the parts the write won't cover. */
zero_user_segments(page, block_start, from,
to, block_end);
continue;
}
if (buffer_uptodate(bh))
continue; /* reiserfs does this */
/* Partially-overwritten existing block: read it in. */
if (block_start < from || block_end > to) {
lock_buffer(bh);
bh->b_end_io = end_buffer_read_nobh;
submit_bh(READ, bh);
nr_reads++;
}
}
if (nr_reads) {
/*
 * The page is locked, so these buffers are protected from
 * any VM or truncate activity. Hence we don't need to care
 * for the buffer_head refcounts.
 */
for (bh = head; bh; bh = bh->b_this_page) {
wait_on_buffer(bh);
if (!buffer_uptodate(bh))
ret = -EIO;
}
if (ret)
goto failed;
}
if (is_mapped_to_disk)
SetPageMappedToDisk(page);
*fsdata = head; /* to be released by nobh_write_end */
return 0;
failed:
BUG_ON(!ret);
/*
 * Error recovery is a bit difficult. We need to zero out blocks that
 * were newly allocated, and dirty them to ensure they get written out.
 * Buffers need to be attached to the page at this point, otherwise
 * the handling of potential IO errors during writeout would be hard
 * (could try doing synchronous writeout, but what if that fails too?)
 */
attach_nobh_buffers(page, head);
page_zero_new_buffers(page, from, to);
out_release:
unlock_page(page);
page_cache_release(page);
*pagep = NULL;
return ret;
}
EXPORT_SYMBOL(nobh_write_begin);
/*
 * nobh_write_end - commit a write begun by nobh_write_begin().
 * @fsdata carries the transient buffer list from nobh_write_begin();
 * it is either attached to the page (short copy) or freed here.
 * Returns the number of bytes committed.
 */
int nobh_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
struct inode *inode = page->mapping->host;
struct buffer_head *head = fsdata;
struct buffer_head *bh;
BUG_ON(fsdata != NULL && page_has_buffers(page));
if (unlikely(copied < len) && head)
/* Short copy: attach buffers so the buffered path can recover. */
attach_nobh_buffers(page, head);
if (page_has_buffers(page))
return generic_write_end(file, mapping, pos, len,
copied, page, fsdata);
SetPageUptodate(page);
set_page_dirty(page);
if (pos+copied > inode->i_size) {
i_size_write(inode, pos+copied);
mark_inode_dirty(inode);
}
unlock_page(page);
page_cache_release(page);
/* Free the transient buffer list (NULL-terminated, never attached). */
while (head) {
bh = head;
head = head->b_this_page;
free_buffer_head(bh);
}
return copied;
}
EXPORT_SYMBOL(nobh_write_end);
/*
 * nobh_writepage() - based on block_full_write_page() except
 * that it tries to operate without attaching bufferheads to
 * the page.
 */
int nobh_writepage(struct page *page, get_block_t *get_block,
struct writeback_control *wbc)
{
struct inode * const inode = page->mapping->host;
loff_t i_size = i_size_read(inode);
const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
unsigned offset;
int ret;
/* Is the page fully inside i_size? */
if (page->index < end_index)
goto out;
/* Is the page fully outside i_size? (truncate in progress) */
offset = i_size & (PAGE_CACHE_SIZE-1);
if (page->index >= end_index+1 || !offset) {
/*
 * The page may have dirty, unmapped buffers. For example,
 * they may have been added in ext3_writepage(). Make them
 * freeable here, so the page does not leak.
 */
#if 0
/* Not really sure about this - do we need this ? */
if (page->mapping->a_ops->invalidatepage)
page->mapping->a_ops->invalidatepage(page, offset);
#endif
unlock_page(page);
return 0; /* don't care */
}
/*
 * The page straddles i_size. It must be zeroed out on each and every
 * writepage invocation because it may be mmapped. "A file is mapped
 * in multiples of the page size. For a file that is not a multiple of
 * the page size, the remaining memory is zeroed when mapped, and
 * writes to that region are not written out to the file."
 */
zero_user_segment(page, offset, PAGE_CACHE_SIZE);
out:
ret = mpage_writepage(page, get_block, wbc);
/* -EAGAIN: mpage path could not cope; fall back to buffer heads. */
if (ret == -EAGAIN)
ret = __block_write_full_page(inode, page, get_block, wbc,
end_buffer_async_write);
return ret;
}
EXPORT_SYMBOL(nobh_writepage);
/*
 * nobh_truncate_page - zero the tail of the (possibly partial) block
 * containing @from, without attaching buffer heads where possible.
 * Falls back to block_truncate_page() if the page has (or acquires)
 * buffers. Returns 0 on success or a negative errno.
 */
int nobh_truncate_page(struct address_space *mapping,
loff_t from, get_block_t *get_block)
{
pgoff_t index = from >> PAGE_CACHE_SHIFT;
unsigned offset = from & (PAGE_CACHE_SIZE-1);
unsigned blocksize;
sector_t iblock;
unsigned length, pos;
struct inode *inode = mapping->host;
struct page *page;
/* Throwaway on-stack bh used purely to query get_block(). */
struct buffer_head map_bh;
int err;
blocksize = 1 << inode->i_blkbits;
length = offset & (blocksize - 1);
/* Block boundary? Nothing to do */
if (!length)
return 0;
/* Number of bytes to zero: from @offset to the end of its block. */
length = blocksize - length;
iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
page = grab_cache_page(mapping, index);
err = -ENOMEM;
if (!page)
goto out;
if (page_has_buffers(page)) {
has_buffers:
unlock_page(page);
page_cache_release(page);
return block_truncate_page(mapping, from, get_block);
}
/* Find the buffer that contains "offset" */
pos = blocksize;
while (offset >= pos) {
iblock++;
pos += blocksize;
}
map_bh.b_size = blocksize;
map_bh.b_state = 0;
/* create=0: probe the mapping only; never allocate here. */
err = get_block(inode, iblock, &map_bh, 0);
if (err)
goto unlock;
/* unmapped? It's a hole - nothing to do */
if (!buffer_mapped(&map_bh))
goto unlock;
/* Ok, it's mapped. Make sure it's up-to-date */
if (!PageUptodate(page)) {
/* readpage unlocks the page; we must re-lock and re-check. */
err = mapping->a_ops->readpage(NULL, page);
if (err) {
page_cache_release(page);
goto out;
}
lock_page(page);
if (!PageUptodate(page)) {
err = -EIO;
goto unlock;
}
/* readpage may have attached buffers; use the buffered path then. */
if (page_has_buffers(page))
goto has_buffers;
}
zero_user(page, offset, length);
set_page_dirty(page);
err = 0;
unlock:
unlock_page(page);
page_cache_release(page);
out:
return err;
}
EXPORT_SYMBOL(nobh_truncate_page);
/*
 * block_truncate_page - zero the partial block at the new end-of-file
 * for buffer_head-backed address spaces.
 *
 * @mapping:   address space containing the page
 * @from:      new end-of-file offset
 * @get_block: filesystem block-mapping callback
 *
 * Returns 0 on success (including a block-aligned @from and holes) or a
 * negative errno.  The affected buffer is marked dirty but not written.
 */
int block_truncate_page(struct address_space *mapping,
			loff_t from, get_block_t *get_block)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize;
	sector_t iblock;
	unsigned length, pos;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head *bh;
	int err;

	blocksize = 1 << inode->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;	/* number of bytes to zero */
	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	page = grab_cache_page(mapping, index);
	err = -ENOMEM;
	if (!page)
		goto out;

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;
	if (!buffer_mapped(bh)) {
		WARN_ON(bh->b_size != blocksize);
		err = get_block(inode, iblock, bh, 0);
		if (err)
			goto unlock;
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh))
			goto unlock;
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	/* Read the block in unless its contents are already known */
	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
		err = -EIO;
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	zero_user(page, offset, length);
	mark_buffer_dirty(bh);
	err = 0;

unlock:
	unlock_page(page);
	page_cache_release(page);
out:
	return err;
}
EXPORT_SYMBOL(block_truncate_page);
/*
 * The generic ->writepage function for buffer-backed address_spaces.
 * This form passes in the end_io handler used to finish the IO.
 * Returns 0 or the error from __block_write_full_page().
 */
int block_write_full_page_endio(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc, bh_end_io_t *handler)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;

	/* Is the page fully inside i_size? */
	if (page->index < end_index)
		return __block_write_full_page(inode, page, get_block, wbc,
					       handler);

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index >= end_index+1 || !offset) {
		/*
		 * The page may have dirty, unmapped buffers. For example,
		 * they may have been added in ext3_writepage(). Make them
		 * freeable here, so the page does not leak.
		 */
		do_invalidatepage(page, 0);
		unlock_page(page);
		return 0; /* don't care */
	}

	/*
	 * The page straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	return __block_write_full_page(inode, page, get_block, wbc, handler);
}
EXPORT_SYMBOL(block_write_full_page_endio);

/*
 * The generic ->writepage function for buffer-backed address_spaces.
 * Completion is signalled via the default end_buffer_async_write handler.
 */
int block_write_full_page(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc)
{
	return block_write_full_page_endio(page, get_block, wbc,
					   end_buffer_async_write);
}
EXPORT_SYMBOL(block_write_full_page);
/*
 * generic_block_bmap - generic ->bmap implementation: translate a file
 * block number to a device block number via the fs get_block callback.
 * Returns 0 for unmapped blocks (holes), since b_blocknr stays 0.
 */
sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
			    get_block_t *get_block)
{
	struct buffer_head tmp;
	struct inode *inode = mapping->host;

	/*
	 * NOTE(review): only b_state, b_blocknr and b_size are initialised
	 * here; get_block() implementations must not rely on other fields
	 * of the on-stack buffer_head — confirm for new callbacks.
	 */
	tmp.b_state = 0;
	tmp.b_blocknr = 0;
	tmp.b_size = 1 << inode->i_blkbits;
	get_block(inode, block, &tmp, 0);
	return tmp.b_blocknr;
}
EXPORT_SYMBOL(generic_block_bmap);

/*
 * Completion handler for bios built by submit_bh(): propagate I/O status
 * to the buffer_head's b_end_io callback and drop the bio.
 */
static void end_bio_bh_io_sync(struct bio *bio, int err)
{
	struct buffer_head *bh = bio->bi_private;

	if (err == -EOPNOTSUPP) {
		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
	}

	/* The lower layer asked for quiet error handling */
	if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
		set_bit(BH_Quiet, &bh->b_state);

	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
	bio_put(bio);
}
/*
 * submit_bh - wrap a single locked, mapped buffer_head in a one-segment
 * bio and submit it to the block layer.
 *
 * Returns 0, or -EOPNOTSUPP if the device rejected the request.
 * Completion is reported to bh->b_end_io() via end_bio_bh_io_sync().
 */
int submit_bh(int rw, struct buffer_head * bh)
{
	struct bio *bio;
	int ret = 0;

	BUG_ON(!buffer_locked(bh));
	BUG_ON(!buffer_mapped(bh));
	BUG_ON(!bh->b_end_io);
	BUG_ON(buffer_delay(bh));
	BUG_ON(buffer_unwritten(bh));

	/*
	 * Only clear out a write error when rewriting
	 */
	if (test_set_buffer_req(bh) && (rw & WRITE))
		clear_buffer_write_io_error(bh);

	/*
	 * from here on down, it's all bio -- do the initial mapping,
	 * submit_bio -> generic_make_request may further map this bio around
	 */
	bio = bio_alloc(GFP_NOIO, 1);

	/* b_blocknr is in b_size units; convert to 512-byte sectors */
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio->bi_io_vec[0].bv_page = bh->b_page;
	bio->bi_io_vec[0].bv_len = bh->b_size;
	bio->bi_io_vec[0].bv_offset = bh_offset(bh);

	bio->bi_vcnt = 1;
	bio->bi_idx = 0;
	bio->bi_size = bh->b_size;

	bio->bi_end_io = end_bio_bh_io_sync;
	bio->bi_private = bh;

	/* Hold an extra ref so we can inspect bi_flags after submission */
	bio_get(bio);
	submit_bio(rw, bio);

	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(submit_bh);
/**
 * ll_rw_block: low-level access to block devices (DEPRECATED)
 * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
 * @nr: number of &struct buffer_heads in the array
 * @bhs: array of pointers to &struct buffer_head
 *
 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
 * requests an I/O operation on them, either a %READ or a %WRITE. The third
 * %READA option is described in the documentation for generic_make_request()
 * which ll_rw_block() calls.
 *
 * This function drops any buffer that it cannot get a lock on (with the
 * BH_Lock state bit), any buffer that appears to be clean when doing a write
 * request, and any buffer that appears to be up-to-date when doing read
 * request. Further it marks as clean buffers that are processed for
 * writing (the buffer cache won't assume that they are actually clean
 * until the buffer gets unlocked).
 *
 * ll_rw_block sets b_end_io to simple completion handler that marks
 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
 * any waiters.
 *
 * All of the buffers must be for the same device, and must also be a
 * multiple of the current approved size for the device.
 */
void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
{
	int i;

	for (i = 0; i < nr; i++) {
		struct buffer_head *bh = bhs[i];

		/* Skip buffers that are already locked (I/O in flight) */
		if (!trylock_buffer(bh))
			continue;
		if (rw == WRITE) {
			if (test_clear_buffer_dirty(bh)) {
				bh->b_end_io = end_buffer_write_sync;
				get_bh(bh);	/* dropped on completion */
				submit_bh(WRITE, bh);
				continue;
			}
		} else {
			if (!buffer_uptodate(bh)) {
				bh->b_end_io = end_buffer_read_sync;
				get_bh(bh);	/* dropped on completion */
				submit_bh(rw, bh);
				continue;
			}
		}
		/* Nothing to do for this buffer: drop the lock we took */
		unlock_buffer(bh);
	}
}
EXPORT_SYMBOL(ll_rw_block);
/*
 * write_dirty_buffer - start asynchronous writeout of a buffer if it is
 * dirty.  Clean buffers are simply unlocked again; completion is not
 * awaited.
 */
void write_dirty_buffer(struct buffer_head *bh, int rw)
{
	lock_buffer(bh);
	if (!test_clear_buffer_dirty(bh)) {
		unlock_buffer(bh);
		return;
	}
	bh->b_end_io = end_buffer_write_sync;
	get_bh(bh);	/* dropped by end_buffer_write_sync() */
	submit_bh(rw, bh);
}
EXPORT_SYMBOL(write_dirty_buffer);

/*
 * For a data-integrity writeout, we need to wait upon any in-progress I/O
 * and then start new I/O and then wait upon it. The caller must have a ref on
 * the buffer_head.
 *
 * Returns 0 on success, the submit_bh() error, or -EIO if the buffer did
 * not come back uptodate after the write.
 */
int __sync_dirty_buffer(struct buffer_head *bh, int rw)
{
	int ret = 0;

	WARN_ON(atomic_read(&bh->b_count) < 1);
	lock_buffer(bh);	/* waits for any in-flight I/O */
	if (test_clear_buffer_dirty(bh)) {
		get_bh(bh);
		bh->b_end_io = end_buffer_write_sync;
		ret = submit_bh(rw, bh);
		wait_on_buffer(bh);
		if (!ret && !buffer_uptodate(bh))
			ret = -EIO;
	} else {
		unlock_buffer(bh);
	}
	return ret;
}
EXPORT_SYMBOL(__sync_dirty_buffer);

/* Synchronously write a dirty buffer with WRITE_SYNC priority. */
int sync_dirty_buffer(struct buffer_head *bh)
{
	return __sync_dirty_buffer(bh, WRITE_SYNC);
}
EXPORT_SYMBOL(sync_dirty_buffer);
/*
 * try_to_free_buffers() checks if all the buffers on this particular page
 * are unused, and releases them if so.
 *
 * Exclusion against try_to_free_buffers may be obtained by either
 * locking the page or by holding its mapping's private_lock.
 *
 * If the page is dirty but all the buffers are clean then we need to
 * be sure to mark the page clean as well. This is because the page
 * may be against a block device, and a later reattachment of buffers
 * to a dirty page will set *all* buffers dirty. Which would corrupt
 * filesystem data on the same device.
 *
 * The same applies to regular filesystem pages: if all the buffers are
 * clean then we set the page clean and proceed. To do that, we require
 * total exclusion from __set_page_dirty_buffers(). That is obtained with
 * private_lock.
 *
 * try_to_free_buffers() is non-blocking.
 */

/* A buffer is busy if it is referenced, dirty or locked. */
static inline int buffer_busy(struct buffer_head *bh)
{
	return atomic_read(&bh->b_count) |
		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
}

/*
 * Detach all buffers from @page if none of them is busy.  On success the
 * detached ring is returned through @buffers_to_free for the caller to
 * release, and 1 is returned; otherwise 0 and the page is untouched.
 */
static int
drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh;

	/* First pass: bail out if any buffer is still in use */
	bh = head;
	do {
		if (buffer_write_io_error(bh) && page->mapping)
			set_bit(AS_EIO, &page->mapping->flags);
		if (buffer_busy(bh))
			goto failed;
		bh = bh->b_this_page;
	} while (bh != head);

	/* Second pass: unhook the buffers from their assoc queues */
	do {
		struct buffer_head *next = bh->b_this_page;

		if (bh->b_assoc_map)
			__remove_assoc_queue(bh);
		bh = next;
	} while (bh != head);
	*buffers_to_free = head;
	__clear_page_buffers(page);
	return 1;
failed:
	return 0;
}
/*
 * try_to_free_buffers - release all buffers attached to a locked page if
 * none of them is busy.  Returns 1 when the buffers were freed, else 0.
 * See the big comment above buffer_busy() for the locking rules.
 */
int try_to_free_buffers(struct page *page)
{
	struct address_space * const mapping = page->mapping;
	struct buffer_head *buffers_to_free = NULL;
	int ret = 0;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;

	if (mapping == NULL) {		/* can this still happen? */
		ret = drop_buffers(page, &buffers_to_free);
		goto out;
	}

	spin_lock(&mapping->private_lock);
	ret = drop_buffers(page, &buffers_to_free);

	/*
	 * If the filesystem writes its buffers by hand (eg ext3)
	 * then we can have clean buffers against a dirty page. We
	 * clean the page here; otherwise the VM will never notice
	 * that the filesystem did any IO at all.
	 *
	 * Also, during truncate, discard_buffer will have marked all
	 * the page's buffers clean. We discover that here and clean
	 * the page also.
	 *
	 * private_lock must be held over this entire operation in order
	 * to synchronise against __set_page_dirty_buffers and prevent the
	 * dirty bit from being lost.
	 */
	if (ret)
		cancel_dirty_page(page, PAGE_CACHE_SIZE);
	spin_unlock(&mapping->private_lock);
out:
	if (buffers_to_free) {
		/* Walk the detached ring, releasing each buffer_head */
		struct buffer_head *bh = buffers_to_free;

		do {
			struct buffer_head *next = bh->b_this_page;
			free_buffer_head(bh);
			bh = next;
		} while (bh != buffers_to_free);
	}
	return ret;
}
EXPORT_SYMBOL(try_to_free_buffers);
/*
 * There are no bdflush tunables left. But distributions are
 * still running obsolete flush daemons, so we terminate them here.
 *
 * Use of bdflush() is deprecated and will be removed in a future kernel.
 * The `flush-X' kernel threads fully replace bdflush daemons and this call.
 */
SYSCALL_DEFINE2(bdflush, int, func, long, data)
{
	static int msg_count;	/* rate-limits the deprecation warning */

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (msg_count < 5) {
		msg_count++;
		printk(KERN_INFO
			"warning: process `%s' used the obsolete bdflush"
			" system call\n", current->comm);
		printk(KERN_INFO "Fix your initscripts?\n");
	}

	/* func == 1 historically meant "become the flush daemon" */
	if (func == 1)
		do_exit(0);
	return 0;
}
/*
 * Buffer-head allocation
 */
static struct kmem_cache *bh_cachep __read_mostly;

/*
 * Once the number of bh's in the machine exceeds this level, we start
 * stripping them in writeback.
 */
static int max_buffer_heads;

int buffer_heads_over_limit;	/* consulted by writeback/reclaim */

/* Per-cpu buffer_head accounting, folded together in recalc_bh_state() */
struct bh_accounting {
	int nr;			/* Number of live bh's */
	int ratelimit;		/* Limit cacheline bouncing */
};

static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};

/*
 * Recompute buffer_heads_over_limit from the per-cpu counters.  To limit
 * cacheline bouncing, the global total is only recomputed once every
 * 4096 calls on each cpu.
 */
static void recalc_bh_state(void)
{
	int i;
	int tot = 0;

	if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
		return;
	__this_cpu_write(bh_accounting.ratelimit, 0);
	for_each_online_cpu(i)
		tot += per_cpu(bh_accounting, i).nr;
	buffer_heads_over_limit = (tot > max_buffer_heads);
}
/*
 * alloc_buffer_head - allocate a zeroed buffer_head and account for it in
 * the per-cpu counters.  Returns NULL on allocation failure.
 */
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
{
	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
	if (ret) {
		INIT_LIST_HEAD(&ret->b_assoc_buffers);
		preempt_disable();	/* keep the accounting on one cpu */
		__this_cpu_inc(bh_accounting.nr);
		recalc_bh_state();
		preempt_enable();
	}
	return ret;
}
EXPORT_SYMBOL(alloc_buffer_head);

/*
 * free_buffer_head - release a buffer_head previously obtained from
 * alloc_buffer_head().  The bh must not be on any assoc list.
 */
void free_buffer_head(struct buffer_head *bh)
{
	BUG_ON(!list_empty(&bh->b_assoc_buffers));
	kmem_cache_free(bh_cachep, bh);
	preempt_disable();	/* keep the accounting on one cpu */
	__this_cpu_dec(bh_accounting.nr);
	recalc_bh_state();
	preempt_enable();
}
EXPORT_SYMBOL(free_buffer_head);
/*
 * buffer_exit_cpu - release the given (dead) cpu's buffer_head LRU and
 * fold its accounting into the current cpu's counter.
 */
static void buffer_exit_cpu(int cpu)
{
	int i;
	struct bh_lru *b = &per_cpu(bh_lrus, cpu);

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
	this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
	per_cpu(bh_accounting, cpu).nr = 0;
}

/* CPU-hotplug callback: clean up per-cpu state when a cpu goes away. */
static int buffer_cpu_notify(struct notifier_block *self,
			      unsigned long action, void *hcpu)
{
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
		buffer_exit_cpu((unsigned long)hcpu);
	return NOTIFY_OK;
}
/**
 * bh_uptodate_or_lock - Test whether the buffer is uptodate
 * @bh: struct buffer_head
 *
 * Return true if the buffer is up-to-date and false,
 * with the buffer locked, if not.
 */
int bh_uptodate_or_lock(struct buffer_head *bh)
{
	if (!buffer_uptodate(bh)) {
		lock_buffer(bh);
		/* Recheck: it may have become uptodate while we slept */
		if (!buffer_uptodate(bh))
			return 0;
		unlock_buffer(bh);
	}
	return 1;
}
EXPORT_SYMBOL(bh_uptodate_or_lock);

/**
 * bh_submit_read - Submit a locked buffer for reading
 * @bh: struct buffer_head
 *
 * Returns zero on success and -EIO on error.  The buffer is unlocked on
 * return (by the I/O completion handler).
 */
int bh_submit_read(struct buffer_head *bh)
{
	BUG_ON(!buffer_locked(bh));

	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}

	get_bh(bh);	/* dropped by end_buffer_read_sync() */
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return 0;
	return -EIO;
}
EXPORT_SYMBOL(bh_submit_read);
/*
 * buffer_init - boot-time setup of the buffer_head slab cache, the bh
 * occupancy limit and the cpu-hotplug callback.
 */
void __init buffer_init(void)
{
	int nrpages;

	bh_cachep = kmem_cache_create("buffer_head",
			sizeof(struct buffer_head), 0,
				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
				SLAB_MEM_SPREAD),
				NULL);

	/*
	 * Limit the bh occupancy to 10% of ZONE_NORMAL
	 */
	nrpages = (nr_free_buffer_pages() * 10) / 100;
	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
	hotcpu_notifier(buffer_cpu_notify, 0);
}
| soap-DEIM/l4android | fs/buffer.c | C | gpl-2.0 | 87,133 |
/* Helper handling for netfilter. */
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
* (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
* (C) 2006-2012 Patrick McHardy <kaber@trash.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/random.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/rtnetlink.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_log.h>
static DEFINE_MUTEX(nf_ct_helper_mutex);
struct hlist_head *nf_ct_helper_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_helper_hash);
unsigned int nf_ct_helper_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_helper_hsize);
static unsigned int nf_ct_helper_count __read_mostly;
static bool nf_ct_auto_assign_helper __read_mostly = false;
module_param_named(nf_conntrack_helper, nf_ct_auto_assign_helper, bool, 0644);
MODULE_PARM_DESC(nf_conntrack_helper,
"Enable automatic conntrack helper assignment (default 0)");
/* Stupid hash, but collision free for the default registrations of the
* helpers currently in the kernel. */
static unsigned int helper_hash(const struct nf_conntrack_tuple *tuple)
{
return (((tuple->src.l3num << 8) | tuple->dst.protonum) ^
(__force __u16)tuple->src.u.all) % nf_ct_helper_hsize;
}
/*
 * __nf_ct_helper_find - look up a helper by conntrack tuple.
 *
 * The mask wildcards everything except the source port, l3num and
 * protonum.  Caller must hold rcu_read_lock(); may return NULL.
 */
static struct nf_conntrack_helper *
__nf_ct_helper_find(const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_helper *helper;
	struct nf_conntrack_tuple_mask mask = { .src.u.all = htons(0xFFFF) };
	unsigned int h;

	if (!nf_ct_helper_count)	/* fast path: nothing registered */
		return NULL;

	h = helper_hash(tuple);
	hlist_for_each_entry_rcu(helper, &nf_ct_helper_hash[h], hnode) {
		if (nf_ct_tuple_src_mask_cmp(tuple, &helper->tuple, &mask))
			return helper;
	}
	return NULL;
}
/*
 * __nf_conntrack_helper_find - look up a helper by name, l3 protocol and
 * transport protocol.  A helper registered with NFPROTO_UNSPEC matches
 * any l3num.  Caller must hold rcu_read_lock(); may return NULL.
 */
struct nf_conntrack_helper *
__nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum)
{
	struct nf_conntrack_helper *h;
	unsigned int i;

	/* Name lookup has no hash: scan every bucket */
	for (i = 0; i < nf_ct_helper_hsize; i++) {
		hlist_for_each_entry_rcu(h, &nf_ct_helper_hash[i], hnode) {
			if (strcmp(h->name, name))
				continue;
			if (h->tuple.src.l3num != NFPROTO_UNSPEC &&
			    h->tuple.src.l3num != l3num)
				continue;
			if (h->tuple.dst.protonum == protonum)
				return h;
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_helper_find);
/*
 * nf_conntrack_helper_try_module_get - find a helper and pin both its
 * owning module and its refcount.
 *
 * If the helper is absent, autoloading via "nfct-helper-<name>" is
 * attempted under CONFIG_MODULES.  Returns NULL when the helper cannot
 * be found or is going away.  Release with nf_conntrack_helper_put().
 */
struct nf_conntrack_helper *
nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum)
{
	struct nf_conntrack_helper *h;

	rcu_read_lock();

	h = __nf_conntrack_helper_find(name, l3num, protonum);
#ifdef CONFIG_MODULES
	if (h == NULL) {
		/* request_module() may sleep: drop the rcu lock first */
		rcu_read_unlock();
		if (request_module("nfct-helper-%s", name) == 0) {
			rcu_read_lock();
			h = __nf_conntrack_helper_find(name, l3num, protonum);
		} else {
			return h;
		}
	}
#endif
	if (h != NULL && !try_module_get(h->me))
		h = NULL;
	if (h != NULL && !refcount_inc_not_zero(&h->refcnt)) {
		/* Helper is being unregistered: undo the module grab */
		module_put(h->me);
		h = NULL;
	}

	rcu_read_unlock();

	return h;
}
EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get);

/* Drop the references taken by nf_conntrack_helper_try_module_get(). */
void nf_conntrack_helper_put(struct nf_conntrack_helper *helper)
{
	refcount_dec(&helper->refcnt);
	module_put(helper->me);
}
EXPORT_SYMBOL_GPL(nf_conntrack_helper_put);
/*
 * nf_ct_helper_ext_add - attach the helper extension area to a conntrack.
 *
 * Returns the new extension or NULL if nf_ct_ext_add() failed.  On
 * success the expectation list head is initialised.
 */
struct nf_conn_help *
nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp)
{
	struct nf_conn_help *help;

	help = nf_ct_ext_add(ct, NF_CT_EXT_HELPER, gfp);
	if (help)
		INIT_HLIST_HEAD(&help->expectations);
	else
		/* Add the missing trailing newline so the debug message is
		 * not concatenated with the next log line. */
		pr_debug("failed to add helper extension area\n");
	return help;
}
EXPORT_SYMBOL_GPL(nf_ct_helper_ext_add);
/*
 * nf_ct_lookup_helper - tuple-based helper lookup for auto-assignment.
 *
 * When the auto-assign sysctl is off, returns NULL; if a helper would
 * have matched, a one-shot warning is emitted so admins know to use the
 * iptables CT target instead.
 */
static struct nf_conntrack_helper *
nf_ct_lookup_helper(struct nf_conn *ct, struct net *net)
{
	if (!net->ct.sysctl_auto_assign_helper) {
		if (net->ct.auto_assign_helper_warned)
			return NULL;
		if (!__nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple))
			return NULL;
		/* Fixed string concatenation: the second fragment started
		 * with a space after "CT-based ", producing a double space
		 * in the emitted message. */
		pr_info("nf_conntrack: default automatic helper assignment "
			"has been turned off for security reasons and CT-based "
			"firewall rule not found. Use the iptables CT target "
			"to attach helpers instead.\n");
		net->ct.auto_assign_helper_warned = 1;
		return NULL;
	}

	return __nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
}
/*
 * __nf_ct_try_assign_helper - attach a helper to @ct, either inherited
 * from the template @tmpl or auto-assigned by reply-tuple lookup.
 * Returns 0 on success (including "no helper applies") or -ENOMEM.
 */
int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
			      gfp_t flags)
{
	struct nf_conntrack_helper *helper = NULL;
	struct nf_conn_help *help;
	struct net *net = nf_ct_net(ct);

	/* We already got a helper explicitly attached. The function
	 * nf_conntrack_alter_reply - in case NAT is in use - asks for looking
	 * the helper up again. Since now the user is in full control of
	 * making consistent helper configurations, skip this automatic
	 * re-lookup, otherwise we'll lose the helper.
	 */
	if (test_bit(IPS_HELPER_BIT, &ct->status))
		return 0;

	/* Prefer a helper inherited from the conntrack template, if any */
	if (tmpl != NULL) {
		help = nfct_help(tmpl);
		if (help != NULL) {
			helper = help->helper;
			set_bit(IPS_HELPER_BIT, &ct->status);
		}
	}

	help = nfct_help(ct);

	if (helper == NULL) {
		helper = nf_ct_lookup_helper(ct, net);
		if (helper == NULL) {
			/* No helper applies: clear any stale pointer */
			if (help)
				RCU_INIT_POINTER(help->helper, NULL);
			return 0;
		}
	}

	if (help == NULL) {
		help = nf_ct_helper_ext_add(ct, flags);
		if (help == NULL)
			return -ENOMEM;
	} else {
		/* We only allow helper re-assignment of the same sort since
		 * we cannot reallocate the helper extension area.
		 */
		struct nf_conntrack_helper *tmp = rcu_dereference(help->helper);

		if (tmp && tmp->help != helper->help) {
			RCU_INIT_POINTER(help->helper, NULL);
			return 0;
		}
	}

	rcu_assign_pointer(help->helper, helper);

	return 0;
}
EXPORT_SYMBOL_GPL(__nf_ct_try_assign_helper);
/* appropriate ct lock protecting must be taken by caller */
static int unhelp(struct nf_conn *ct, void *me)
{
	struct nf_conn_help *help = nfct_help(ct);

	/* Detach only if this conntrack uses the helper being removed */
	if (help && rcu_dereference_raw(help->helper) == me) {
		nf_conntrack_event(IPCT_HELPER, ct);
		RCU_INIT_POINTER(help->helper, NULL);
	}

	/* We are not intended to delete this conntrack. */
	return 0;
}

/* Invoke the helper's destroy() hook, if any, for a dying conntrack. */
void nf_ct_helper_destroy(struct nf_conn *ct)
{
	struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_helper *helper;

	if (help) {
		rcu_read_lock();
		helper = rcu_dereference(help->helper);
		if (helper && helper->destroy)
			helper->destroy(ct);
		rcu_read_unlock();
	}
}
/* Registry of named expectation callbacks, guarded by the expect lock
 * for writers and RCU for readers. */
static LIST_HEAD(nf_ct_helper_expectfn_list);

/* Add @n to the expectfn registry. */
void nf_ct_helper_expectfn_register(struct nf_ct_helper_expectfn *n)
{
	spin_lock_bh(&nf_conntrack_expect_lock);
	list_add_rcu(&n->head, &nf_ct_helper_expectfn_list);
	spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_register);

/* Remove @n from the expectfn registry.  RCU readers may still observe
 * it until a grace period elapses. */
void nf_ct_helper_expectfn_unregister(struct nf_ct_helper_expectfn *n)
{
	spin_lock_bh(&nf_conntrack_expect_lock);
	list_del_rcu(&n->head);
	spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_unregister);
/* Look up an expectfn entry by name.  Caller should hold the rcu lock.
 * Returns NULL when no entry matches. */
struct nf_ct_helper_expectfn *
nf_ct_helper_expectfn_find_by_name(const char *name)
{
	struct nf_ct_helper_expectfn *cur;

	list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) {
		if (strcmp(cur->name, name) == 0)
			return cur;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_name);
/* Look up an expectfn entry by its callback pointer.  Caller should hold
 * the rcu lock.  Returns NULL when no entry matches. */
struct nf_ct_helper_expectfn *
nf_ct_helper_expectfn_find_by_symbol(const void *symbol)
{
	struct nf_ct_helper_expectfn *cur;

	list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) {
		if (cur->expectfn == symbol)
			return cur;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_symbol);
/*
 * nf_ct_helper_log - log a packet being dropped by a conntrack helper,
 * prefixed with the helper's name.  Must run with rcu_read_lock held
 * (nf_hook_thresh provides it on the packet path).
 */
__printf(3, 4)
void nf_ct_helper_log(struct sk_buff *skb, const struct nf_conn *ct,
		      const char *fmt, ...)
{
	const struct nf_conn_help *help;
	const struct nf_conntrack_helper *helper;
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	/* Called from the helper function, this call never fails */
	help = nfct_help(ct);

	/* rcu_read_lock()ed by nf_hook_thresh */
	helper = rcu_dereference(help->helper);

	nf_log_packet(nf_ct_net(ct), nf_ct_l3num(ct), 0, skb, NULL, NULL, NULL,
		      "nf_ct_%s: dropping packet: %pV ", helper->name, &vaf);

	va_end(args);
}
EXPORT_SYMBOL_GPL(nf_ct_helper_log);
/*
 * nf_conntrack_helper_register - add @me to the global helper hash.
 *
 * Fails with -EEXIST when a helper with the same name/l3num/protonum is
 * already present or (for kernel helpers) when one already claims the
 * same tuple; -EINVAL when the expect policy limit is exceeded.
 * Returns 0 on success.
 */
int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
{
	struct nf_conntrack_tuple_mask mask = { .src.u.all = htons(0xFFFF) };
	unsigned int h = helper_hash(&me->tuple);
	struct nf_conntrack_helper *cur;
	int ret = 0, i;

	BUG_ON(me->expect_policy == NULL);
	BUG_ON(me->expect_class_max >= NF_CT_MAX_EXPECT_CLASSES);
	BUG_ON(strlen(me->name) > NF_CT_HELPER_NAME_LEN - 1);

	if (me->expect_policy->max_expected > NF_CT_EXPECT_MAX_CNT)
		return -EINVAL;

	mutex_lock(&nf_ct_helper_mutex);
	/* Reject duplicate name/l3num/protonum registrations */
	for (i = 0; i < nf_ct_helper_hsize; i++) {
		hlist_for_each_entry(cur, &nf_ct_helper_hash[i], hnode) {
			if (!strcmp(cur->name, me->name) &&
			    (cur->tuple.src.l3num == NFPROTO_UNSPEC ||
			     cur->tuple.src.l3num == me->tuple.src.l3num) &&
			    cur->tuple.dst.protonum == me->tuple.dst.protonum) {
				ret = -EEXIST;
				goto out;
			}
		}
	}

	/* avoid unpredictable behaviour for auto_assign_helper */
	if (!(me->flags & NF_CT_HELPER_F_USERSPACE)) {
		hlist_for_each_entry(cur, &nf_ct_helper_hash[h], hnode) {
			if (nf_ct_tuple_src_mask_cmp(&cur->tuple, &me->tuple,
						     &mask)) {
				ret = -EEXIST;
				goto out;
			}
		}
	}
	refcount_set(&me->refcnt, 1);
	hlist_add_head_rcu(&me->hnode, &nf_ct_helper_hash[h]);
	nf_ct_helper_count++;
out:
	mutex_unlock(&nf_ct_helper_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_helper_register);
/* Iterator callback: does this expectation reference helper @data? */
static bool expect_iter_me(struct nf_conntrack_expect *exp, void *data)
{
	struct nf_conn_help *help = nfct_help(exp->master);
	const struct nf_conntrack_helper *me = data;
	const struct nf_conntrack_helper *this;

	if (exp->helper == me)
		return true;

	this = rcu_dereference_protected(help->helper,
					 lockdep_is_held(&nf_conntrack_expect_lock));
	return this == me;
}

/*
 * nf_conntrack_helper_unregister - remove @me from the helper hash and
 * detach it from every expectation and conntrack that references it.
 */
void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
{
	mutex_lock(&nf_ct_helper_mutex);
	hlist_del_rcu(&me->hnode);
	nf_ct_helper_count--;
	mutex_unlock(&nf_ct_helper_mutex);

	/* Make sure nothing else can still pick up the helper, except for
	 * connections already in the hash.
	 */
	synchronize_rcu();

	nf_ct_expect_iterate_destroy(expect_iter_me, NULL);
	nf_ct_iterate_destroy(unhelp, me);

	/* Maybe someone has gotten the helper already when unhelp above.
	 * So need to wait it.
	 */
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister);
/*
 * nf_ct_helper_init - fill in a helper template.
 *
 * The helper name is @name when @spec_port equals @default_port and
 * "<name>-<id>" otherwise, so multiple port-specific instances can be
 * registered side by side.
 */
void nf_ct_helper_init(struct nf_conntrack_helper *helper,
		       u16 l3num, u16 protonum, const char *name,
		       u16 default_port, u16 spec_port, u32 id,
		       const struct nf_conntrack_expect_policy *exp_pol,
		       u32 expect_class_max,
		       int (*help)(struct sk_buff *skb, unsigned int protoff,
				   struct nf_conn *ct,
				   enum ip_conntrack_info ctinfo),
		       int (*from_nlattr)(struct nlattr *attr,
					  struct nf_conn *ct),
		       struct module *module)
{
	helper->tuple.src.l3num = l3num;
	helper->tuple.dst.protonum = protonum;
	helper->tuple.src.u.all = htons(spec_port);
	helper->expect_policy = exp_pol;
	helper->expect_class_max = expect_class_max;
	helper->help = help;
	helper->from_nlattr = from_nlattr;
	helper->me = module;

	if (spec_port == default_port)
		snprintf(helper->name, sizeof(helper->name), "%s", name);
	else
		snprintf(helper->name, sizeof(helper->name), "%s-%u", name, id);
}
EXPORT_SYMBOL_GPL(nf_ct_helper_init);
/*
 * nf_conntrack_helpers_register - register an array of @n helpers.
 * On the first failure every helper registered so far is unregistered
 * again and the error is returned; otherwise 0.
 */
int nf_conntrack_helpers_register(struct nf_conntrack_helper *helper,
				  unsigned int n)
{
	unsigned int done;

	for (done = 0; done < n; done++) {
		int ret = nf_conntrack_helper_register(&helper[done]);

		if (ret < 0) {
			/* roll back the ones that made it in */
			nf_conntrack_helpers_unregister(helper, done);
			return ret;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_helpers_register);
/* Unregister the first @n helpers of @helper, last-to-first. */
void nf_conntrack_helpers_unregister(struct nf_conntrack_helper *helper,
				     unsigned int n)
{
	while (n) {
		n--;
		nf_conntrack_helper_unregister(&helper[n]);
	}
}
EXPORT_SYMBOL_GPL(nf_conntrack_helpers_unregister);
/* Extension descriptor for the per-conntrack helper area */
static const struct nf_ct_ext_type helper_extend = {
	.len	= sizeof(struct nf_conn_help),
	.align	= __alignof__(struct nf_conn_help),
	.id	= NF_CT_EXT_HELPER,
};

/* Per-netns init: seed the auto-assign sysctl from the module param. */
void nf_conntrack_helper_pernet_init(struct net *net)
{
	net->ct.auto_assign_helper_warned = false;
	net->ct.sysctl_auto_assign_helper = nf_ct_auto_assign_helper;
}

/*
 * nf_conntrack_helper_init - subsystem init: allocate the helper hash
 * and register the conntrack extension.  Returns 0 or a negative errno.
 */
int nf_conntrack_helper_init(void)
{
	int ret;
	nf_ct_helper_hsize = 1; /* gets rounded up to use one page */
	nf_ct_helper_hash =
		nf_ct_alloc_hashtable(&nf_ct_helper_hsize, 0);
	if (!nf_ct_helper_hash)
		return -ENOMEM;

	ret = nf_ct_extend_register(&helper_extend);
	if (ret < 0) {
		pr_err("nf_ct_helper: Unable to register helper extension.\n");
		goto out_extend;
	}

	return 0;
out_extend:
	kvfree(nf_ct_helper_hash);
	return ret;
}

/* Subsystem teardown: mirror of nf_conntrack_helper_init(). */
void nf_conntrack_helper_fini(void)
{
	nf_ct_extend_unregister(&helper_extend);
	kvfree(nf_ct_helper_hash);
}
| qzhuyan/linux | net/netfilter/nf_conntrack_helper.c | C | gpl-2.0 | 13,479 |
# Testing and Debugging
## Testing
[Moved here](faq_misc.md#testing)
## Debugging :id=debugging
[Moved here](faq_debug.md#debugging)
| kmtoki/qmk_firmware | docs/newbs_testing_debugging.md | Markdown | gpl-2.0 | 137 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.presto.metadata;
import com.facebook.presto.spi.type.Type;
import java.util.Map;
import java.util.Objects;
import static com.google.common.base.Preconditions.checkNotNull;
/**
 * Cache key identifying one specialization of a parametric function:
 * the function itself (compared by signature), the concrete types bound
 * to its type parameters, and the arity it was invoked with.
 */
public class SpecializedFunctionKey
{
    private final ParametricFunction function;
    private final Map<String, Type> boundTypeParameters;
    private final int arity;

    public SpecializedFunctionKey(ParametricFunction function, Map<String, Type> boundTypeParameters, int arity)
    {
        this.function = checkNotNull(function, "function is null");
        this.boundTypeParameters = checkNotNull(boundTypeParameters, "boundTypeParameters is null");
        this.arity = arity;
    }

    public ParametricFunction getFunction()
    {
        return function;
    }

    public Map<String, Type> getBoundTypeParameters()
    {
        return boundTypeParameters;
    }

    public int getArity()
    {
        return arity;
    }

    @Override
    public boolean equals(Object o)
    {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        SpecializedFunctionKey that = (SpecializedFunctionKey) o;
        // Compare the primitive directly instead of Objects.equals(arity, that.arity),
        // which autoboxed both ints on every call.  Functions are compared by
        // signature, mirroring hashCode() below.
        return arity == that.arity &&
                Objects.equals(boundTypeParameters, that.boundTypeParameters) &&
                Objects.equals(function.getSignature(), that.function.getSignature());
    }

    @Override
    public int hashCode()
    {
        return Objects.hash(function.getSignature(), boundTypeParameters, arity);
    }
}
| kuzemchik/presto | presto-main/src/main/java/com/facebook/presto/metadata/SpecializedFunctionKey.java | Java | apache-2.0 | 2,138 |
/*
* Copyright 2012 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
package com.skia;
import javax.microedition.khronos.egl.EGL10;
import javax.microedition.khronos.egl.EGLConfig;
import javax.microedition.khronos.egl.EGLDisplay;
import javax.microedition.khronos.opengles.GL10;
import android.content.Context;
import android.opengl.EGL14;
import android.opengl.GLSurfaceView;
import android.os.Build;
import android.util.Log;
import android.view.MotionEvent;
/**
 * GLSurfaceView hosting the Skia SampleApp renderer.  All renderer calls
 * are marshalled onto the GL thread via queueEvent().
 */
public class SkiaSampleView extends GLSurfaceView {

    private final SkiaSampleRenderer mSampleRenderer;
    private boolean mRequestedOpenGLAPI; // true == use (desktop) OpenGL. false == use OpenGL ES.
    private int mRequestedMSAASampleCount;

    public SkiaSampleView(Context ctx, String cmdLineFlags, boolean useOpenGL, int msaaSampleCount) {
        super(ctx);

        mSampleRenderer = new SkiaSampleRenderer(this, cmdLineFlags);
        mRequestedMSAASampleCount = msaaSampleCount;

        setEGLContextClientVersion(2);
        if (Build.VERSION.SDK_INT < Build.VERSION_CODES.JELLY_BEAN_MR1) {
            // Pre-JB-MR1 devices use a fixed RGBA8888 + stencil config;
            // the custom chooser below relies on newer EGL14 APIs.
            setEGLConfigChooser(8, 8, 8, 8, 0, 8);
        } else {
            mRequestedOpenGLAPI = useOpenGL;
            setEGLConfigChooser(new SampleViewEGLConfigChooser());
        }
        setRenderer(mSampleRenderer);
        setRenderMode(GLSurfaceView.RENDERMODE_WHEN_DIRTY);
    }
    /**
     * Forwards every pointer of the touch event to the renderer on the
     * GL thread, normalizing ACTION_POINTER_UP/DOWN to ACTION_UP/DOWN.
     *
     * NOTE(review): the translated action is applied to every pointer in
     * the event, not only the pointer that actually went up or down —
     * confirm this is the intended multi-touch behavior.
     */
    @Override
    public boolean onTouchEvent(MotionEvent event) {
        int count = event.getPointerCount();
        for (int i = 0; i < count; i++) {
            final float x = event.getX(i);
            final float y = event.getY(i);
            final int owner = event.getPointerId(i);
            int action = event.getAction() & MotionEvent.ACTION_MASK;
            switch (action) {
            case MotionEvent.ACTION_POINTER_UP:
                action = MotionEvent.ACTION_UP;
                break;
            case MotionEvent.ACTION_POINTER_DOWN:
                action = MotionEvent.ACTION_DOWN;
                break;
            default:
                break;
            }
            final int finalAction = action;
            // handleClick must run on the rendering thread.
            queueEvent(new Runnable() {
                @Override
                public void run() {
                    mSampleRenderer.handleClick(owner, x, y, finalAction);
                }
            });
        }
        return true;
    }
    // ------------------------------------------------------------------
    // UI command forwarders.  Each wraps a renderer call in queueEvent()
    // so it executes on the GL thread.
    // ------------------------------------------------------------------

    /** Requests a redraw of the current sample. */
    public void inval() {
        queueEvent(new Runnable() {
            @Override
            public void run() {
                mSampleRenderer.postInval();
            }
        });
    }

    /** Shuts the renderer down. */
    public void terminate() {
        queueEvent(new Runnable() {
            @Override
            public void run() {
                mSampleRenderer.term();
            }
        });
    }

    /** Switches to the sample overview screen. */
    public void showOverview() {
        queueEvent(new Runnable() {
            @Override
            public void run() {
                mSampleRenderer.showOverview();
            }
        });
    }

    /** Advances to the next sample. */
    public void nextSample() {
        queueEvent(new Runnable() {
            @Override
            public void run() {
                mSampleRenderer.nextSample();
            }
        });
    }

    /** Goes back to the previous sample. */
    public void previousSample() {
        queueEvent(new Runnable() {
            @Override
            public void run() {
                mSampleRenderer.previousSample();
            }
        });
    }

    /** Jumps directly to the sample at the given index. */
    public void goToSample(final int position) {
        queueEvent(new Runnable() {
            @Override
            public void run() {
                mSampleRenderer.goToSample(position);
            }
        });
    }

    /** Cycles raster/GPU rendering mode. */
    public void toggleRenderingMode() {
        queueEvent(new Runnable() {
            @Override
            public void run() {
                mSampleRenderer.toggleRenderingMode();
            }
        });
    }

    /** Toggles automatic sample slideshow. */
    public void toggleSlideshow() {
        queueEvent(new Runnable() {
            @Override
            public void run() {
                mSampleRenderer.toggleSlideshow();
            }
        });
    }

    /** Toggles the frames-per-second overlay. */
    public void toggleFPS() {
        queueEvent(new Runnable() {
            @Override
            public void run() {
                mSampleRenderer.toggleFPS();
            }
        });
    }

    /** Toggles tiled rendering. */
    public void toggleTiling() {
        queueEvent(new Runnable() {
            @Override
            public void run() {
                mSampleRenderer.toggleTiling();
            }
        });
    }

    /** Toggles bounding-box visualization. */
    public void toggleBBox() {
        queueEvent(new Runnable() {
            @Override
            public void run() {
                mSampleRenderer.toggleBBox();
            }
        });
    }

    /** Saves the current sample to a PDF. */
    public void saveToPDF() {
        queueEvent(new Runnable() {
            @Override
            public void run() {
                mSampleRenderer.saveToPDF();
            }
        });
    }

    /** Returns whether the desktop OpenGL API was requested (vs. GLES). */
    public boolean getUsesOpenGLAPI() {
        return mRequestedOpenGLAPI;
    }

    /** Returns the MSAA sample count actually in use by the renderer. */
    public int getMSAASampleCount() {
        return mSampleRenderer.getMSAASampleCount();
    }
private class SampleViewEGLConfigChooser implements GLSurfaceView.EGLConfigChooser {
@Override
public EGLConfig chooseConfig(EGL10 egl, EGLDisplay display) {
int numConfigs = 0;
int[] configSpec = null;
int[] value = new int[1];
int[] validAPIs = new int[] {
EGL14.EGL_OPENGL_API,
EGL14.EGL_OPENGL_ES_API
};
int initialAPI = mRequestedOpenGLAPI ? 0 : 1;
for (int i = initialAPI; i < validAPIs.length && numConfigs == 0; i++) {
int currentAPI = validAPIs[i];
EGL14.eglBindAPI(currentAPI);
// setup the renderableType which will only be included in the
// spec if we are attempting to get access to the OpenGL APIs.
int renderableType = EGL14.EGL_OPENGL_BIT;
if (currentAPI == EGL14.EGL_OPENGL_API) {
renderableType = EGL14.EGL_OPENGL_ES2_BIT;
}
if (mRequestedMSAASampleCount > 0) {
configSpec = new int[] {
EGL10.EGL_RED_SIZE, 8,
EGL10.EGL_GREEN_SIZE, 8,
EGL10.EGL_BLUE_SIZE, 8,
EGL10.EGL_ALPHA_SIZE, 8,
EGL10.EGL_DEPTH_SIZE, 0,
EGL10.EGL_STENCIL_SIZE, 8,
EGL10.EGL_SAMPLE_BUFFERS, 1,
EGL10.EGL_SAMPLES, mRequestedMSAASampleCount,
EGL10.EGL_RENDERABLE_TYPE, renderableType,
EGL10.EGL_NONE
};
// EGL_RENDERABLE_TYPE is only needed when attempting to use
// the OpenGL API (not ES) and causes many EGL drivers to fail
// with a BAD_ATTRIBUTE error.
if (!mRequestedOpenGLAPI) {
configSpec[16] = EGL10.EGL_NONE;
Log.i("Skia", "spec: " + configSpec);
}
if (!egl.eglChooseConfig(display, configSpec, null, 0, value)) {
Log.i("Skia", "Could not get MSAA context count: " + mRequestedMSAASampleCount);
}
numConfigs = value[0];
}
if (numConfigs <= 0) {
// Try without multisampling.
configSpec = new int[] {
EGL10.EGL_RED_SIZE, 8,
EGL10.EGL_GREEN_SIZE, 8,
EGL10.EGL_BLUE_SIZE, 8,
EGL10.EGL_ALPHA_SIZE, 8,
EGL10.EGL_DEPTH_SIZE, 0,
EGL10.EGL_STENCIL_SIZE, 8,
EGL10.EGL_RENDERABLE_TYPE, renderableType,
EGL10.EGL_NONE
};
// EGL_RENDERABLE_TYPE is only needed when attempting to use
// the OpenGL API (not ES) and causes many EGL drivers to fail
// with a BAD_ATTRIBUTE error.
if (!mRequestedOpenGLAPI) {
configSpec[12] = EGL10.EGL_NONE;
Log.i("Skia", "spec: " + configSpec);
}
if (!egl.eglChooseConfig(display, configSpec, null, 0, value)) {
Log.i("Skia", "Could not get non-MSAA context count");
}
numConfigs = value[0];
}
}
if (numConfigs <= 0) {
throw new IllegalArgumentException("No configs match configSpec");
}
// Get all matching configurations.
EGLConfig[] configs = new EGLConfig[numConfigs];
if (!egl.eglChooseConfig(display, configSpec, configs, numConfigs, value)) {
throw new IllegalArgumentException("Could not get config data");
}
for (int i = 0; i < configs.length; ++i) {
EGLConfig config = configs[i];
if (findConfigAttrib(egl, display, config , EGL10.EGL_RED_SIZE, 0) == 8 &&
findConfigAttrib(egl, display, config, EGL10.EGL_BLUE_SIZE, 0) == 8 &&
findConfigAttrib(egl, display, config, EGL10.EGL_GREEN_SIZE, 0) == 8 &&
findConfigAttrib(egl, display, config, EGL10.EGL_ALPHA_SIZE, 0) == 8 &&
findConfigAttrib(egl, display, config, EGL10.EGL_STENCIL_SIZE, 0) == 8) {
return config;
}
}
throw new IllegalArgumentException("Could not find suitable EGL config");
}
private int findConfigAttrib(EGL10 egl, EGLDisplay display,
EGLConfig config, int attribute, int defaultValue) {
int[] value = new int[1];
if (egl.eglGetConfigAttrib(display, config, attribute, value)) {
return value[0];
}
return defaultValue;
}
}
}
| zero-rp/miniblink49 | third_party/skia/platform_tools/android/app/src/com/skia/SkiaSampleView.java | Java | apache-2.0 | 10,208 |
// +build !ignore_autogenerated
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by conversion-gen. Do not edit it manually!
package v1beta1
import (
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
rbac "k8s.io/kubernetes/pkg/apis/rbac"
unsafe "unsafe"
)
func init() {
SchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(scheme *runtime.Scheme) error {
return scheme.AddGeneratedConversionFuncs(
Convert_v1beta1_ClusterRole_To_rbac_ClusterRole,
Convert_rbac_ClusterRole_To_v1beta1_ClusterRole,
Convert_v1beta1_ClusterRoleBinding_To_rbac_ClusterRoleBinding,
Convert_rbac_ClusterRoleBinding_To_v1beta1_ClusterRoleBinding,
Convert_v1beta1_ClusterRoleBindingBuilder_To_rbac_ClusterRoleBindingBuilder,
Convert_rbac_ClusterRoleBindingBuilder_To_v1beta1_ClusterRoleBindingBuilder,
Convert_v1beta1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList,
Convert_rbac_ClusterRoleBindingList_To_v1beta1_ClusterRoleBindingList,
Convert_v1beta1_ClusterRoleList_To_rbac_ClusterRoleList,
Convert_rbac_ClusterRoleList_To_v1beta1_ClusterRoleList,
Convert_v1beta1_PolicyRule_To_rbac_PolicyRule,
Convert_rbac_PolicyRule_To_v1beta1_PolicyRule,
Convert_v1beta1_PolicyRuleBuilder_To_rbac_PolicyRuleBuilder,
Convert_rbac_PolicyRuleBuilder_To_v1beta1_PolicyRuleBuilder,
Convert_v1beta1_Role_To_rbac_Role,
Convert_rbac_Role_To_v1beta1_Role,
Convert_v1beta1_RoleBinding_To_rbac_RoleBinding,
Convert_rbac_RoleBinding_To_v1beta1_RoleBinding,
Convert_v1beta1_RoleBindingList_To_rbac_RoleBindingList,
Convert_rbac_RoleBindingList_To_v1beta1_RoleBindingList,
Convert_v1beta1_RoleList_To_rbac_RoleList,
Convert_rbac_RoleList_To_v1beta1_RoleList,
Convert_v1beta1_RoleRef_To_rbac_RoleRef,
Convert_rbac_RoleRef_To_v1beta1_RoleRef,
Convert_v1beta1_Subject_To_rbac_Subject,
Convert_rbac_Subject_To_v1beta1_Subject,
)
}
func autoConvert_v1beta1_ClusterRole_To_rbac_ClusterRole(in *ClusterRole, out *rbac.ClusterRole, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Rules = *(*[]rbac.PolicyRule)(unsafe.Pointer(&in.Rules))
return nil
}
func Convert_v1beta1_ClusterRole_To_rbac_ClusterRole(in *ClusterRole, out *rbac.ClusterRole, s conversion.Scope) error {
return autoConvert_v1beta1_ClusterRole_To_rbac_ClusterRole(in, out, s)
}
func autoConvert_rbac_ClusterRole_To_v1beta1_ClusterRole(in *rbac.ClusterRole, out *ClusterRole, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Rules = *(*[]PolicyRule)(unsafe.Pointer(&in.Rules))
return nil
}
func Convert_rbac_ClusterRole_To_v1beta1_ClusterRole(in *rbac.ClusterRole, out *ClusterRole, s conversion.Scope) error {
return autoConvert_rbac_ClusterRole_To_v1beta1_ClusterRole(in, out, s)
}
func autoConvert_v1beta1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in *ClusterRoleBinding, out *rbac.ClusterRoleBinding, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Subjects = *(*[]rbac.Subject)(unsafe.Pointer(&in.Subjects))
if err := Convert_v1beta1_RoleRef_To_rbac_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil {
return err
}
return nil
}
func Convert_v1beta1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in *ClusterRoleBinding, out *rbac.ClusterRoleBinding, s conversion.Scope) error {
return autoConvert_v1beta1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in, out, s)
}
func autoConvert_rbac_ClusterRoleBinding_To_v1beta1_ClusterRoleBinding(in *rbac.ClusterRoleBinding, out *ClusterRoleBinding, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Subjects = *(*[]Subject)(unsafe.Pointer(&in.Subjects))
if err := Convert_rbac_RoleRef_To_v1beta1_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil {
return err
}
return nil
}
func Convert_rbac_ClusterRoleBinding_To_v1beta1_ClusterRoleBinding(in *rbac.ClusterRoleBinding, out *ClusterRoleBinding, s conversion.Scope) error {
return autoConvert_rbac_ClusterRoleBinding_To_v1beta1_ClusterRoleBinding(in, out, s)
}
func autoConvert_v1beta1_ClusterRoleBindingBuilder_To_rbac_ClusterRoleBindingBuilder(in *ClusterRoleBindingBuilder, out *rbac.ClusterRoleBindingBuilder, s conversion.Scope) error {
if err := Convert_v1beta1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(&in.ClusterRoleBinding, &out.ClusterRoleBinding, s); err != nil {
return err
}
return nil
}
func Convert_v1beta1_ClusterRoleBindingBuilder_To_rbac_ClusterRoleBindingBuilder(in *ClusterRoleBindingBuilder, out *rbac.ClusterRoleBindingBuilder, s conversion.Scope) error {
return autoConvert_v1beta1_ClusterRoleBindingBuilder_To_rbac_ClusterRoleBindingBuilder(in, out, s)
}
func autoConvert_rbac_ClusterRoleBindingBuilder_To_v1beta1_ClusterRoleBindingBuilder(in *rbac.ClusterRoleBindingBuilder, out *ClusterRoleBindingBuilder, s conversion.Scope) error {
if err := Convert_rbac_ClusterRoleBinding_To_v1beta1_ClusterRoleBinding(&in.ClusterRoleBinding, &out.ClusterRoleBinding, s); err != nil {
return err
}
return nil
}
func Convert_rbac_ClusterRoleBindingBuilder_To_v1beta1_ClusterRoleBindingBuilder(in *rbac.ClusterRoleBindingBuilder, out *ClusterRoleBindingBuilder, s conversion.Scope) error {
return autoConvert_rbac_ClusterRoleBindingBuilder_To_v1beta1_ClusterRoleBindingBuilder(in, out, s)
}
func autoConvert_v1beta1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in *ClusterRoleBindingList, out *rbac.ClusterRoleBindingList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]rbac.ClusterRoleBinding)(unsafe.Pointer(&in.Items))
return nil
}
func Convert_v1beta1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in *ClusterRoleBindingList, out *rbac.ClusterRoleBindingList, s conversion.Scope) error {
return autoConvert_v1beta1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in, out, s)
}
func autoConvert_rbac_ClusterRoleBindingList_To_v1beta1_ClusterRoleBindingList(in *rbac.ClusterRoleBindingList, out *ClusterRoleBindingList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]ClusterRoleBinding)(unsafe.Pointer(&in.Items))
return nil
}
func Convert_rbac_ClusterRoleBindingList_To_v1beta1_ClusterRoleBindingList(in *rbac.ClusterRoleBindingList, out *ClusterRoleBindingList, s conversion.Scope) error {
return autoConvert_rbac_ClusterRoleBindingList_To_v1beta1_ClusterRoleBindingList(in, out, s)
}
func autoConvert_v1beta1_ClusterRoleList_To_rbac_ClusterRoleList(in *ClusterRoleList, out *rbac.ClusterRoleList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]rbac.ClusterRole)(unsafe.Pointer(&in.Items))
return nil
}
func Convert_v1beta1_ClusterRoleList_To_rbac_ClusterRoleList(in *ClusterRoleList, out *rbac.ClusterRoleList, s conversion.Scope) error {
return autoConvert_v1beta1_ClusterRoleList_To_rbac_ClusterRoleList(in, out, s)
}
func autoConvert_rbac_ClusterRoleList_To_v1beta1_ClusterRoleList(in *rbac.ClusterRoleList, out *ClusterRoleList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]ClusterRole)(unsafe.Pointer(&in.Items))
return nil
}
func Convert_rbac_ClusterRoleList_To_v1beta1_ClusterRoleList(in *rbac.ClusterRoleList, out *ClusterRoleList, s conversion.Scope) error {
return autoConvert_rbac_ClusterRoleList_To_v1beta1_ClusterRoleList(in, out, s)
}
func autoConvert_v1beta1_PolicyRule_To_rbac_PolicyRule(in *PolicyRule, out *rbac.PolicyRule, s conversion.Scope) error {
out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs))
out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups))
out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources))
out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames))
out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs))
return nil
}
func Convert_v1beta1_PolicyRule_To_rbac_PolicyRule(in *PolicyRule, out *rbac.PolicyRule, s conversion.Scope) error {
return autoConvert_v1beta1_PolicyRule_To_rbac_PolicyRule(in, out, s)
}
func autoConvert_rbac_PolicyRule_To_v1beta1_PolicyRule(in *rbac.PolicyRule, out *PolicyRule, s conversion.Scope) error {
out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs))
out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups))
out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources))
out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames))
out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs))
return nil
}
func Convert_rbac_PolicyRule_To_v1beta1_PolicyRule(in *rbac.PolicyRule, out *PolicyRule, s conversion.Scope) error {
return autoConvert_rbac_PolicyRule_To_v1beta1_PolicyRule(in, out, s)
}
func autoConvert_v1beta1_PolicyRuleBuilder_To_rbac_PolicyRuleBuilder(in *PolicyRuleBuilder, out *rbac.PolicyRuleBuilder, s conversion.Scope) error {
if err := Convert_v1beta1_PolicyRule_To_rbac_PolicyRule(&in.PolicyRule, &out.PolicyRule, s); err != nil {
return err
}
return nil
}
func Convert_v1beta1_PolicyRuleBuilder_To_rbac_PolicyRuleBuilder(in *PolicyRuleBuilder, out *rbac.PolicyRuleBuilder, s conversion.Scope) error {
return autoConvert_v1beta1_PolicyRuleBuilder_To_rbac_PolicyRuleBuilder(in, out, s)
}
func autoConvert_rbac_PolicyRuleBuilder_To_v1beta1_PolicyRuleBuilder(in *rbac.PolicyRuleBuilder, out *PolicyRuleBuilder, s conversion.Scope) error {
if err := Convert_rbac_PolicyRule_To_v1beta1_PolicyRule(&in.PolicyRule, &out.PolicyRule, s); err != nil {
return err
}
return nil
}
func Convert_rbac_PolicyRuleBuilder_To_v1beta1_PolicyRuleBuilder(in *rbac.PolicyRuleBuilder, out *PolicyRuleBuilder, s conversion.Scope) error {
return autoConvert_rbac_PolicyRuleBuilder_To_v1beta1_PolicyRuleBuilder(in, out, s)
}
func autoConvert_v1beta1_Role_To_rbac_Role(in *Role, out *rbac.Role, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Rules = *(*[]rbac.PolicyRule)(unsafe.Pointer(&in.Rules))
return nil
}
func Convert_v1beta1_Role_To_rbac_Role(in *Role, out *rbac.Role, s conversion.Scope) error {
return autoConvert_v1beta1_Role_To_rbac_Role(in, out, s)
}
func autoConvert_rbac_Role_To_v1beta1_Role(in *rbac.Role, out *Role, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Rules = *(*[]PolicyRule)(unsafe.Pointer(&in.Rules))
return nil
}
func Convert_rbac_Role_To_v1beta1_Role(in *rbac.Role, out *Role, s conversion.Scope) error {
return autoConvert_rbac_Role_To_v1beta1_Role(in, out, s)
}
func autoConvert_v1beta1_RoleBinding_To_rbac_RoleBinding(in *RoleBinding, out *rbac.RoleBinding, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Subjects = *(*[]rbac.Subject)(unsafe.Pointer(&in.Subjects))
if err := Convert_v1beta1_RoleRef_To_rbac_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil {
return err
}
return nil
}
func Convert_v1beta1_RoleBinding_To_rbac_RoleBinding(in *RoleBinding, out *rbac.RoleBinding, s conversion.Scope) error {
return autoConvert_v1beta1_RoleBinding_To_rbac_RoleBinding(in, out, s)
}
func autoConvert_rbac_RoleBinding_To_v1beta1_RoleBinding(in *rbac.RoleBinding, out *RoleBinding, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Subjects = *(*[]Subject)(unsafe.Pointer(&in.Subjects))
if err := Convert_rbac_RoleRef_To_v1beta1_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil {
return err
}
return nil
}
func Convert_rbac_RoleBinding_To_v1beta1_RoleBinding(in *rbac.RoleBinding, out *RoleBinding, s conversion.Scope) error {
return autoConvert_rbac_RoleBinding_To_v1beta1_RoleBinding(in, out, s)
}
func autoConvert_v1beta1_RoleBindingList_To_rbac_RoleBindingList(in *RoleBindingList, out *rbac.RoleBindingList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]rbac.RoleBinding)(unsafe.Pointer(&in.Items))
return nil
}
func Convert_v1beta1_RoleBindingList_To_rbac_RoleBindingList(in *RoleBindingList, out *rbac.RoleBindingList, s conversion.Scope) error {
return autoConvert_v1beta1_RoleBindingList_To_rbac_RoleBindingList(in, out, s)
}
func autoConvert_rbac_RoleBindingList_To_v1beta1_RoleBindingList(in *rbac.RoleBindingList, out *RoleBindingList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]RoleBinding)(unsafe.Pointer(&in.Items))
return nil
}
func Convert_rbac_RoleBindingList_To_v1beta1_RoleBindingList(in *rbac.RoleBindingList, out *RoleBindingList, s conversion.Scope) error {
return autoConvert_rbac_RoleBindingList_To_v1beta1_RoleBindingList(in, out, s)
}
func autoConvert_v1beta1_RoleList_To_rbac_RoleList(in *RoleList, out *rbac.RoleList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]rbac.Role)(unsafe.Pointer(&in.Items))
return nil
}
func Convert_v1beta1_RoleList_To_rbac_RoleList(in *RoleList, out *rbac.RoleList, s conversion.Scope) error {
return autoConvert_v1beta1_RoleList_To_rbac_RoleList(in, out, s)
}
func autoConvert_rbac_RoleList_To_v1beta1_RoleList(in *rbac.RoleList, out *RoleList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]Role)(unsafe.Pointer(&in.Items))
return nil
}
func Convert_rbac_RoleList_To_v1beta1_RoleList(in *rbac.RoleList, out *RoleList, s conversion.Scope) error {
return autoConvert_rbac_RoleList_To_v1beta1_RoleList(in, out, s)
}
func autoConvert_v1beta1_RoleRef_To_rbac_RoleRef(in *RoleRef, out *rbac.RoleRef, s conversion.Scope) error {
out.APIGroup = in.APIGroup
out.Kind = in.Kind
out.Name = in.Name
return nil
}
func Convert_v1beta1_RoleRef_To_rbac_RoleRef(in *RoleRef, out *rbac.RoleRef, s conversion.Scope) error {
return autoConvert_v1beta1_RoleRef_To_rbac_RoleRef(in, out, s)
}
func autoConvert_rbac_RoleRef_To_v1beta1_RoleRef(in *rbac.RoleRef, out *RoleRef, s conversion.Scope) error {
out.APIGroup = in.APIGroup
out.Kind = in.Kind
out.Name = in.Name
return nil
}
func Convert_rbac_RoleRef_To_v1beta1_RoleRef(in *rbac.RoleRef, out *RoleRef, s conversion.Scope) error {
return autoConvert_rbac_RoleRef_To_v1beta1_RoleRef(in, out, s)
}
func autoConvert_v1beta1_Subject_To_rbac_Subject(in *Subject, out *rbac.Subject, s conversion.Scope) error {
out.Kind = in.Kind
out.APIVersion = in.APIVersion
out.Name = in.Name
out.Namespace = in.Namespace
return nil
}
func Convert_v1beta1_Subject_To_rbac_Subject(in *Subject, out *rbac.Subject, s conversion.Scope) error {
return autoConvert_v1beta1_Subject_To_rbac_Subject(in, out, s)
}
func autoConvert_rbac_Subject_To_v1beta1_Subject(in *rbac.Subject, out *Subject, s conversion.Scope) error {
out.Kind = in.Kind
out.APIVersion = in.APIVersion
out.Name = in.Name
out.Namespace = in.Namespace
return nil
}
func Convert_rbac_Subject_To_v1beta1_Subject(in *rbac.Subject, out *Subject, s conversion.Scope) error {
return autoConvert_rbac_Subject_To_v1beta1_Subject(in, out, s)
}
| aweiteka/cri-o | vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/zz_generated.conversion.go | GO | apache-2.0 | 15,279 |
package org.apache.maven.repository.metadata;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.apache.maven.artifact.ArtifactScopeEnum;
/**
* Resolves conflicts in the supplied dependency graph.
* Different implementations will implement different conflict resolution policies.
*
* @author <a href="mailto:oleg@codehaus.org">Oleg Gusakov</a>
*/
public interface GraphConflictResolver
{
String ROLE = GraphConflictResolver.class.getName();
/**
* Cleanses the supplied graph by leaving only one directed versioned edge\
* between any two nodes, if multiple exists. Uses scope relationships, defined
* in <code>ArtifactScopeEnum</code>
*
* @param graph the "dirty" graph to be simplified via conflict resolution
* @param scope scope for which the graph should be resolved
*
* @return resulting "clean" graph for the specified scope
*
* @since 3.0
*/
MetadataGraph resolveConflicts( MetadataGraph graph, ArtifactScopeEnum scope )
throws GraphConflictResolutionException;
}
| apache/maven | maven-compat/src/main/java/org/apache/maven/repository/metadata/GraphConflictResolver.java | Java | apache-2.0 | 1,822 |
// FIXME: Tell people that this is a manifest file, real code should go into discrete files
// FIXME: Tell people how Sprockets and CoffeeScript works
//
//= require jquery
//= require jquery_ujs
//= require_tree .
| danielwanja/bulk_data_source_flex | testrailsapp/app/assets/javascripts/application.js | JavaScript | mit | 215 |
## m.sync
This method takes a list of promises and returns a promise that resolves when all promises in the input list have resolved. See [`m.deferred`](mithril.deferred.md) for more information on promises.
---
### Usage
```javascript
var greetAsync = function(delay) {
var deferred = m.deferred();
setTimeout(function() {
deferred.resolve("hello");
}, delay);
return deferred.promise;
};
m.sync([
greetAsync(1000),
greetAsync(1500)
]).then(function(args) {
console.log(args); // ["hello", "hello"]
});
```
---
### Signature
[How to read signatures](how-to-read-signatures.md)
```clike
Promise sync(Array<Promise> promises)
where:
Promise :: GetterSetter { Promise then(any successCallback(any value), any errorCallback(any value)) }
GetterSetter :: any getterSetter([any value])
```
- **Array<Promise> promises**
A list of promises to synchronize
- **return Promise promise**
The promise of the deferred object that is resolved when all input promises have been resolved
The callbacks for this promise receive as a parameter an Array containing the values of all the input promises | pingpp/community | vendor/flarum/core/js/bower_components/mithril/docs/mithril.sync.md | Markdown | mit | 1,114 |
/*
* Copyright (C) 2000-2003 Axis Communications AB
*
* Authors: Bjorn Wesen (bjornw@axis.com)
* Mikael Starvik (starvik@axis.com)
* Tobias Anderberg (tobiasa@axis.com), CRISv32 port.
*
* This file handles the architecture-dependent parts of process handling..
*/
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <hwregs/reg_rdwr.h>
#include <hwregs/reg_map.h>
#include <hwregs/timer_defs.h>
#include <hwregs/intr_vect_defs.h>
#include <linux/ptrace.h>
extern void stop_watchdog(void);
/* We use this if we don't have any better idle routine. */
void default_idle(void)
{
local_irq_enable();
/* Halt until exception. */
__asm__ volatile("halt");
}
/*
* Free current thread data structures etc..
*/
extern void deconfigure_bp(long pid);
void exit_thread(struct task_struct *tsk)
{
deconfigure_bp(tsk->pid);
}
/*
* If the watchdog is enabled, disable interrupts and enter an infinite loop.
* The watchdog will reset the CPU after 0.1s. If the watchdog isn't enabled
* then enable it and wait.
*/
extern void arch_enable_nmi(void);
void
hard_reset_now(void)
{
/*
* Don't declare this variable elsewhere. We don't want any other
* code to know about it than the watchdog handler in entry.S and
* this code, implementing hard reset through the watchdog.
*/
#if defined(CONFIG_ETRAX_WATCHDOG)
extern int cause_of_death;
#endif
printk("*** HARD RESET ***\n");
local_irq_disable();
#if defined(CONFIG_ETRAX_WATCHDOG)
cause_of_death = 0xbedead;
#else
{
reg_timer_rw_wd_ctrl wd_ctrl = {0};
stop_watchdog();
wd_ctrl.key = 16; /* Arbitrary key. */
wd_ctrl.cnt = 1; /* Minimum time. */
wd_ctrl.cmd = regk_timer_start;
arch_enable_nmi();
REG_WR(timer, regi_timer0, rw_wd_ctrl, wd_ctrl);
}
#endif
while (1)
; /* Wait for reset. */
}
/*
* Return saved PC of a blocked thread.
*/
unsigned long thread_saved_pc(struct task_struct *t)
{
return task_pt_regs(t)->erp;
}
/*
* Setup the child's kernel stack with a pt_regs and call switch_stack() on it.
* It will be unnested during _resume and _ret_from_sys_call when the new thread
* is scheduled.
*
* Also setup the thread switching structure which is used to keep
* thread-specific data during _resumes.
*/
extern asmlinkage void ret_from_fork(void);
extern asmlinkage void ret_from_kernel_thread(void);
int
copy_thread(unsigned long clone_flags, unsigned long usp,
unsigned long arg, struct task_struct *p)
{
struct pt_regs *childregs = task_pt_regs(p);
struct switch_stack *swstack = ((struct switch_stack *) childregs) - 1;
/*
* Put the pt_regs structure at the end of the new kernel stack page and
* fix it up. Note: the task_struct doubles as the kernel stack for the
* task.
*/
if (unlikely(p->flags & PF_KTHREAD)) {
memset(swstack, 0,
sizeof(struct switch_stack) + sizeof(struct pt_regs));
swstack->r1 = usp;
swstack->r2 = arg;
childregs->ccs = 1 << (I_CCS_BITNR + CCS_SHIFT);
swstack->return_ip = (unsigned long) ret_from_kernel_thread;
p->thread.ksp = (unsigned long) swstack;
p->thread.usp = 0;
return 0;
}
*childregs = *current_pt_regs(); /* Struct copy of pt_regs. */
childregs->r10 = 0; /* Child returns 0 after a fork/clone. */
/* Set a new TLS ?
* The TLS is in $mof because it is the 5th argument to sys_clone.
*/
if (p->mm && (clone_flags & CLONE_SETTLS)) {
task_thread_info(p)->tls = childregs->mof;
}
/* Put the switch stack right below the pt_regs. */
/* Parameter to ret_from_sys_call. 0 is don't restart the syscall. */
swstack->r9 = 0;
/*
* We want to return into ret_from_sys_call after the _resume.
* ret_from_fork will call ret_from_sys_call.
*/
swstack->return_ip = (unsigned long) ret_from_fork;
/* Fix the user-mode and kernel-mode stackpointer. */
p->thread.usp = usp ?: rdusp();
p->thread.ksp = (unsigned long) swstack;
return 0;
}
unsigned long
get_wchan(struct task_struct *p)
{
/* TODO */
return 0;
}
#undef last_sched
#undef first_sched
void show_regs(struct pt_regs * regs)
{
unsigned long usp = rdusp();
show_regs_print_info(KERN_DEFAULT);
printk("ERP: %08lx SRP: %08lx CCS: %08lx USP: %08lx MOF: %08lx\n",
regs->erp, regs->srp, regs->ccs, usp, regs->mof);
printk(" r0: %08lx r1: %08lx r2: %08lx r3: %08lx\n",
regs->r0, regs->r1, regs->r2, regs->r3);
printk(" r4: %08lx r5: %08lx r6: %08lx r7: %08lx\n",
regs->r4, regs->r5, regs->r6, regs->r7);
printk(" r8: %08lx r9: %08lx r10: %08lx r11: %08lx\n",
regs->r8, regs->r9, regs->r10, regs->r11);
printk("r12: %08lx r13: %08lx oR10: %08lx\n",
regs->r12, regs->r13, regs->orig_r10);
}
| aberlemont/linux | arch/cris/arch-v32/kernel/process.c | C | gpl-2.0 | 4,772 |
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "dce/dce_11_2_d.h"
#include "dce/dce_11_2_sh_mask.h"
#include "gmc/gmc_8_1_sh_mask.h"
#include "gmc/gmc_8_1_d.h"
#include "include/logger_interface.h"
#include "dce112_compressor.h"
#define DCP_REG(reg)\
(reg + cp110->offsets.dcp_offset)
#define DMIF_REG(reg)\
(reg + cp110->offsets.dmif_offset)
static const struct dce112_compressor_reg_offsets reg_offsets[] = {
{
.dcp_offset = (mmDCP0_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
.dmif_offset =
(mmDMIF_PG0_DPG_PIPE_DPM_CONTROL
- mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
},
{
.dcp_offset = (mmDCP1_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
.dmif_offset =
(mmDMIF_PG1_DPG_PIPE_DPM_CONTROL
- mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
},
{
.dcp_offset = (mmDCP2_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
.dmif_offset =
(mmDMIF_PG2_DPG_PIPE_DPM_CONTROL
- mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
}
};
static const uint32_t dce11_one_lpt_channel_max_resolution = 2560 * 1600;
enum fbc_idle_force {
/* Bit 0 - Display registers updated */
FBC_IDLE_FORCE_DISPLAY_REGISTER_UPDATE = 0x00000001,
/* Bit 2 - FBC_GRPH_COMP_EN register updated */
FBC_IDLE_FORCE_GRPH_COMP_EN = 0x00000002,
/* Bit 3 - FBC_SRC_SEL register updated */
FBC_IDLE_FORCE_SRC_SEL_CHANGE = 0x00000004,
/* Bit 4 - FBC_MIN_COMPRESSION register updated */
FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE = 0x00000008,
/* Bit 5 - FBC_ALPHA_COMP_EN register updated */
FBC_IDLE_FORCE_ALPHA_COMP_EN = 0x00000010,
/* Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated */
FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN = 0x00000020,
/* Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated */
FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF = 0x00000040,
/* Bit 24 - Memory write to region 0 defined by MC registers. */
FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION0 = 0x01000000,
/* Bit 25 - Memory write to region 1 defined by MC registers */
FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION1 = 0x02000000,
/* Bit 26 - Memory write to region 2 defined by MC registers */
FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION2 = 0x04000000,
/* Bit 27 - Memory write to region 3 defined by MC registers. */
FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION3 = 0x08000000,
/* Bit 28 - Memory write from any client other than MCIF */
FBC_IDLE_FORCE_MEMORY_WRITE_OTHER_THAN_MCIF = 0x10000000,
/* Bit 29 - CG statics screen signal is inactive */
FBC_IDLE_FORCE_CG_STATIC_SCREEN_IS_INACTIVE = 0x20000000,
};
static uint32_t lpt_size_alignment(struct dce112_compressor *cp110)
{
/*LPT_ALIGNMENT (in bytes) = ROW_SIZE * #BANKS * # DRAM CHANNELS. */
return cp110->base.raw_size * cp110->base.banks_num *
cp110->base.dram_channels_num;
}
/* Translate the memory-controller configuration cached in cp110->base
 * (DRAM channel count, bank count, channel interleave size, row size) into
 * the corresponding LOW_POWER_TILING_CONTROL register fields of lpt_control
 * and return the updated value.  Unsupported values leave the corresponding
 * field untouched and are only logged as warnings. */
static uint32_t lpt_memory_control_config(struct dce112_compressor *cp110,
	uint32_t lpt_control)
{
	/*LPT MC Config */
	if (cp110->base.options.bits.LPT_MC_CONFIG == 1) {
		/* POSSIBLE VALUES for LPT NUM_PIPES (DRAM CHANNELS):
		 * 00 - 1 CHANNEL
		 * 01 - 2 CHANNELS
		 * 02 - 4 OR 6 CHANNELS
		 * (Only for discrete GPU, N/A for CZ)
		 * 03 - 8 OR 12 CHANNELS
		 * (Only for discrete GPU, N/A for CZ) */
		switch (cp110->base.dram_channels_num) {
		case 2:
			set_reg_field_value(
				lpt_control,
				1,
				LOW_POWER_TILING_CONTROL,
				LOW_POWER_TILING_NUM_PIPES);
			break;
		case 1:
			set_reg_field_value(
				lpt_control,
				0,
				LOW_POWER_TILING_CONTROL,
				LOW_POWER_TILING_NUM_PIPES);
			break;
		default:
			dm_logger_write(
				cp110->base.ctx->logger, LOG_WARNING,
				"%s: Invalid LPT NUM_PIPES!!!",
				__func__);
			break;
		}

		/* The mapping for LPT NUM_BANKS is in
		 * GRPH_CONTROL.GRPH_NUM_BANKS register field
		 * Specifies the number of memory banks for tiling
		 * purposes. Only applies to 2D and 3D tiling modes.
		 * POSSIBLE VALUES:
		 * 00 - DCP_GRPH_NUM_BANKS_2BANK: ADDR_SURF_2_BANK
		 * 01 - DCP_GRPH_NUM_BANKS_4BANK: ADDR_SURF_4_BANK
		 * 02 - DCP_GRPH_NUM_BANKS_8BANK: ADDR_SURF_8_BANK
		 * 03 - DCP_GRPH_NUM_BANKS_16BANK: ADDR_SURF_16_BANK */
		switch (cp110->base.banks_num) {
		case 16:
			set_reg_field_value(
				lpt_control,
				3,
				LOW_POWER_TILING_CONTROL,
				LOW_POWER_TILING_NUM_BANKS);
			break;
		case 8:
			set_reg_field_value(
				lpt_control,
				2,
				LOW_POWER_TILING_CONTROL,
				LOW_POWER_TILING_NUM_BANKS);
			break;
		case 4:
			set_reg_field_value(
				lpt_control,
				1,
				LOW_POWER_TILING_CONTROL,
				LOW_POWER_TILING_NUM_BANKS);
			break;
		case 2:
			set_reg_field_value(
				lpt_control,
				0,
				LOW_POWER_TILING_CONTROL,
				LOW_POWER_TILING_NUM_BANKS);
			break;
		default:
			dm_logger_write(
				cp110->base.ctx->logger, LOG_WARNING,
				"%s: Invalid LPT NUM_BANKS!!!",
				__func__);
			break;
		}

		/* The mapping is in DMIF_ADDR_CALC.
		 * ADDR_CONFIG_PIPE_INTERLEAVE_SIZE register field for
		 * Carrizo specifies the memory interleave per pipe.
		 * It effectively specifies the location of pipe bits in
		 * the memory address.
		 * POSSIBLE VALUES:
		 * 00 - ADDR_CONFIG_PIPE_INTERLEAVE_256B: 256 byte
		 * interleave
		 * 01 - ADDR_CONFIG_PIPE_INTERLEAVE_512B: 512 byte
		 * interleave
		 */
		switch (cp110->base.channel_interleave_size) {
		case 256: /*256B */
			set_reg_field_value(
				lpt_control,
				0,
				LOW_POWER_TILING_CONTROL,
				LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE);
			break;
		case 512: /*512B */
			set_reg_field_value(
				lpt_control,
				1,
				LOW_POWER_TILING_CONTROL,
				LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE);
			break;
		default:
			dm_logger_write(
				cp110->base.ctx->logger, LOG_WARNING,
				"%s: Invalid LPT INTERLEAVE_SIZE!!!",
				__func__);
			break;
		}

		/* The mapping for LOW_POWER_TILING_ROW_SIZE is in
		 * DMIF_ADDR_CALC.ADDR_CONFIG_ROW_SIZE register field
		 * for Carrizo. Specifies the size of dram row in bytes.
		 * This should match up with NOOFCOLS field in
		 * MC_ARB_RAMCFG (ROW_SIZE = 4 * 2 ^^ columns).
		 * This register DMIF_ADDR_CALC is not used by the
		 * hardware as it is only used for addrlib assertions.
		 * POSSIBLE VALUES:
		 * 00 - ADDR_CONFIG_1KB_ROW: Treat 1KB as DRAM row
		 * boundary
		 * 01 - ADDR_CONFIG_2KB_ROW: Treat 2KB as DRAM row
		 * boundary
		 * 02 - ADDR_CONFIG_4KB_ROW: Treat 4KB as DRAM row
		 * boundary */
		switch (cp110->base.raw_size) {
		case 4096: /*4 KB */
			set_reg_field_value(
				lpt_control,
				2,
				LOW_POWER_TILING_CONTROL,
				LOW_POWER_TILING_ROW_SIZE);
			break;
		case 2048:
			set_reg_field_value(
				lpt_control,
				1,
				LOW_POWER_TILING_CONTROL,
				LOW_POWER_TILING_ROW_SIZE);
			break;
		case 1024:
			set_reg_field_value(
				lpt_control,
				0,
				LOW_POWER_TILING_CONTROL,
				LOW_POWER_TILING_ROW_SIZE);
			break;
		default:
			dm_logger_write(
				cp110->base.ctx->logger, LOG_WARNING,
				"%s: Invalid LPT ROW_SIZE!!!",
				__func__);
			break;
		}
	} else {
		dm_logger_write(
			cp110->base.ctx->logger, LOG_WARNING,
			"%s: LPT MC Configuration is not provided",
			__func__);
	}

	return lpt_control;
}
/* Returns true when the source view contains more pixels than the embedded
 * panel.  If either panel dimension is unknown (zero) the comparison cannot
 * be made and false is returned. */
static bool is_source_bigger_than_epanel_size(
	struct dce112_compressor *cp110,
	uint32_t source_view_width,
	uint32_t source_view_height)
{
	if (cp110->base.embedded_panel_h_size == 0 ||
		cp110->base.embedded_panel_v_size == 0)
		return false;

	return (source_view_width * source_view_height) >
		(cp110->base.embedded_panel_h_size *
			cp110->base.embedded_panel_v_size);
}
/* Round the pixel count up to the next multiple of 256 (the FBC chunk size
 * per line).  The compressor argument is unused but kept for a uniform
 * helper signature. */
static uint32_t align_to_chunks_number_per_line(
	struct dce112_compressor *cp110,
	uint32_t pixels)
{
	uint32_t chunks = (pixels + 255) / 256;

	return chunks * 256;
}
/* Poll the FBC_STATUS register until FBC_ENABLE_STATUS matches the requested
 * state.  Gives up with a warning after 10 polls spaced 10us apart; the
 * register write that triggered the state change is not rolled back. */
static void wait_for_fbc_state_changed(
	struct dce112_compressor *cp110,
	bool enabled)
{
	uint8_t retry;

	for (retry = 0; retry < 10; retry++) {
		uint32_t reg_val = dm_read_reg(cp110->base.ctx, mmFBC_STATUS);

		if (get_reg_field_value(
				reg_val,
				FBC_STATUS,
				FBC_ENABLE_STATUS) == enabled)
			return;

		udelay(10);
	}

	dm_logger_write(
		cp110->base.ctx->logger, LOG_WARNING,
		"%s: wait counter exceeded, changes to HW not applied",
		__func__);
}
/* Bring the FBC block out of power-down and program its static operating
 * mode: enable the engine (compression itself stays gated off via
 * FBC_GRPH_COMP_EN=0), select the compression algorithms, colour depth and
 * an initial 1:1 minimum compression ratio, and clear the indexed LUT. */
void dce112_compressor_power_up_fbc(struct compressor *compressor)
{
	uint32_t value;
	uint32_t addr;

	addr = mmFBC_CNTL;
	value = dm_read_reg(compressor->ctx, addr);
	set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
	set_reg_field_value(value, 1, FBC_CNTL, FBC_EN);
	/* NOTE(review): coherency mode 2 — meaning of this encoding is not
	 * visible here; confirm against the FBC_CNTL register spec. */
	set_reg_field_value(value, 2, FBC_CNTL, FBC_COHERENCY_MODE);
	if (compressor->options.bits.CLK_GATING_DISABLED == 1) {
		/* HW needs to do power measurement comparison. */
		set_reg_field_value(
			value,
			0,
			FBC_CNTL,
			FBC_COMP_CLK_GATE_EN);
	}
	dm_write_reg(compressor->ctx, addr, value);

	/* Enable the RLE, DPCM4 and indexed compression algorithms. */
	addr = mmFBC_COMP_MODE;
	value = dm_read_reg(compressor->ctx, addr);
	set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_RLE_EN);
	set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_DPCM4_RGB_EN);
	set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_IND_EN);
	dm_write_reg(compressor->ctx, addr, value);

	addr = mmFBC_COMP_CNTL;
	value = dm_read_reg(compressor->ctx, addr);
	set_reg_field_value(value, 1, FBC_COMP_CNTL, FBC_DEPTH_RGB08_EN);
	dm_write_reg(compressor->ctx, addr, value);
	/*FBC_MIN_COMPRESSION 0 ==> 2:1 */
	/*                    1 ==> 4:1 */
	/*                    2 ==> 8:1 */
	/*                  0xF ==> 1:1 */
	/* FBC_COMP_CNTL is deliberately written twice: once with the depth
	 * setting alone, then again with the minimum compression added. */
	set_reg_field_value(value, 0xF, FBC_COMP_CNTL, FBC_MIN_COMPRESSION);
	dm_write_reg(compressor->ctx, addr, value);
	compressor->min_compress_ratio = FBC_COMPRESS_RATIO_1TO1;

	/* Reset the indexed-compression LUT entries. */
	value = 0;
	dm_write_reg(compressor->ctx, mmFBC_IND_LUT0, value);

	value = 0xFFFFFF;
	dm_write_reg(compressor->ctx, mmFBC_IND_LUT1, value);
}
/* Enable frame-buffer compression for the controller given by params->inst.
 * No-op unless FBC is supported, not backed by a dummy backend, currently
 * disabled in HW, and the source fits within the embedded panel.  LPT is
 * enabled first when applicable, since LPT state must only change while FBC
 * is off. */
void dce112_compressor_enable_fbc(
	struct compressor *compressor,
	uint32_t paths_num,
	struct compr_addr_and_pitch_params *params)
{
	struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(compressor);

	if (compressor->options.bits.FBC_SUPPORT &&
		(compressor->options.bits.DUMMY_BACKEND == 0) &&
		(!dce112_compressor_is_fbc_enabled_in_hw(compressor, NULL)) &&
		(!is_source_bigger_than_epanel_size(
			cp110,
			params->source_view_width,
			params->source_view_height))) {

		uint32_t addr;
		uint32_t value;

		/* Before enabling FBC first need to enable LPT if applicable
		 * LPT state should always be changed (enable/disable) while FBC
		 * is disabled */
		if (compressor->options.bits.LPT_SUPPORT && (paths_num < 2) &&
			(params->source_view_width *
				params->source_view_height <=
				dce11_one_lpt_channel_max_resolution)) {
			dce112_compressor_enable_lpt(compressor);
		}

		/* Turn compression on and select the source controller. */
		addr = mmFBC_CNTL;
		value = dm_read_reg(compressor->ctx, addr);
		set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
		set_reg_field_value(
			value,
			params->inst,
			FBC_CNTL, FBC_SRC_SEL);
		dm_write_reg(compressor->ctx, addr, value);

		/* Keep track of enum controller_id FBC is attached to */
		compressor->is_enabled = true;
		compressor->attached_inst = params->inst;
		cp110->offsets = reg_offsets[params->inst];

		/*Toggle it as there is bug in HW */
		set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
		dm_write_reg(compressor->ctx, addr, value);
		set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
		dm_write_reg(compressor->ctx, addr, value);

		wait_for_fbc_state_changed(cp110, true);
	}
}
/* Disable frame-buffer compression if it is currently enabled in hardware,
 * then disable LPT (which must only be changed while FBC is off) and wait
 * for the HW to confirm the disabled state. */
void dce112_compressor_disable_fbc(struct compressor *compressor)
{
	struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(compressor);

	if (compressor->options.bits.FBC_SUPPORT &&
		dce112_compressor_is_fbc_enabled_in_hw(compressor, NULL)) {
		uint32_t reg_data;
		/* Turn off compression */
		reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL);
		set_reg_field_value(reg_data, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
		dm_write_reg(compressor->ctx, mmFBC_CNTL, reg_data);

		/* Reset enum controller_id to undefined */
		compressor->attached_inst = 0;
		compressor->is_enabled = false;

		/* Whenever disabling FBC make sure LPT is disabled if LPT
		 * supported */
		if (compressor->options.bits.LPT_SUPPORT)
			dce112_compressor_disable_lpt(compressor);

		wait_for_fbc_state_changed(cp110, false);
	}
}
/* Query the hardware for the current FBC state.  Two checks are made:
 * FBC_STATUS.FBC_ENABLE_STATUS (the steady-state flag), and — when
 * stop-on-hflip is configured — FBC_CNTL.FBC_GRPH_COMP_EN, which covers the
 * window where compression is requested but not yet reflected in the status
 * register.  On success *inst (if non-NULL) receives the attached
 * controller instance. */
bool dce112_compressor_is_fbc_enabled_in_hw(
	struct compressor *compressor,
	uint32_t *inst)
{
	/* Check the hardware register */
	uint32_t value;

	value = dm_read_reg(compressor->ctx, mmFBC_STATUS);
	if (get_reg_field_value(value, FBC_STATUS, FBC_ENABLE_STATUS)) {
		if (inst != NULL)
			*inst = compressor->attached_inst;

		return true;
	}

	value = dm_read_reg(compressor->ctx, mmFBC_MISC);
	if (get_reg_field_value(value, FBC_MISC, FBC_STOP_ON_HFLIP_EVENT)) {
		value = dm_read_reg(compressor->ctx, mmFBC_CNTL);

		if (get_reg_field_value(value, FBC_CNTL, FBC_GRPH_COMP_EN)) {
			if (inst != NULL)
				*inst =
					compressor->attached_inst;

			return true;
		}
	}

	return false;
}
/* Report whether Low Power Tiling is currently enabled, as read from the
 * LOW_POWER_TILING_CONTROL hardware register. */
bool dce112_compressor_is_lpt_enabled_in_hw(struct compressor *compressor)
{
	uint32_t control = dm_read_reg(compressor->ctx,
				       mmLOW_POWER_TILING_CONTROL);

	return get_reg_field_value(
			control,
			LOW_POWER_TILING_CONTROL,
			LOW_POWER_TILING_ENABLE);
}
/* Program the address and pitch of the compressed surface.  When LPT is
 * supported, the surface address is rounded up to the LPT alignment first.
 * The pitch is the source width aligned to 256-pixel chunks; for the 1:1
 * ratio it is divided by 8 before being written.
 * NOTE(review): the divide-by-8 unit conversion is not explained here —
 * confirm against the GRPH_COMPRESS_PITCH register definition. */
void dce112_compressor_program_compressed_surface_address_and_pitch(
	struct compressor *compressor,
	struct compr_addr_and_pitch_params *params)
{
	struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(compressor);
	uint32_t value = 0;
	uint32_t fbc_pitch = 0;
	uint32_t compressed_surf_address_low_part =
		compressor->compr_surface_address.addr.low_part;

	/* Clear content first. */
	dm_write_reg(
		compressor->ctx,
		DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH),
		0);
	dm_write_reg(compressor->ctx,
		DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS), 0);

	if (compressor->options.bits.LPT_SUPPORT) {
		uint32_t lpt_alignment = lpt_size_alignment(cp110);

		if (lpt_alignment != 0) {
			/* Round the low address part up to the alignment. */
			compressed_surf_address_low_part =
				((compressed_surf_address_low_part
					+ (lpt_alignment - 1)) / lpt_alignment)
					* lpt_alignment;
		}
	}

	/* Write address, HIGH has to be first. */
	dm_write_reg(compressor->ctx,
		DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH),
		compressor->compr_surface_address.addr.high_part);
	dm_write_reg(compressor->ctx,
		DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS),
		compressed_surf_address_low_part);

	fbc_pitch = align_to_chunks_number_per_line(
		cp110,
		params->source_view_width);

	if (compressor->min_compress_ratio == FBC_COMPRESS_RATIO_1TO1)
		fbc_pitch = fbc_pitch / 8;
	else
		dm_logger_write(
			compressor->ctx->logger, LOG_WARNING,
			"%s: Unexpected DCE11 compression ratio",
			__func__);

	/* Clear content first. */
	dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_PITCH), 0);

	/* Write FBC Pitch. */
	set_reg_field_value(
		value,
		fbc_pitch,
		GRPH_COMPRESS_PITCH,
		GRPH_COMPRESS_PITCH);
	dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_PITCH), value);
}
/* Disable Low Power Tiling: turn off LPT stutter for the display and
 * underlay pipes, clear LOW_POWER_TILING_ENABLE, and deselect all channels
 * in GMCON_LPT_TARGET (all-ones means "no LPT target"). */
void dce112_compressor_disable_lpt(struct compressor *compressor)
{
	struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(compressor);
	uint32_t value;
	uint32_t addr;
	uint32_t inx;

	/* Disable all pipes LPT Stutter */
	/* NOTE(review): the loop index 'inx' is not used inside the body, so
	 * the same DMIF_REG register (offset fixed by cp110->offsets) appears
	 * to be rewritten three times — confirm whether DMIF_REG was meant to
	 * take the pipe index. */
	for (inx = 0; inx < 3; inx++) {
		value =
			dm_read_reg(
				compressor->ctx,
				DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH));
		set_reg_field_value(
			value,
			0,
			DPG_PIPE_STUTTER_CONTROL_NONLPTCH,
			STUTTER_ENABLE_NONLPTCH);
		dm_write_reg(
			compressor->ctx,
			DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH),
			value);
	}
	/* Disable Underlay pipe LPT Stutter */
	addr = mmDPGV0_PIPE_STUTTER_CONTROL_NONLPTCH;
	value = dm_read_reg(compressor->ctx, addr);
	set_reg_field_value(
		value,
		0,
		DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH,
		STUTTER_ENABLE_NONLPTCH);
	dm_write_reg(compressor->ctx, addr, value);

	/* Disable LPT */
	addr = mmLOW_POWER_TILING_CONTROL;
	value = dm_read_reg(compressor->ctx, addr);
	set_reg_field_value(
		value,
		0,
		LOW_POWER_TILING_CONTROL,
		LOW_POWER_TILING_ENABLE);
	dm_write_reg(compressor->ctx, addr, value);

	/* Clear selection of Channel(s) containing Compressed Surface */
	addr = mmGMCON_LPT_TARGET;
	value = dm_read_reg(compressor->ctx, addr);
	set_reg_field_value(
		value,
		0xFFFFFFFF,
		GMCON_LPT_TARGET,
		STCTRL_LPT_TARGET);
	dm_write_reg(compressor->ctx, mmGMCON_LPT_TARGET, value);
}
/* Enable Low Power Tiling: turn on LPT stutter for the display and underlay
 * pipes, program the target channel in GMCON_LPT_TARGET from the configured
 * LOW_POWER_TILING_MODE, and finally set LOW_POWER_TILING_ENABLE. */
void dce112_compressor_enable_lpt(struct compressor *compressor)
{
	struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(compressor);
	uint32_t value;
	uint32_t addr;
	uint32_t value_control;
	uint32_t channels;

	/* Enable LPT Stutter from Display pipe */
	value = dm_read_reg(compressor->ctx,
		DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH));
	set_reg_field_value(
		value,
		1,
		DPG_PIPE_STUTTER_CONTROL_NONLPTCH,
		STUTTER_ENABLE_NONLPTCH);
	dm_write_reg(compressor->ctx,
		DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH), value);

	/* Enable Underlay pipe LPT Stutter */
	addr = mmDPGV0_PIPE_STUTTER_CONTROL_NONLPTCH;
	value = dm_read_reg(compressor->ctx, addr);
	set_reg_field_value(
		value,
		1,
		DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH,
		STUTTER_ENABLE_NONLPTCH);
	dm_write_reg(compressor->ctx, addr, value);

	/* Selection of Channel(s) containing Compressed Surface: 0xfffffff
	 * will disable LPT.
	 * STCTRL_LPT_TARGETn corresponds to channel n. */
	addr = mmLOW_POWER_TILING_CONTROL;
	value_control = dm_read_reg(compressor->ctx, addr);
	channels = get_reg_field_value(value_control,
			LOW_POWER_TILING_CONTROL,
			LOW_POWER_TILING_MODE);

	addr = mmGMCON_LPT_TARGET;
	value = dm_read_reg(compressor->ctx, addr);
	set_reg_field_value(
		value,
		channels + 1, /* not mentioned in programming guide,
				but follow DCE8.1 */
		GMCON_LPT_TARGET,
		STCTRL_LPT_TARGET);
	dm_write_reg(compressor->ctx, addr, value);

	/* Enable LPT */
	addr = mmLOW_POWER_TILING_CONTROL;
	value = dm_read_reg(compressor->ctx, addr);
	set_reg_field_value(
		value,
		1,
		LOW_POWER_TILING_CONTROL,
		LOW_POWER_TILING_ENABLE);
	dm_write_reg(compressor->ctx, addr, value);
}
/* Program the LOW_POWER_TILING_CONTROL register for the current surface:
 * select the LPT channel mode (DCE11 always uses channel 0), merge in the
 * memory-controller configuration via lpt_memory_control_config(), and
 * compute LOW_POWER_TILING_ROWS_PER_CHAN from the surface size.  No-op when
 * LPT is not supported. */
void dce112_compressor_program_lpt_control(
	struct compressor *compressor,
	struct compr_addr_and_pitch_params *params)
{
	struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(compressor);
	uint32_t rows_per_channel;
	uint32_t lpt_alignment;
	uint32_t source_view_width;
	uint32_t source_view_height;
	uint32_t lpt_control = 0;

	if (!compressor->options.bits.LPT_SUPPORT)
		return;

	lpt_control = dm_read_reg(compressor->ctx,
		mmLOW_POWER_TILING_CONTROL);

	/* POSSIBLE VALUES for Low Power Tiling Mode:
	 * 00 - Use channel 0
	 * 01 - Use Channel 0 and 1
	 * 02 - Use Channel 0,1,2,3
	 * 03 - reserved */
	switch (compressor->lpt_channels_num) {
	/* case 2:
	 * Use Channel 0 & 1 / Not used for DCE 11 */
	case 1:
		/*Use Channel 0 for LPT for DCE 11 */
		set_reg_field_value(
			lpt_control,
			0,
			LOW_POWER_TILING_CONTROL,
			LOW_POWER_TILING_MODE);
		break;
	default:
		dm_logger_write(
			compressor->ctx->logger, LOG_WARNING,
			"%s: Invalid selected DRAM channels for LPT!!!",
			__func__);
		break;
	}

	lpt_control = lpt_memory_control_config(cp110, lpt_control);

	/* Program LOW_POWER_TILING_ROWS_PER_CHAN field which depends on
	 * FBC compressed surface pitch.
	 * LOW_POWER_TILING_ROWS_PER_CHAN = Roundup ((Surface Height *
	 * Surface Pitch) / (Row Size * Number of Channels *
	 * Number of Banks)). */
	rows_per_channel = 0;
	lpt_alignment = lpt_size_alignment(cp110);
	source_view_width =
		align_to_chunks_number_per_line(
			cp110,
			params->source_view_width);
	/* Round the height up to an even value. */
	source_view_height = (params->source_view_height + 1) & (~0x1);

	if (lpt_alignment != 0) {
		/* x4 for 32bpp — 4 bytes per pixel; divide rounds up. */
		rows_per_channel = source_view_width * source_view_height * 4;
		rows_per_channel =
			(rows_per_channel % lpt_alignment) ?
				(rows_per_channel / lpt_alignment + 1) :
				rows_per_channel / lpt_alignment;
	}

	set_reg_field_value(
		lpt_control,
		rows_per_channel,
		LOW_POWER_TILING_CONTROL,
		LOW_POWER_TILING_ROWS_PER_CHAN);

	dm_write_reg(compressor->ctx,
		mmLOW_POWER_TILING_CONTROL, lpt_control);
}
/*
 * DCE 11 Frame Buffer Compression Implementation
 */

/* Configure which hardware events invalidate the compressed surface:
 * clear the region-hit mask (regions are unusable with S/G on DCE11) and
 * program FBC_IDLE_FORCE_CLEAR_MASK with the caller-supplied triggers plus
 * the DCE11.1-specific register-update events. */
void dce112_compressor_set_fbc_invalidation_triggers(
	struct compressor *compressor,
	uint32_t fbc_trigger)
{
	/* Disable region hit event, FBC_MEMORY_REGION_MASK = 0 (bits 16-19)
	 * for DCE 11 regions cannot be used - does not work with S/G
	 */
	uint32_t addr = mmFBC_CLIENT_REGION_MASK;
	uint32_t value = dm_read_reg(compressor->ctx, addr);

	set_reg_field_value(
		value,
		0,
		FBC_CLIENT_REGION_MASK,
		FBC_MEMORY_REGION_MASK);
	dm_write_reg(compressor->ctx, addr, value);

	/* Setup events when to clear all CSM entries (effectively marking
	 * current compressed data invalid)
	 * For DCE 11 CSM metadata 11111 means - "Not Compressed"
	 * Used as the initial value of the metadata sent to the compressor
	 * after invalidation, to indicate that the compressor should attempt
	 * to compress all chunks on the current pass. Also used when the chunk
	 * is not successfully written to memory.
	 * When this CSM value is detected, FBC reads from the uncompressed
	 * buffer. Set events according to passed in value, these events are
	 * valid for DCE11:
	 * - bit 0 - display register updated
	 * - bit 28 - memory write from any client except from MCIF
	 * - bit 29 - CG static screen signal is inactive
	 * In addition, DCE11.1 also needs to set new DCE11.1 specific events
	 * that are used to trigger invalidation on certain register changes,
	 * for example enabling of Alpha Compression may trigger invalidation of
	 * FBC once bit is set. These events are as follows:
	 * - Bit 2 - FBC_GRPH_COMP_EN register updated
	 * - Bit 3 - FBC_SRC_SEL register updated
	 * - Bit 4 - FBC_MIN_COMPRESSION register updated
	 * - Bit 5 - FBC_ALPHA_COMP_EN register updated
	 * - Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated
	 * - Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated
	 */
	addr = mmFBC_IDLE_FORCE_CLEAR_MASK;
	value = dm_read_reg(compressor->ctx, addr);
	set_reg_field_value(
		value,
		fbc_trigger |
		FBC_IDLE_FORCE_GRPH_COMP_EN |
		FBC_IDLE_FORCE_SRC_SEL_CHANGE |
		FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE |
		FBC_IDLE_FORCE_ALPHA_COMP_EN |
		FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN |
		FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF,
		FBC_IDLE_FORCE_CLEAR_MASK,
		FBC_IDLE_FORCE_CLEAR_MASK);
	dm_write_reg(compressor->ctx, addr, value);
}
/* Initialize a dce112_compressor instance: set the default option bits,
 * reset the cached state, read the memory bus width from the ASIC id, and
 * fetch the embedded panel dimensions from the video BIOS (left at zero
 * when no embedded panel info is available).
 *
 * Fixes vs. the previous version:
 *  - memory_bus_width is now assigned from ctx->asic_id.vram_width BEFORE
 *    the single-channel (64-bit bus) check; previously the check always saw
 *    the zero-initialized field and could never disable LPT_SUPPORT.
 *  - lpt_channels_num is set to 1 exactly once; previously the required
 *    DCE11 value of 1 was clobbered by a later duplicate assignment to 0. */
void dce112_compressor_construct(struct dce112_compressor *compressor,
	struct dc_context *ctx)
{
	struct dc_bios *bp = ctx->dc_bios;
	struct embedded_panel_info panel_info;

	compressor->base.options.raw = 0;
	compressor->base.options.bits.FBC_SUPPORT = true;
	compressor->base.options.bits.LPT_SUPPORT = true;
	compressor->base.options.bits.DUMMY_BACKEND = false;
	compressor->base.options.bits.CLK_GATING_DISABLED = false;

	compressor->base.ctx = ctx;
	compressor->base.embedded_panel_h_size = 0;
	compressor->base.embedded_panel_v_size = 0;
	compressor->base.memory_bus_width = ctx->asic_id.vram_width;
	compressor->base.allocated_size = 0;
	compressor->base.preferred_requested_size = 0;
	compressor->base.min_compress_ratio = FBC_COMPRESS_RATIO_INVALID;
	compressor->base.banks_num = 0;
	compressor->base.raw_size = 0;
	compressor->base.channel_interleave_size = 0;
	compressor->base.dram_channels_num = 0;
	/* For DCE 11 always use one DRAM channel for LPT */
	compressor->base.lpt_channels_num = 1;
	compressor->base.attached_inst = 0;
	compressor->base.is_enabled = false;

	/* Check if this system has more than 1 DRAM channel; if only 1 then
	 * LPT should not be supported */
	if (compressor->base.memory_bus_width == 64)
		compressor->base.options.bits.LPT_SUPPORT = false;

	if (BP_RESULT_OK ==
		bp->funcs->get_embedded_panel_info(bp, &panel_info)) {
		compressor->base.embedded_panel_h_size =
			panel_info.lcd_timing.horizontal_addressable;
		compressor->base.embedded_panel_v_size =
			panel_info.lcd_timing.vertical_addressable;
	}
}
/* Allocate and initialize a DCE11.2 compressor; returns the embedded base
 * compressor interface, or NULL on allocation failure. */
struct compressor *dce112_compressor_create(struct dc_context *ctx)
{
	struct dce112_compressor *compressor =
		kzalloc(sizeof(struct dce112_compressor), GFP_KERNEL);

	if (compressor == NULL)
		return NULL;

	dce112_compressor_construct(compressor, ctx);
	return &compressor->base;
}
/* Free a compressor created by dce112_compressor_create() and NULL out the
 * caller's pointer. */
void dce112_compressor_destroy(struct compressor **compressor)
{
	struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(*compressor);

	kfree(cp110);
	*compressor = NULL;
}
| hannes/linux | drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c | C | gpl-2.0 | 25,103 |
// @(#)root/tmva $Id$
// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss
/**********************************************************************************
* Project: TMVA - a Root-integrated toolkit for multivariate data analysis *
* Package: TMVA *
* Class : Configurable *
* Web : http://tmva.sourceforge.net *
* *
* Description: *
* Base class for all classes with option parsing *
* *
* Authors (alphabetical): *
* Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland *
* Joerg Stelzer <Joerg.Stelzer@cern.ch> - CERN, Switzerland *
* Helge Voss <Helge.Voss@cern.ch> - MPI-K Heidelberg, Germany *
* *
* Copyright (c) 2005: *
* CERN, Switzerland *
* MPI-K Heidelberg, Germany *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted according to the terms listed in LICENSE *
* (http://tmva.sourceforge.net/LICENSE) *
**********************************************************************************/
#ifndef ROOT_TMVA_Configurable
#define ROOT_TMVA_Configurable
//////////////////////////////////////////////////////////////////////////
// //
// Configurable //
// //
// Base class for all classes with option parsing //
// //
//////////////////////////////////////////////////////////////////////////
#include "TNamed.h"
#include "TList.h"
#include "TMVA/Option.h"
namespace TMVA {

   // Base class for every TMVA class that parses an option string of the
   // form "Key1=Val1:Key2=Val2:...".  Options are declared by reference
   // (DeclareOptionRef), optionally restricted to predefined values
   // (AddPreDefVal), and filled in by ParseOptions().
   class Configurable : public TNamed {

   public:

      // constructor
      Configurable( const TString& theOption = "" );

      // default destructor
      virtual ~Configurable();

      // parse the internal option string
      virtual void ParseOptions();

      // print list of defined options
      void PrintOptions() const;

      const char* GetConfigName() const { return GetName(); }
      const char* GetConfigDescription() const { return fConfigDescription; }
      void SetConfigName ( const char* n ) { SetName(n); }
      void SetConfigDescription( const char* d ) { fConfigDescription = TString(d); }

      // Declare option and bind it to a variable
      template<class T>
      OptionBase* DeclareOptionRef( T& ref, const TString& name, const TString& desc = "" );

      template<class T>
      OptionBase* DeclareOptionRef( T*& ref, Int_t size, const TString& name, const TString& desc = "" );

      // Add a predefined value to the last declared option
      template<class T>
      void AddPreDefVal(const T&);

      // Add a predefined value to the option named optname
      template<class T>
      void AddPreDefVal(const TString&optname ,const T&);

      void CheckForUnusedOptions() const;

      const TString& GetOptions() const { return fOptions; }
      void SetOptions(const TString& s) { fOptions = s; }

      void WriteOptionsToStream ( std::ostream& o, const TString& prefix ) const;
      void ReadOptionsFromStream( std::istream& istr );

      void AddOptionsXMLTo( void* parent ) const;
      void ReadOptionsFromXML( void* node );

   protected:

      Bool_t LooseOptionCheckingEnabled() const { return fLooseOptionCheckingEnabled; }
      void EnableLooseOptions( Bool_t b = kTRUE ) { fLooseOptionCheckingEnabled = b; }

      void WriteOptionsReferenceToFile();

      void ResetSetFlag();

      const TString& GetReferenceFile() const { return fReferenceFile; }

   private:

      // splits the option string at ':' and fills the list 'loo' with the primitive strings
      void SplitOptions(const TString& theOpt, TList& loo) const;

      TString fOptions; // options string
      Bool_t fLooseOptionCheckingEnabled; // checker for option string

      // classes and method related to easy and flexible option parsing
      OptionBase* fLastDeclaredOption; //! last declared option
      TList fListOfOptions; // option list

      TString fConfigDescription; // description of this configurable
      TString fReferenceFile; // reference file for options writing

   public:

      // the mutable declaration is needed to use the logger in const methods
      MsgLogger& Log() const { return *fLogger; }

      // set message type
      void SetMsgType( EMsgType t ) { fLogger->SetMinType(t); }

   protected:

      mutable MsgLogger* fLogger; //! message logger

   private:

      template <class T>
      void AssignOpt( const TString& name, T& valAssign ) const;

   public:

      ClassDef(Configurable,1); // Virtual base class for all TMVA methods
   };
} // namespace TMVA
// Template Declarations go here
//______________________________________________________________________
template <class T>
TMVA::OptionBase* TMVA::Configurable::DeclareOptionRef( T& ref, const TString& name, const TString& desc)
{
   // Register a new option bound to the variable 'ref', append it to the
   // option list, and remember it as the most recently declared option
   // (the target of the single-argument AddPreDefVal).
   Option<T>* option = new Option<T>(ref, name, desc);
   fListOfOptions.Add(option);
   fLastDeclaredOption = option;
   return option;
}
template <class T>
TMVA::OptionBase* TMVA::Configurable::DeclareOptionRef( T*& ref, Int_t size, const TString& name, const TString& desc)
{
   // Register an array option of 'size' elements bound to 'ref', append it
   // to the option list, and remember it as the most recently declared
   // option (the target of the single-argument AddPreDefVal).
   Option<T*>* arrayOption = new Option<T*>(ref, size, name, desc);
   fListOfOptions.Add(arrayOption);
   fLastDeclaredOption = arrayOption;
   return arrayOption;
}
//______________________________________________________________________
template<class T>
void TMVA::Configurable::AddPreDefVal(const T& val)
{
// add predefined option value to the last declared option
Option<T>* oc = dynamic_cast<Option<T>*>(fLastDeclaredOption);
if(oc!=0) oc->AddPreDefVal(val);
}
//______________________________________________________________________
template<class T>
void TMVA::Configurable::AddPreDefVal(const TString &optname, const T& val)
{
   // Attach the predefined (allowed) value 'val' to the already declared
   // option named 'optname'.  Emits a fatal message when the option does
   // not exist or when its stored type does not match T.
   // Fix: corrected the typo "propperly" in the type-mismatch message.
   TListIter optIt( &fListOfOptions );
   while (OptionBase * op = (OptionBase *) optIt()) {
      if (optname != TString(op->TheName())) continue;
      Option<T>* oc = dynamic_cast<Option<T>*>(op);
      if (oc != 0) {
         oc->AddPreDefVal(val);
      }
      else {
         // option found but stored with a different type than T
         Log() << kFATAL << "Option \"" << optname
               << "\" was found, but somehow I could not convert the pointer properly.. please check the syntax of your option declaration" << Endl;
      }
      return;
   }
   Log() << kFATAL << "Option \"" << optname
         << "\" is not declared, hence cannot add predefined value, please check the syntax of your option declaration" << Endl;
}
//______________________________________________________________________
template <class T>
void TMVA::Configurable::AssignOpt(const TString& name, T& valAssign) const
{
// assign an option
TObject* opt = fListOfOptions.FindObject(name);
if (opt!=0) valAssign = ((Option<T>*)opt)->Value();
else
Log() << kFATAL << "Option \"" << name
<< "\" not declared, please check the syntax of your option string" << Endl;
}
#endif
| beniz/root | tmva/tmva/inc/TMVA/Configurable.h | C | lgpl-2.1 | 8,255 |
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package csi
import (
"crypto/sha256"
"errors"
"fmt"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1alpha1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/volume"
)
// csiAttacher implements volume.Attacher (and, below, volume.Detacher) by
// creating/deleting VolumeAttachment API objects and polling them until the
// external CSI attacher reports a result.
type csiAttacher struct {
	plugin        *csiPlugin
	k8s           kubernetes.Interface // client used for VolumeAttachment CRUD
	waitSleepTime time.Duration        // poll interval while waiting for attach/detach
}

// volume.Attacher methods
var _ volume.Attacher = &csiAttacher{}
// Attach makes the volume described by spec available on node nodeName by
// creating a VolumeAttachment object (processed asynchronously by the
// external CSI attacher) and then best-effort waiting for it to report
// success.  It returns the VolumeAttachment name as the attach ID.
func (c *csiAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
	if spec == nil {
		glog.Error(log("attacher.Attach missing volume.Spec"))
		return "", errors.New("missing spec")
	}

	csiSource, err := getCSISourceFromSpec(spec)
	if err != nil {
		glog.Error(log("attacher.Attach failed to get CSI persistent source: %v", err))
		return "", err
	}

	node := string(nodeName)
	pvName := spec.PersistentVolume.GetName()
	// The attachment name is deterministic for a given (volume, driver,
	// node) tuple, so a retried Attach finds the previously created object.
	attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, node)

	attachment := &storage.VolumeAttachment{
		ObjectMeta: meta.ObjectMeta{
			Name: attachID,
		},
		Spec: storage.VolumeAttachmentSpec{
			NodeName: node,
			Attacher: csiSource.Driver,
			Source: storage.VolumeAttachmentSource{
				PersistentVolumeName: &pvName,
			},
		},
		Status: storage.VolumeAttachmentStatus{Attached: false},
	}

	_, err = c.k8s.StorageV1alpha1().VolumeAttachments().Create(attachment)
	alreadyExist := false
	if err != nil {
		// AlreadyExists is fine: the object was created by an earlier attempt.
		if !apierrs.IsAlreadyExists(err) {
			glog.Error(log("attacher.Attach failed: %v", err))
			return "", err
		}
		alreadyExist = true
	}

	if alreadyExist {
		glog.V(4).Info(log("attachment [%v] for volume [%v] already exists (will not be recreated)", attachID, csiSource.VolumeHandle))
	} else {
		glog.V(4).Info(log("attachment [%v] for volume [%v] created successfully", attachID, csiSource.VolumeHandle))
	}

	// probe for attachment update here
	// NOTE: any error from waiting for attachment is logged only. This is because
	// the primary intent of the enclosing method is to create VolumeAttachment.
	// Do NOT return that error here as it is mitigated in attacher.WaitForAttach.
	volAttachmentOK := true
	if _, err := c.waitForVolumeAttachment(csiSource.VolumeHandle, attachID, csiTimeout); err != nil {
		volAttachmentOK = false
		glog.Error(log("attacher.Attach attempted to wait for attachment to be ready, but failed with: %v", err))
	}

	glog.V(4).Info(log("attacher.Attach finished OK with VolumeAttachment verified=%t: attachment object [%s]", volAttachmentOK, attachID))

	return attachID, nil
}
// WaitForAttach blocks until the VolumeAttachment identified by attachID
// reports the volume in spec as attached, the driver reports an attach
// error, or the timeout expires.
func (c *csiAttacher) WaitForAttach(spec *volume.Spec, attachID string, pod *v1.Pod, timeout time.Duration) (string, error) {
	csiSource, err := getCSISourceFromSpec(spec)
	if err != nil {
		glog.Error(log("attacher.WaitForAttach failed to extract CSI volume source: %v", err))
		return "", err
	}

	return c.waitForVolumeAttachment(csiSource.VolumeHandle, attachID, timeout)
}
// waitForVolumeAttachment polls the named VolumeAttachment every
// c.waitSleepTime until it reports Attached, the driver reports an attach
// error, the object is being deleted, or timeout elapses.  Transient GET
// errors are logged and retried.
func (c *csiAttacher) waitForVolumeAttachment(volumeHandle, attachID string, timeout time.Duration) (string, error) {
	glog.V(4).Info(log("probing for updates from CSI driver for [attachment.ID=%v]", attachID))

	ticker := time.NewTicker(c.waitSleepTime)
	defer ticker.Stop()
	timer := time.NewTimer(timeout) // TODO (vladimirvivien) investigate making this configurable
	defer timer.Stop()

	//TODO (vladimirvivien) instead of polling api-server, change to a api-server watch
	for {
		select {
		case <-ticker.C:
			glog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID))
			attach, err := c.k8s.StorageV1alpha1().VolumeAttachments().Get(attachID, meta.GetOptions{})
			if err != nil {
				// transient API error: keep polling until timeout
				glog.Error(log("attacher.WaitForAttach failed (will continue to try): %v", err))
				continue
			}
			// if being deleted, fail fast
			if attach.GetDeletionTimestamp() != nil {
				glog.Error(log("VolumeAttachment [%s] has deletion timestamp, will not continue to wait for attachment", attachID))
				return "", errors.New("volume attachment is being deleted")
			}
			// attachment OK
			if attach.Status.Attached {
				return attachID, nil
			}
			// driver reports attach error
			attachErr := attach.Status.AttachError
			if attachErr != nil {
				glog.Error(log("attachment for %v failed: %v", volumeHandle, attachErr.Message))
				return "", errors.New(attachErr.Message)
			}
		case <-timer.C:
			glog.Error(log("attacher.WaitForAttach timeout after %v [volume=%v; attachment.ID=%v]", timeout, volumeHandle, attachID))
			return "", fmt.Errorf("attachment timeout for volume %v", volumeHandle)
		}
	}
}
// VolumesAreAttached reports, for each spec, whether its VolumeAttachment
// currently has Status.Attached set.  Specs whose source cannot be read or
// whose VolumeAttachment cannot be fetched are logged and omitted from the
// result map (a nil spec aborts the whole call).
func (c *csiAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
	glog.V(4).Info(log("probing attachment status for %d volume(s) ", len(specs)))

	attached := make(map[*volume.Spec]bool)

	for _, spec := range specs {
		if spec == nil {
			glog.Error(log("attacher.VolumesAreAttached missing volume.Spec"))
			return nil, errors.New("missing spec")
		}

		source, err := getCSISourceFromSpec(spec)
		if err != nil {
			glog.Error(log("attacher.VolumesAreAttached failed: %v", err))
			continue
		}

		attachID := getAttachmentName(source.VolumeHandle, source.Driver, string(nodeName))
		glog.V(4).Info(log("probing attachment status for VolumeAttachment %v", attachID))
		attach, err := c.k8s.StorageV1alpha1().VolumeAttachments().Get(attachID, meta.GetOptions{})
		if err != nil {
			// NOTE(review): the spec is skipped (not marked false) on API
			// error — callers must treat a missing key as "unknown".
			glog.Error(log("attacher.VolumesAreAttached failed for attach.ID=%v: %v", attachID, err))
			continue
		}
		glog.V(4).Info(log("attacher.VolumesAreAttached attachment [%v] has status.attached=%t", attachID, attach.Status.Attached))
		attached[spec] = attach.Status.Attached
	}

	return attached, nil
}
// GetDeviceMountPath is an intentional no-op stub: device mounting is not
// implemented for CSI here, so it returns an empty path and no error.
func (c *csiAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
	glog.V(4).Info(log("attacher.GetDeviceMountPath is not implemented"))
	return "", nil
}
// MountDevice is an intentional no-op stub: device mounting is not implemented
// for CSI here; it logs and reports success so callers proceed.
func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
	glog.V(4).Info(log("attacher.MountDevice is not implemented"))
	return nil
}
// Compile-time assertion that csiAttacher satisfies volume.Detacher.
var _ volume.Detacher = &csiAttacher{}

// Detach deletes the VolumeAttachment object for the given volume/node pair,
// then blocks until the detachment is observed complete (or times out).
// volumeName is in format driverName<SEP>volumeHandle as generated by
// plugin.GetVolumeName().
func (c *csiAttacher) Detach(volumeName string, nodeName types.NodeName) error {
	if volumeName == "" {
		glog.Error(log("detacher.Detach missing value for parameter volumeName"))
		// Fixed typo in error message ("exepected" -> "expected").
		return errors.New("missing expected parameter volumeName")
	}
	parts := strings.Split(volumeName, volNameSep)
	if len(parts) != 2 {
		glog.Error(log("detacher.Detach insufficient info encoded in volumeName"))
		return errors.New("volumeName missing expected data")
	}
	driverName := parts[0]
	volID := parts[1]
	attachID := getAttachmentName(volID, driverName, string(nodeName))
	if err := c.k8s.StorageV1alpha1().VolumeAttachments().Delete(attachID, nil); err != nil {
		glog.Error(log("detacher.Detach failed to delete VolumeAttachment [%s]: %v", attachID, err))
		return err
	}
	glog.V(4).Info(log("detacher deleted ok VolumeAttachment.ID=%s", attachID))
	// Deletion is asynchronous; poll until the object is gone or detach fails.
	return c.waitForVolumeDetachment(volID, attachID)
}
// waitForVolumeDetachment polls the VolumeAttachment object until either it
// disappears (IsNotFound => detach complete), the driver reports a detach
// error, or an overall timeout of 10x waitSleepTime elapses.
func (c *csiAttacher) waitForVolumeDetachment(volumeHandle, attachID string) error {
	glog.V(4).Info(log("probing for updates from CSI driver for [attachment.ID=%v]", attachID))
	ticker := time.NewTicker(c.waitSleepTime)
	defer ticker.Stop()
	timeout := c.waitSleepTime * 10
	timer := time.NewTimer(timeout) // TODO (vladimirvivien) investigate making this configurable
	defer timer.Stop()
	//TODO (vladimirvivien) instead of polling api-server, change to a api-server watch
	for {
		select {
		case <-ticker.C:
			glog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID))
			attach, err := c.k8s.StorageV1alpha1().VolumeAttachments().Get(attachID, meta.GetOptions{})
			if err != nil {
				if apierrs.IsNotFound(err) {
					// object deleted or never existed, done
					glog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volumeHandle))
					return nil
				}
				// Transient API error: keep polling until the timer fires.
				glog.Error(log("detacher.WaitForDetach failed for volume [%s] (will continue to try): %v", volumeHandle, err))
				continue
			}
			// driver reports detach error
			detachErr := attach.Status.DetachError
			if detachErr != nil {
				glog.Error(log("detachment for VolumeAttachment [%v] for volume [%s] failed: %v", attachID, volumeHandle, detachErr.Message))
				return errors.New(detachErr.Message)
			}
		case <-timer.C:
			glog.Error(log("detacher.WaitForDetach timeout after %v [volume=%v; attachment.ID=%v]", timeout, volumeHandle, attachID))
			return fmt.Errorf("detachment timed out for volume %v", volumeHandle)
		}
	}
}
// UnmountDevice is an intentional no-op stub: device unmounting is not
// implemented for CSI here; it logs and reports success.
func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
	glog.V(4).Info(log("detacher.UnmountDevice is not implemented"))
	return nil
}
// getAttachmentName returns a deterministic VolumeAttachment object name of
// the form "csi-<hex sha256(volName + csiDriverName + nodeName)>".
func getAttachmentName(volName, csiDriverName, nodeName string) string {
	sum := sha256.Sum256([]byte(volName + csiDriverName + nodeName))
	return fmt.Sprintf("csi-%x", sum)
}
| aleksandra-malinowska/autoscaler | vertical-pod-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher.go | GO | apache-2.0 | 9,693 |
#include "debug.h"
#include <stdarg.h>
#include <windows.h>
#include <stdio.h> /* vsprintf */
#define DPRINTF_BUF_SZ 1024
/* printf-style wrapper around the Win32 OutputDebugString API.
 * Compiled to a no-op unless _DEBUG is defined.
 *
 * Fixes: the original used vsprintf with no bound, so any formatted message
 * longer than DPRINTF_BUF_SZ overflowed the stack buffer; it also never
 * called va_end, which the C standard requires after va_start. */
void OutputDebugStringf(char *fmt, ...)
{
#ifdef _DEBUG
	va_list args;
	char buf[DPRINTF_BUF_SZ];

	va_start(args, fmt);
	/* vsnprintf truncates instead of overflowing; C99 vsnprintf always
	 * NUL-terminates (NOTE: pre-2015 MSVC _vsnprintf does not — confirm
	 * toolchain if targeting very old compilers). */
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	OutputDebugString(buf);
#endif
}
| ninetian/ffmpeginstaller | xvidcore/dshow/src/debug.c | C | apache-2.0 | 307 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.cluster.coordination.flow;
import org.apache.nifi.cluster.protocol.DataFlow;
import org.apache.nifi.cluster.protocol.NodeIdentifier;
/**
 * <p>
 * A FlowElection is responsible for examining multiple versions of a dataflow and determining which of
 * the versions is the "correct" version of the flow.
 * </p>
 */
public interface FlowElection {
    /**
     * Checks if the election has completed or not.
     *
     * @return <code>true</code> if the election has completed, <code>false</code> otherwise.
     */
    boolean isElectionComplete();
    /**
     * Returns <code>true</code> if a vote has already been counted for the given Node Identifier, <code>false</code> otherwise.
     *
     * @param nodeIdentifier the identifier of the node
     * @return <code>true</code> if a vote has already been counted for the given Node Identifier, <code>false</code> otherwise.
     */
    boolean isVoteCounted(NodeIdentifier nodeIdentifier);
    /**
     * If the election has not yet completed, adds the given DataFlow to the list of candidates
     * (if it is not already in the running) and increments the number of votes for this DataFlow by 1.
     * If the election has completed, the given candidate is ignored, and the already-elected DataFlow
     * will be returned. If the election has not yet completed, a vote will be cast for the given
     * candidate and <code>null</code> will be returned, signifying that no candidate has yet been chosen.
     *
     * <p>NOTE(review): presumably each node's vote is counted at most once (see
     * {@link #isVoteCounted(NodeIdentifier)}) — confirm against implementations.</p>
     *
     * @param candidate the DataFlow to vote for and add to the pool of candidates if not already present
     * @param nodeIdentifier the identifier of the node casting the vote
     *
     * @return the elected {@link DataFlow}, or <code>null</code> if no DataFlow has yet been elected
     */
    DataFlow castVote(DataFlow candidate, NodeIdentifier nodeIdentifier);
    /**
     * Returns the DataFlow that has been elected as the "correct" version of the flow, or <code>null</code>
     * if the election has not yet completed.
     *
     * @return the DataFlow that has been elected as the "correct" version of the flow, or <code>null</code>
     * if the election has not yet completed.
     */
    DataFlow getElectedDataFlow();
    /**
     * Returns a human-readable description of the status of the election
     *
     * @return a human-readable description of the status of the election
     */
    String getStatusDescription();
}
| WilliamNouet/nifi | nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/coordination/flow/FlowElection.java | Java | apache-2.0 | 3,255 |
<?php
/**
* This file is part of PHPWord - A pure PHP library for reading and writing
* word processing documents.
*
* PHPWord is free software distributed under the terms of the GNU Lesser
* General Public License version 3 as published by the Free Software Foundation.
*
* For the full copyright and license information, please read the LICENSE
* file that was distributed with this source code. For the full list of
* contributors, visit https://github.com/PHPOffice/PHPWord/contributors.
*
* @link https://github.com/PHPOffice/PHPWord
* @copyright 2010-2014 PHPWord contributors
* @license http://www.gnu.org/licenses/lgpl.txt LGPL version 3
*/
namespace PhpOffice\PhpWord\Tests\Exception;
use PhpOffice\PhpWord\Exception\InvalidStyleException;
/**
 * Test class for PhpOffice\PhpWord\Exception\InvalidStyleException
 *
 * @coversDefaultClass \PhpOffice\PhpWord\Exception\InvalidStyleException
 * @runTestsInSeparateProcesses
 */
class InvalidStyleExceptionTest extends \PHPUnit_Framework_TestCase
{
    /**
     * Throw new exception
     *
     * The @expectedException annotation below makes PHPUnit mark the test as
     * passing if and only if this exact exception type is thrown.
     *
     * @expectedException \PhpOffice\PhpWord\Exception\InvalidStyleException
     * @covers \PhpOffice\PhpWord\Exception\InvalidStyleException
     */
    public function testThrowException()
    {
        throw new InvalidStyleException;
    }
}
| vikramraj87/refgen3 | vendor/phpoffice/phpword/tests/PhpWord/Tests/Exception/InvalidStyleExceptionTest.php | PHP | bsd-3-clause | 1,334 |
<!DOCTYPE html>
<link rel="help" href="https://drafts.csswg.org/css-backgrounds/#border-image-slice" />
<link rel="help" href="https://drafts.csswg.org/css-backgrounds/#border-image" />
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<style>
div {
  border: 1px solid;
  border-image-slice: 1;
}
div {
  /* Should reset border-image-slice */
  border-image: linear-gradient(black, black);
}
</style>
<div>This text should not have a border, just corner dots</div>
<script>
// The `border-image` shorthand must reset all border-image-* longhands,
// so border-image-slice returns to its initial value (100%) even though
// it was explicitly set to 1 in the first rule above.
test(() => {
  assert_equals(getComputedStyle(document.querySelector("div")).borderImageSlice, "100%");
}, "Check that the border-image shorthand resets border-image-slice to its initial value.");
</script>
| scheib/chromium | third_party/blink/web_tests/external/wpt/css/css-backgrounds/border-image-slice-shorthand-reset.html | HTML | bsd-3-clause | 760 |
#include <sys/mman.h>
#include "syscall.h"
/* POSIX mlockall(2): thin wrapper that forwards directly to the kernel
 * syscall. flags is a bitmask (MCL_CURRENT and/or MCL_FUTURE per the
 * declaration in <sys/mman.h>); returns 0 or -1 with errno set. */
int mlockall(int flags)
{
	return syscall(SYS_mlockall, flags);
}
| youtube/cobalt | third_party/musl/src/mman/mlockall.c | C | bsd-3-clause | 110 |
(function ($, Drupal) {

  /**
   * Toggle show/hide links for off canvas layout.
   *
   * Wires three click handlers: one that opens the #off-canvas panel, one on
   * the explicit hide link, and a click-outside-to-dismiss handler on the
   * page wrapper.
   *
   * NOTE(review): attach() can run more than once (e.g. after AJAX updates)
   * and these handlers are bound without jQuery once(), so they may be
   * attached repeatedly — confirm whether duplicate bindings matter here.
   */
  Drupal.behaviors.omegaOffCanvasLayout = {
    attach: function (context) {
      // Open the panel on click; stop the event so the document-level
      // dismiss handler below does not immediately close it again.
      $('#off-canvas').click(function(e) {
        if (!$(this).hasClass('is-visible')) {
          $(this).addClass('is-visible');
          e.preventDefault();
          e.stopPropagation();
        }
      });
      // Explicit hide link inside the panel.
      $('#off-canvas-hide').click(function(e) {
        $(this).parent().removeClass('is-visible');
        e.preventDefault();
        e.stopPropagation();
      });
      // Dismiss when clicking anywhere on the page outside the open panel.
      $('.l-page').click(function(e) {
        if($('#off-canvas').hasClass('is-visible') && $(e.target).closest('#off-canvas').length === 0) {
          $('#off-canvas').removeClass('is-visible');
          e.stopPropagation();
        }
      });
    }
  };

})(jQuery, Drupal);
/*************************************************************************/
/* reference.h */
/*************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* http://www.godotengine.org */
/*************************************************************************/
/* Copyright (c) 2007-2015 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/*************************************************************************/
#ifndef REFERENCE_H
#define REFERENCE_H
#include "object.h"
#include "safe_refcount.h"
#include "ref_ptr.h"
#include "object_type_db.h"
/**
@author Juan Linietsky <reduzio@gmail.com>
*/
// Base class for intrusively reference-counted objects. Ref<T> manipulates
// the two counters below via init_ref()/reference()/unreference().
class Reference : public Object {

	OBJ_TYPE(Reference, Object);
	friend class RefBase;
	SafeRefCount refcount;      // number of live Ref<T> handles
	SafeRefCount refcount_init; // tracks whether the object was ever referenced

protected:
	static void _bind_methods();

public:
	// True once the object has been claimed by at least one Ref.
	_FORCE_INLINE_ bool is_referenced() const { return refcount_init.get() < 1; }
	bool init_ref();             // take the initial reference; false on failure
	void reference();            // increment the count
	bool unreference();          // decrement; true when the count reaches zero
	int reference_get_count() const;

	Reference();
	~Reference();
};
#if 0
class RefBase {
protected:
void ref_inc(Reference *p_reference);
bool ref_dec(Reference *p_reference);
Reference *first_ref(Reference *p_reference);
Reference * get_reference_from_ref(const RefBase &p_base);
virtual Reference * get_reference() const=0;
char * get_refptr_data(const RefPtr &p_refptr) const;
public:
virtual ~RefBase() {}
};
#endif
/**
 * Ref<T>: intrusive reference-counting smart pointer for Reference-derived
 * types. Copying a Ref increments the pointee's count; destruction/unref()
 * decrements it and deletes the object when the count reaches zero.
 * Converts to/from Variant through the opaque RefPtr wrapper.
 *
 * BUGFIX: Ref(T *p_reference) previously left the `reference` member
 * uninitialized when p_reference was non-NULL but init_ref() failed inside
 * ref_pointer() (only the else-branch assigned NULL), causing undefined
 * behavior in unref()/~Ref(). The member is now always initialized first.
 */
template<class T>
class Ref {

	T *reference; // managed object; NULL when this Ref is empty

	// Share p_from's object: release the current one, then take a new count.
	void ref(const Ref &p_from) {

		if (p_from.reference == reference)
			return; // same pointee (also covers self-assignment)

		unref();
		reference = p_from.reference;
		if (reference)
			reference->reference();
	}

	// Adopt a raw pointer, taking the initial reference. Callers must have
	// set `reference` to NULL beforehand: on init_ref() failure it is left
	// untouched.
	void ref_pointer(T *p_ref) {

		ERR_FAIL_COND(!p_ref);

		if (p_ref->init_ref())
			reference = p_ref;
	}

	//virtual Reference * get_reference() const { return reference; }
public:
	_FORCE_INLINE_ bool operator<(const Ref<T> &p_r) const {
		return reference < p_r.reference;
	}
	_FORCE_INLINE_ bool operator==(const Ref<T> &p_r) const {
		return reference == p_r.reference;
	}
	_FORCE_INLINE_ bool operator!=(const Ref<T> &p_r) const {
		return reference != p_r.reference;
	}

	_FORCE_INLINE_ T *operator->() {
		return reference;
	}

	_FORCE_INLINE_ T *operator*() {
		return reference;
	}

	_FORCE_INLINE_ const T *operator->() const {
		return reference;
	}

	_FORCE_INLINE_ const T *ptr() const {
		return reference;
	}
	_FORCE_INLINE_ T *ptr() {
		return reference;
	}

	_FORCE_INLINE_ const T *operator*() const {
		return reference;
	}

	// Wrap this reference in the opaque RefPtr container used by Variant.
	RefPtr get_ref_ptr() const {

		RefPtr refptr;
		Ref<Reference> *irr = reinterpret_cast<Ref<Reference> *>(refptr.get_data());
		*irr = *this;
		return refptr;
	}

#if 0
	// go to RefPtr
	operator RefPtr() const {

		return get_ref_ptr();
	}
#endif

#if 1
	operator Variant() const {

		return Variant(get_ref_ptr());
	}
#endif

	void operator=(const Ref &p_from) {

		ref(p_from);
	}

	// Cross-type assignment: accepts any Ref whose pointee casts to T;
	// becomes empty when the cast source is NULL.
	template<class T_Other>
	void operator=(const Ref<T_Other> &p_from) {

		Reference *refb = const_cast<Reference *>(static_cast<const Reference *>(p_from.ptr()));
		if (!refb) {
			unref();
			return;
		}
		Ref r;
		r.reference = refb->cast_to<T>();
		ref(r);
		// Clear r so its destructor does not drop the count ref() just took.
		r.reference = NULL;
	}

	void operator=(const RefPtr &p_refptr) {

		Ref<Reference> *irr = reinterpret_cast<Ref<Reference> *>(p_refptr.get_data());
		Reference *refb = irr->ptr();
		if (!refb) {
			unref();
			return;
		}
		Ref r;
		r.reference = refb->cast_to<T>();
		ref(r);
		r.reference = NULL;
	}

	void operator=(const Variant &p_variant) {

		RefPtr refptr = p_variant;
		Ref<Reference> *irr = reinterpret_cast<Ref<Reference> *>(refptr.get_data());
		Reference *refb = irr->ptr();
		if (!refb) {
			unref();
			return;
		}
		Ref r;
		r.reference = refb->cast_to<T>();
		ref(r);
		r.reference = NULL;
	}

	Ref(const Ref &p_from) {

		reference = NULL;
		ref(p_from);
	}

	template<class T_Other>
	Ref(const Ref<T_Other> &p_from) {

		reference = NULL;
		Reference *refb = const_cast<Reference *>(static_cast<const Reference *>(p_from.ptr()));
		if (!refb) {
			unref();
			return;
		}
		Ref r;
		r.reference = refb->cast_to<T>();
		ref(r);
		r.reference = NULL;
	}

	Ref(T *p_reference) {

		// Always initialize: ref_pointer() leaves `reference` unset when
		// init_ref() fails (see BUGFIX note in the class comment).
		reference = NULL;
		if (p_reference)
			ref_pointer(p_reference);
	}

	Ref(const Variant &p_variant) {

		RefPtr refptr = p_variant;
		Ref<Reference> *irr = reinterpret_cast<Ref<Reference> *>(refptr.get_data());
		reference = NULL;
		Reference *refb = irr->ptr();
		if (!refb) {
			unref();
			return;
		}
		Ref r;
		r.reference = refb->cast_to<T>();
		ref(r);
		r.reference = NULL;
	}

	Ref(const RefPtr &p_refptr) {

		Ref<Reference> *irr = reinterpret_cast<Ref<Reference> *>(p_refptr.get_data());
		reference = NULL;
		Reference *refb = irr->ptr();
		if (!refb) {
			unref();
			return;
		}
		Ref r;
		r.reference = refb->cast_to<T>();
		ref(r);
		r.reference = NULL;
	}

	inline bool is_valid() const { return reference != NULL; }
	inline bool is_null() const { return reference == NULL; }

	// Drop our count; deletes the pointee when this was the last reference.
	void unref() {
		//TODO this should be moved to mutexes, since this engine does not really
		// do a lot of referencing on references and stuff
		// mutexes will avoid more crashes?

		if (reference && reference->unreference()) {
			memdelete(reference);
		}
		reference = NULL;
	}

	// Allocate a fresh T and take ownership of it.
	void instance() {
		ref(memnew(T));
	}

	Ref() {
		reference = NULL;
	}

	~Ref() {
		unref();
	}
};
typedef Ref<Reference> REF;
// WeakRef stores the target as an ObjectID rather than a counted reference,
// so holding a WeakRef does not contribute to the target's reference count.
// get_ref() resolves the stored ID back into a Variant.
class WeakRef : public Reference {

	OBJ_TYPE(WeakRef, Reference);

	ObjectID ref; // instance ID of the tracked object

protected:
	static void _bind_methods();

public:
	Variant get_ref() const;            // resolve the ID to the live object, if any
	void set_obj(Object *p_object);     // track a raw object
	void set_ref(const REF &p_ref);     // track the object held by a Ref

	WeakRef();
};
#endif // REFERENCE_H
| a12n/godot | core/reference.h | C | mit | 7,264 |
/// <reference path="MediaStream.d.ts" />
/// <reference path="RTCPeerConnection.d.ts" />

// Compile-time exercises for the WebRTC ambient type declarations.
// These statements only need to type-check; they are not meant to be run.

// Construction: configuration, media constraints, peer connection.
var config: RTCConfiguration =
    { iceServers: [{ urls: "stun.l.google.com:19302" }] };
var constraints: RTCMediaConstraints =
    { mandatory: { offerToReceiveAudio: true, offerToReceiveVideo: true } };
var peerConnection: RTCPeerConnection =
    new RTCPeerConnection(config, constraints);

// getUserMedia success/error callback signatures.
navigator.getUserMedia({ audio: true, video: true },
    stream => {
        peerConnection.addStream(stream);
    },
    error => {
        console.log('Error message: ' + error.message);
        console.log('Error name: ' + error.name);
    });

// Event-handler property types on RTCPeerConnection.
peerConnection.onaddstream = ev => console.log(ev.type);
peerConnection.ondatachannel = ev => console.log(ev.type);
peerConnection.oniceconnectionstatechange = ev => console.log(ev.type);
peerConnection.onnegotiationneeded = ev => console.log(ev.type);
peerConnection.onopen = ev => console.log(ev.type);
peerConnection.onicecandidate = ev => console.log(ev.type);
peerConnection.onremovestream = ev => console.log(ev.type);
peerConnection.onstatechange = ev => console.log(ev.type);

// Offer creation and setLocalDescription plumbing.
peerConnection.createOffer(
    offer => {
        peerConnection.setLocalDescription(offer,
            () => console.log("set local description"),
            error => console.log("Error setting local description: " + error));
    },
    error => console.log("Error creating offer: " + error));

var type: string = RTCSdpType[RTCSdpType.offer];
var offer: RTCSessionDescriptionInit = { type: type, sdp: "some sdp" };

// Answer flow via the unprefixed RTCSessionDescription constructor.
var sessionDescription = new RTCSessionDescription(offer);
peerConnection.setRemoteDescription(sessionDescription, () => {
    peerConnection.createAnswer(
        answer => {
            peerConnection.setLocalDescription(answer,
                () => console.log('Set local description'),
                error => console.log(
                    "Error setting local description from created answer: " + error +
                    "; answer.sdp=" + answer.sdp));
        },
        error => console.log("Error creating answer: " + error));
},
    error => console.log('Error setting remote description: ' + error +
        "; offer.sdp=" + offer.sdp));

// Same answer flow via the webkit-prefixed session description constructor.
var webkitSessionDescription = new webkitRTCSessionDescription(offer);
peerConnection.setRemoteDescription(webkitSessionDescription, () => {
    peerConnection.createAnswer(
        answer => {
            peerConnection.setLocalDescription(answer,
                () => console.log('Set local description'),
                error => console.log(
                    "Error setting local description from created answer: " + error +
                    "; answer.sdp=" + answer.sdp));
        },
        error => console.log("Error creating answer: " + error));
},
    error => console.log('Error setting remote description: ' + error +
        "; offer.sdp=" + offer.sdp));

// Same answer flow via the moz-prefixed session description constructor.
var mozSessionDescription = new mozRTCSessionDescription(offer);
peerConnection.setRemoteDescription(mozSessionDescription, () => {
    peerConnection.createAnswer(
        answer => {
            peerConnection.setLocalDescription(answer,
                () => console.log('Set local description'),
                error => console.log(
                    "Error setting local description from created answer: " + error +
                    "; answer.sdp=" + answer.sdp));
        },
        error => console.log("Error creating answer: " + error));
},
    error => console.log('Error setting remote description: ' + error +
        "; offer.sdp=" + offer.sdp));

// webkit-prefixed peer connection constructor.
var wkPeerConnection: webkitRTCPeerConnection =
    new webkitRTCPeerConnection(config, constraints);
| ryan-codingintrigue/DefinitelyTyped | webrtc/RTCPeerConnection-tests.ts | TypeScript | mit | 3,396 |
/*!
* OOjs UI v0.12.4
* https://www.mediawiki.org/wiki/OOjs_UI
*
* Copyright 2011–2015 OOjs UI Team and other contributors.
* Released under the MIT license
* http://oojs.mit-license.org
*
* Date: 2015-08-13T21:01:12Z
*/
.oo-ui-icon-bigger {
background-image: url("themes/mediawiki/images/icons/bigger-rtl.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bigger-rtl.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bigger-rtl.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bigger-rtl.png");
}
.oo-ui-icon-bigger-invert {
background-image: url("themes/mediawiki/images/icons/bigger-rtl-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bigger-rtl-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bigger-rtl-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bigger-rtl-invert.png");
}
.oo-ui-icon-smaller {
background-image: url("themes/mediawiki/images/icons/smaller-rtl.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/smaller-rtl.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/smaller-rtl.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/smaller-rtl.png");
}
.oo-ui-icon-smaller-invert {
background-image: url("themes/mediawiki/images/icons/smaller-rtl-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/smaller-rtl-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/smaller-rtl-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/smaller-rtl-invert.png");
}
.oo-ui-icon-subscript {
background-image: url("themes/mediawiki/images/icons/subscript-rtl.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/subscript-rtl.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/subscript-rtl.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/subscript-rtl.png");
}
.oo-ui-icon-subscript-invert {
background-image: url("themes/mediawiki/images/icons/subscript-rtl-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/subscript-rtl-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/subscript-rtl-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/subscript-rtl-invert.png");
}
.oo-ui-icon-superscript {
background-image: url("themes/mediawiki/images/icons/superscript-rtl.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/superscript-rtl.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/superscript-rtl.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/superscript-rtl.png");
}
.oo-ui-icon-superscript-invert {
background-image: url("themes/mediawiki/images/icons/superscript-rtl-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/superscript-rtl-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/superscript-rtl-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/superscript-rtl-invert.png");
}
.oo-ui-icon-bold {
background-image: url("themes/mediawiki/images/icons/bold-a.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-a.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-a.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-a.png");
}
/* @noflip */
.oo-ui-icon-bold:lang(ar) {
background-image: url("themes/mediawiki/images/icons/bold-arab-ain.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-arab-ain.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-arab-ain.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-arab-ain.png");
}
/* @noflip */
.oo-ui-icon-bold:lang(be) {
background-image: url("themes/mediawiki/images/icons/bold-cyrl-te.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-cyrl-te.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-cyrl-te.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-cyrl-te.png");
}
/* @noflip */
.oo-ui-icon-bold:lang(cs) {
background-image: url("themes/mediawiki/images/icons/bold-b.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-b.png");
}
/* @noflip */
.oo-ui-icon-bold:lang(en) {
background-image: url("themes/mediawiki/images/icons/bold-b.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-b.png");
}
/* @noflip */
.oo-ui-icon-bold:lang(he) {
background-image: url("themes/mediawiki/images/icons/bold-b.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-b.png");
}
/* @noflip */
.oo-ui-icon-bold:lang(ml) {
background-image: url("themes/mediawiki/images/icons/bold-b.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-b.png");
}
/* @noflip */
.oo-ui-icon-bold:lang(pl) {
background-image: url("themes/mediawiki/images/icons/bold-b.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-b.png");
}
/* @noflip */
.oo-ui-icon-bold:lang(sco) {
background-image: url("themes/mediawiki/images/icons/bold-b.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-b.png");
}
/* @noflip */
.oo-ui-icon-bold:lang(da) {
background-image: url("themes/mediawiki/images/icons/bold-f.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-f.png");
}
/* @noflip */
.oo-ui-icon-bold:lang(de) {
background-image: url("themes/mediawiki/images/icons/bold-f.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-f.png");
}
/* @noflip */
.oo-ui-icon-bold:lang(hu) {
background-image: url("themes/mediawiki/images/icons/bold-f.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-f.png");
}
/* @noflip */
.oo-ui-icon-bold:lang(ksh) {
background-image: url("themes/mediawiki/images/icons/bold-f.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-f.png");
}
/* @noflip */
.oo-ui-icon-bold:lang(nn) {
background-image: url("themes/mediawiki/images/icons/bold-f.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-f.png");
}
/* @noflip */
.oo-ui-icon-bold:lang(no) {
background-image: url("themes/mediawiki/images/icons/bold-f.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-f.png");
}
/* @noflip */
.oo-ui-icon-bold:lang(sv) {
background-image: url("themes/mediawiki/images/icons/bold-f.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-f.png");
}
/* @noflip */
.oo-ui-icon-bold:lang(es) {
background-image: url("themes/mediawiki/images/icons/bold-n.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-n.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-n.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-n.png");
}
/* @noflip */
.oo-ui-icon-bold:lang(gl) {
background-image: url("themes/mediawiki/images/icons/bold-n.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-n.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-n.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-n.png");
}
/* @noflip */
.oo-ui-icon-bold:lang(pt) {
background-image: url("themes/mediawiki/images/icons/bold-n.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-n.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-n.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-n.png");
}
/* @noflip */
.oo-ui-icon-bold:lang(eu) {
background-image: url("themes/mediawiki/images/icons/bold-l.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-l.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-l.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-l.png");
}
/* @noflip */
.oo-ui-icon-bold:lang(fi) {
background-image: url("themes/mediawiki/images/icons/bold-l.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-l.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-l.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-l.png");
}
/* @noflip */
.oo-ui-icon-bold:lang(fa) {
background-image: url("themes/mediawiki/images/icons/bold-arab-dad.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-arab-dad.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-arab-dad.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-arab-dad.png");
}
/* @noflip */
.oo-ui-icon-bold:lang(fr) {
background-image: url("themes/mediawiki/images/icons/bold-g.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-g.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-g.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-g.png");
}
/* @noflip */
.oo-ui-icon-bold:lang(it) {
background-image: url("themes/mediawiki/images/icons/bold-g.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-g.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-g.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-g.png");
}
/* @noflip */
.oo-ui-icon-bold:lang(hy) {
background-image: url("themes/mediawiki/images/icons/bold-armn-to.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-armn-to.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-armn-to.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-armn-to.png");
}
/* @noflip */
.oo-ui-icon-bold:lang(ka) {
background-image: url("themes/mediawiki/images/icons/bold-geor-man.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-geor-man.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-geor-man.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-geor-man.png");
}
/* @noflip */
.oo-ui-icon-bold:lang(ky) {
background-image: url("themes/mediawiki/images/icons/bold-cyrl-zhe.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-cyrl-zhe.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-cyrl-zhe.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-cyrl-zhe.png");
}
/* @noflip */
.oo-ui-icon-bold:lang(ru) {
background-image: url("themes/mediawiki/images/icons/bold-cyrl-zhe.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-cyrl-zhe.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-cyrl-zhe.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-cyrl-zhe.png");
}
/* @noflip */
.oo-ui-icon-bold:lang(nl) {
background-image: url("themes/mediawiki/images/icons/bold-v.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-v.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-v.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-v.png");
}
/* @noflip */
.oo-ui-icon-bold:lang(os) {
background-image: url("themes/mediawiki/images/icons/bold-cyrl-be.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-cyrl-be.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-cyrl-be.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-cyrl-be.png");
}
.oo-ui-icon-bold-invert {
background-image: url("themes/mediawiki/images/icons/bold-a-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-a-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-a-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-a-invert.png");
}
/* @noflip */
.oo-ui-icon-bold-invert:lang(ar) {
background-image: url("themes/mediawiki/images/icons/bold-arab-ain-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-arab-ain-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-arab-ain-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-arab-ain-invert.png");
}
/* @noflip */
.oo-ui-icon-bold-invert:lang(be) {
background-image: url("themes/mediawiki/images/icons/bold-cyrl-te-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-cyrl-te-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-cyrl-te-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-cyrl-te-invert.png");
}
/* @noflip */
.oo-ui-icon-bold-invert:lang(cs) {
background-image: url("themes/mediawiki/images/icons/bold-b-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-b-invert.png");
}
/* @noflip */
.oo-ui-icon-bold-invert:lang(en) {
background-image: url("themes/mediawiki/images/icons/bold-b-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-b-invert.png");
}
/* @noflip */
.oo-ui-icon-bold-invert:lang(he) {
background-image: url("themes/mediawiki/images/icons/bold-b-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-b-invert.png");
}
/* @noflip */
.oo-ui-icon-bold-invert:lang(ml) {
background-image: url("themes/mediawiki/images/icons/bold-b-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-b-invert.png");
}
/* @noflip */
.oo-ui-icon-bold-invert:lang(pl) {
background-image: url("themes/mediawiki/images/icons/bold-b-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-b-invert.png");
}
/* @noflip */
.oo-ui-icon-bold-invert:lang(sco) {
background-image: url("themes/mediawiki/images/icons/bold-b-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-b-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-b-invert.png");
}
/* @noflip */
.oo-ui-icon-bold-invert:lang(da) {
background-image: url("themes/mediawiki/images/icons/bold-f-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-f-invert.png");
}
/* @noflip */
.oo-ui-icon-bold-invert:lang(de) {
background-image: url("themes/mediawiki/images/icons/bold-f-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-f-invert.png");
}
/* @noflip */
.oo-ui-icon-bold-invert:lang(hu) {
background-image: url("themes/mediawiki/images/icons/bold-f-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-f-invert.png");
}
/* @noflip */
.oo-ui-icon-bold-invert:lang(ksh) {
background-image: url("themes/mediawiki/images/icons/bold-f-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-f-invert.png");
}
/* @noflip */
.oo-ui-icon-bold-invert:lang(nn) {
background-image: url("themes/mediawiki/images/icons/bold-f-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-f-invert.png");
}
/* @noflip */
.oo-ui-icon-bold-invert:lang(no) {
background-image: url("themes/mediawiki/images/icons/bold-f-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-f-invert.png");
}
/* @noflip */
.oo-ui-icon-bold-invert:lang(sv) {
background-image: url("themes/mediawiki/images/icons/bold-f-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-f-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-f-invert.png");
}
/* @noflip */
.oo-ui-icon-bold-invert:lang(es) {
background-image: url("themes/mediawiki/images/icons/bold-n-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-n-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-n-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-n-invert.png");
}
/* @noflip */
.oo-ui-icon-bold-invert:lang(gl) {
background-image: url("themes/mediawiki/images/icons/bold-n-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-n-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-n-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-n-invert.png");
}
/* @noflip */
.oo-ui-icon-bold-invert:lang(pt) {
background-image: url("themes/mediawiki/images/icons/bold-n-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-n-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-n-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-n-invert.png");
}
/* @noflip */
.oo-ui-icon-bold-invert:lang(eu) {
background-image: url("themes/mediawiki/images/icons/bold-l-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-l-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-l-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-l-invert.png");
}
/* @noflip */
.oo-ui-icon-bold-invert:lang(fi) {
background-image: url("themes/mediawiki/images/icons/bold-l-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-l-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-l-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-l-invert.png");
}
/* @noflip */
.oo-ui-icon-bold-invert:lang(fa) {
background-image: url("themes/mediawiki/images/icons/bold-arab-dad-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-arab-dad-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-arab-dad-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-arab-dad-invert.png");
}
/* @noflip */
.oo-ui-icon-bold-invert:lang(fr) {
background-image: url("themes/mediawiki/images/icons/bold-g-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-g-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-g-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-g-invert.png");
}
/* @noflip */
.oo-ui-icon-bold-invert:lang(it) {
background-image: url("themes/mediawiki/images/icons/bold-g-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-g-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-g-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-g-invert.png");
}
/* @noflip */
.oo-ui-icon-bold-invert:lang(hy) {
background-image: url("themes/mediawiki/images/icons/bold-armn-to-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-armn-to-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-armn-to-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-armn-to-invert.png");
}
/* @noflip */
.oo-ui-icon-bold-invert:lang(ka) {
background-image: url("themes/mediawiki/images/icons/bold-geor-man-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-geor-man-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-geor-man-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-geor-man-invert.png");
}
/* @noflip */
.oo-ui-icon-bold-invert:lang(ky) {
background-image: url("themes/mediawiki/images/icons/bold-cyrl-zhe-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-cyrl-zhe-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-cyrl-zhe-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-cyrl-zhe-invert.png");
}
/* @noflip */
.oo-ui-icon-bold-invert:lang(ru) {
background-image: url("themes/mediawiki/images/icons/bold-cyrl-zhe-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-cyrl-zhe-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-cyrl-zhe-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-cyrl-zhe-invert.png");
}
/* @noflip */
.oo-ui-icon-bold-invert:lang(nl) {
background-image: url("themes/mediawiki/images/icons/bold-v-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-v-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-v-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-v-invert.png");
}
/* @noflip */
.oo-ui-icon-bold-invert:lang(os) {
background-image: url("themes/mediawiki/images/icons/bold-cyrl-be-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-cyrl-be-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/bold-cyrl-be-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/bold-cyrl-be-invert.png");
}
/* "Italic" toolbar icon. The default glyph is "a" (italic-a); each :lang() rule
   below substitutes the letter conventionally used for italic in that language
   (i, k, c, e, d, or a script-specific glyph for ar/fa/hy/ka). Per rule, the four
   background-image declarations form a fallback cascade — later declarations
   override where supported: PNG fallback, -webkit SVG, standard SVG, then
   -o-prefixed PNG (presumably re-serving PNG to old Opera; confirm before
   reordering). NOTE(review): "@embed" / "@noflip" comments appear to be
   ResourceLoader / CSSJanus directives — do not remove. */
.oo-ui-icon-italic {
background-image: url("themes/mediawiki/images/icons/italic-a.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-a.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-a.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-a.png");
}
/* @noflip */
.oo-ui-icon-italic:lang(ar) {
background-image: url("themes/mediawiki/images/icons/italic-arab-meem.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-arab-meem.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-arab-meem.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-arab-meem.png");
}
/* @noflip */
.oo-ui-icon-italic:lang(cs) {
background-image: url("themes/mediawiki/images/icons/italic-i.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-i.png");
}
/* @noflip */
.oo-ui-icon-italic:lang(en) {
background-image: url("themes/mediawiki/images/icons/italic-i.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-i.png");
}
/* @noflip */
.oo-ui-icon-italic:lang(fr) {
background-image: url("themes/mediawiki/images/icons/italic-i.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-i.png");
}
/* @noflip */
.oo-ui-icon-italic:lang(he) {
background-image: url("themes/mediawiki/images/icons/italic-i.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-i.png");
}
/* @noflip */
.oo-ui-icon-italic:lang(ml) {
background-image: url("themes/mediawiki/images/icons/italic-i.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-i.png");
}
/* @noflip */
.oo-ui-icon-italic:lang(pl) {
background-image: url("themes/mediawiki/images/icons/italic-i.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-i.png");
}
/* @noflip */
.oo-ui-icon-italic:lang(pt) {
background-image: url("themes/mediawiki/images/icons/italic-i.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-i.png");
}
/* @noflip */
.oo-ui-icon-italic:lang(sco) {
background-image: url("themes/mediawiki/images/icons/italic-i.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-i.png");
}
/* @noflip */
.oo-ui-icon-italic:lang(be) {
background-image: url("themes/mediawiki/images/icons/italic-k.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k.png");
}
/* @noflip */
.oo-ui-icon-italic:lang(da) {
background-image: url("themes/mediawiki/images/icons/italic-k.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k.png");
}
/* @noflip */
.oo-ui-icon-italic:lang(de) {
background-image: url("themes/mediawiki/images/icons/italic-k.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k.png");
}
/* @noflip */
.oo-ui-icon-italic:lang(fi) {
background-image: url("themes/mediawiki/images/icons/italic-k.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k.png");
}
/* @noflip */
.oo-ui-icon-italic:lang(ky) {
background-image: url("themes/mediawiki/images/icons/italic-k.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k.png");
}
/* @noflip */
.oo-ui-icon-italic:lang(nn) {
background-image: url("themes/mediawiki/images/icons/italic-k.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k.png");
}
/* @noflip */
.oo-ui-icon-italic:lang(no) {
background-image: url("themes/mediawiki/images/icons/italic-k.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k.png");
}
/* @noflip */
.oo-ui-icon-italic:lang(os) {
background-image: url("themes/mediawiki/images/icons/italic-k.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k.png");
}
/* @noflip */
.oo-ui-icon-italic:lang(sv) {
background-image: url("themes/mediawiki/images/icons/italic-k.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k.png");
}
/* @noflip */
.oo-ui-icon-italic:lang(ru) {
background-image: url("themes/mediawiki/images/icons/italic-k.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k.png");
}
/* @noflip */
.oo-ui-icon-italic:lang(es) {
background-image: url("themes/mediawiki/images/icons/italic-c.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-c.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-c.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-c.png");
}
/* @noflip */
.oo-ui-icon-italic:lang(gl) {
background-image: url("themes/mediawiki/images/icons/italic-c.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-c.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-c.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-c.png");
}
/* @noflip */
.oo-ui-icon-italic:lang(it) {
background-image: url("themes/mediawiki/images/icons/italic-c.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-c.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-c.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-c.png");
}
/* @noflip */
.oo-ui-icon-italic:lang(nl) {
background-image: url("themes/mediawiki/images/icons/italic-c.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-c.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-c.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-c.png");
}
/* @noflip */
.oo-ui-icon-italic:lang(eu) {
background-image: url("themes/mediawiki/images/icons/italic-e.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-e.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-e.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-e.png");
}
/* @noflip */
.oo-ui-icon-italic:lang(fa) {
background-image: url("themes/mediawiki/images/icons/italic-arab-keheh-jeem.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-arab-keheh-jeem.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-arab-keheh-jeem.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-arab-keheh-jeem.png");
}
/* @noflip */
.oo-ui-icon-italic:lang(hu) {
background-image: url("themes/mediawiki/images/icons/italic-d.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-d.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-d.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-d.png");
}
/* @noflip */
.oo-ui-icon-italic:lang(hy) {
background-image: url("themes/mediawiki/images/icons/italic-armn-sha.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-armn-sha.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-armn-sha.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-armn-sha.png");
}
/* @noflip */
.oo-ui-icon-italic:lang(ksh) {
background-image: url("themes/mediawiki/images/icons/italic-s.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-s.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-s.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-s.png");
}
/* @noflip */
.oo-ui-icon-italic:lang(ka) {
background-image: url("themes/mediawiki/images/icons/italic-geor-kan.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-geor-kan.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-geor-kan.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-geor-kan.png");
}
/* Inverted (light-on-dark) counterparts of the .oo-ui-icon-italic rules above:
   identical :lang() mapping, with "-invert" image assets. Each rule keeps the
   same four-declaration fallback cascade (PNG, -webkit SVG, standard SVG,
   -o PNG — presumably a PNG re-serve for old Opera; confirm before reordering).
   NOTE(review): "@embed" / "@noflip" comments appear to be ResourceLoader /
   CSSJanus directives — do not remove. */
.oo-ui-icon-italic-invert {
background-image: url("themes/mediawiki/images/icons/italic-a-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-a-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-a-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-a-invert.png");
}
/* @noflip */
.oo-ui-icon-italic-invert:lang(ar) {
background-image: url("themes/mediawiki/images/icons/italic-arab-meem-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-arab-meem-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-arab-meem-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-arab-meem-invert.png");
}
/* @noflip */
.oo-ui-icon-italic-invert:lang(cs) {
background-image: url("themes/mediawiki/images/icons/italic-i-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-i-invert.png");
}
/* @noflip */
.oo-ui-icon-italic-invert:lang(en) {
background-image: url("themes/mediawiki/images/icons/italic-i-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-i-invert.png");
}
/* @noflip */
.oo-ui-icon-italic-invert:lang(fr) {
background-image: url("themes/mediawiki/images/icons/italic-i-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-i-invert.png");
}
/* @noflip */
.oo-ui-icon-italic-invert:lang(he) {
background-image: url("themes/mediawiki/images/icons/italic-i-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-i-invert.png");
}
/* @noflip */
.oo-ui-icon-italic-invert:lang(ml) {
background-image: url("themes/mediawiki/images/icons/italic-i-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-i-invert.png");
}
/* @noflip */
.oo-ui-icon-italic-invert:lang(pl) {
background-image: url("themes/mediawiki/images/icons/italic-i-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-i-invert.png");
}
/* @noflip */
.oo-ui-icon-italic-invert:lang(pt) {
background-image: url("themes/mediawiki/images/icons/italic-i-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-i-invert.png");
}
/* @noflip */
.oo-ui-icon-italic-invert:lang(sco) {
background-image: url("themes/mediawiki/images/icons/italic-i-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-i-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-i-invert.png");
}
/* @noflip */
.oo-ui-icon-italic-invert:lang(be) {
background-image: url("themes/mediawiki/images/icons/italic-k-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k-invert.png");
}
/* @noflip */
.oo-ui-icon-italic-invert:lang(da) {
background-image: url("themes/mediawiki/images/icons/italic-k-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k-invert.png");
}
/* @noflip */
.oo-ui-icon-italic-invert:lang(de) {
background-image: url("themes/mediawiki/images/icons/italic-k-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k-invert.png");
}
/* @noflip */
.oo-ui-icon-italic-invert:lang(fi) {
background-image: url("themes/mediawiki/images/icons/italic-k-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k-invert.png");
}
/* @noflip */
.oo-ui-icon-italic-invert:lang(ky) {
background-image: url("themes/mediawiki/images/icons/italic-k-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k-invert.png");
}
/* @noflip */
.oo-ui-icon-italic-invert:lang(nn) {
background-image: url("themes/mediawiki/images/icons/italic-k-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k-invert.png");
}
/* @noflip */
.oo-ui-icon-italic-invert:lang(no) {
background-image: url("themes/mediawiki/images/icons/italic-k-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k-invert.png");
}
/* @noflip */
.oo-ui-icon-italic-invert:lang(os) {
background-image: url("themes/mediawiki/images/icons/italic-k-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k-invert.png");
}
/* @noflip */
.oo-ui-icon-italic-invert:lang(sv) {
background-image: url("themes/mediawiki/images/icons/italic-k-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k-invert.png");
}
/* @noflip */
.oo-ui-icon-italic-invert:lang(ru) {
background-image: url("themes/mediawiki/images/icons/italic-k-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-k-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-k-invert.png");
}
/* @noflip */
.oo-ui-icon-italic-invert:lang(es) {
background-image: url("themes/mediawiki/images/icons/italic-c-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-c-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-c-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-c-invert.png");
}
/* @noflip */
.oo-ui-icon-italic-invert:lang(gl) {
background-image: url("themes/mediawiki/images/icons/italic-c-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-c-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-c-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-c-invert.png");
}
/* @noflip */
.oo-ui-icon-italic-invert:lang(it) {
background-image: url("themes/mediawiki/images/icons/italic-c-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-c-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-c-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-c-invert.png");
}
/* @noflip */
.oo-ui-icon-italic-invert:lang(nl) {
background-image: url("themes/mediawiki/images/icons/italic-c-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-c-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-c-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-c-invert.png");
}
/* @noflip */
.oo-ui-icon-italic-invert:lang(eu) {
background-image: url("themes/mediawiki/images/icons/italic-e-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-e-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-e-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-e-invert.png");
}
/* @noflip */
.oo-ui-icon-italic-invert:lang(fa) {
background-image: url("themes/mediawiki/images/icons/italic-arab-keheh-jeem-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-arab-keheh-jeem-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-arab-keheh-jeem-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-arab-keheh-jeem-invert.png");
}
/* @noflip */
.oo-ui-icon-italic-invert:lang(hu) {
background-image: url("themes/mediawiki/images/icons/italic-d-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-d-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-d-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-d-invert.png");
}
/* @noflip */
.oo-ui-icon-italic-invert:lang(hy) {
background-image: url("themes/mediawiki/images/icons/italic-armn-sha-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-armn-sha-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-armn-sha-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-armn-sha-invert.png");
}
/* @noflip */
.oo-ui-icon-italic-invert:lang(ksh) {
background-image: url("themes/mediawiki/images/icons/italic-s-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-s-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-s-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-s-invert.png");
}
/* @noflip */
.oo-ui-icon-italic-invert:lang(ka) {
background-image: url("themes/mediawiki/images/icons/italic-geor-kan-invert.png");
background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-geor-kan-invert.svg");
background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/italic-geor-kan-invert.svg");
background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/italic-geor-kan-invert.png");
}
/* "Strikethrough" icon (normal and inverted), with per-language letter
   variants: default "a", English "s", Finnish "y". Same PNG-fallback /
   SVG-via-gradient pattern as the other icon rules in this file. */
.oo-ui-icon-strikethrough {
  background-image: url("themes/mediawiki/images/icons/strikethrough-a.png");
  background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/strikethrough-a.svg");
  background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/strikethrough-a.svg");
  background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/strikethrough-a.png");
}
/* @noflip */
.oo-ui-icon-strikethrough:lang(en) {
  background-image: url("themes/mediawiki/images/icons/strikethrough-s.png");
  background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/strikethrough-s.svg");
  background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/strikethrough-s.svg");
  background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/strikethrough-s.png");
}
/* @noflip */
.oo-ui-icon-strikethrough:lang(fi) {
  background-image: url("themes/mediawiki/images/icons/strikethrough-y.png");
  background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/strikethrough-y.svg");
  background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/strikethrough-y.svg");
  background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/strikethrough-y.png");
}
.oo-ui-icon-strikethrough-invert {
  background-image: url("themes/mediawiki/images/icons/strikethrough-a-invert.png");
  background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/strikethrough-a-invert.svg");
  background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/strikethrough-a-invert.svg");
  background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/strikethrough-a-invert.png");
}
/* @noflip */
.oo-ui-icon-strikethrough-invert:lang(en) {
  background-image: url("themes/mediawiki/images/icons/strikethrough-s-invert.png");
  background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/strikethrough-s-invert.svg");
  background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/strikethrough-s-invert.svg");
  background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/strikethrough-s-invert.png");
}
/* @noflip */
.oo-ui-icon-strikethrough-invert:lang(fi) {
  background-image: url("themes/mediawiki/images/icons/strikethrough-y-invert.png");
  background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/strikethrough-y-invert.svg");
  background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/strikethrough-y-invert.svg");
  background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/strikethrough-y-invert.png");
}
/* "Underline" icon (normal and inverted): default letter "a", English "u". */
.oo-ui-icon-underline {
  background-image: url("themes/mediawiki/images/icons/underline-a.png");
  background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/underline-a.svg");
  background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/underline-a.svg");
  background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/underline-a.png");
}
/* @noflip */
.oo-ui-icon-underline:lang(en) {
  background-image: url("themes/mediawiki/images/icons/underline-u.png");
  background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/underline-u.svg");
  background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/underline-u.svg");
  background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/underline-u.png");
}
.oo-ui-icon-underline-invert {
  background-image: url("themes/mediawiki/images/icons/underline-a-invert.png");
  background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/underline-a-invert.svg");
  background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/underline-a-invert.svg");
  background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/underline-a-invert.png");
}
/* @noflip */
.oo-ui-icon-underline-invert:lang(en) {
  background-image: url("themes/mediawiki/images/icons/underline-u-invert.png");
  background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/underline-u-invert.svg");
  background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/underline-u-invert.svg");
  background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/underline-u-invert.png");
}
/* Text-language, text-direction (LTR/RTL) and text-style icons, each with an
   inverted variant. Same PNG-fallback / SVG-via-gradient pattern as above. */
.oo-ui-icon-textLanguage {
  background-image: url("themes/mediawiki/images/icons/language.png");
  background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/language.svg");
  background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/language.svg");
  background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/language.png");
}
.oo-ui-icon-textLanguage-invert {
  background-image: url("themes/mediawiki/images/icons/language-invert.png");
  background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/language-invert.svg");
  background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/language-invert.svg");
  background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/language-invert.png");
}
.oo-ui-icon-textDirLTR {
  background-image: url("themes/mediawiki/images/icons/text-dir-lefttoright.png");
  background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/text-dir-lefttoright.svg");
  background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/text-dir-lefttoright.svg");
  background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/text-dir-lefttoright.png");
}
.oo-ui-icon-textDirLTR-invert {
  background-image: url("themes/mediawiki/images/icons/text-dir-lefttoright-invert.png");
  background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/text-dir-lefttoright-invert.svg");
  background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/text-dir-lefttoright-invert.svg");
  background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/text-dir-lefttoright-invert.png");
}
.oo-ui-icon-textDirRTL {
  background-image: url("themes/mediawiki/images/icons/text-dir-righttoleft.png");
  background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/text-dir-righttoleft.svg");
  background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/text-dir-righttoleft.svg");
  background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/text-dir-righttoleft.png");
}
.oo-ui-icon-textDirRTL-invert {
  background-image: url("themes/mediawiki/images/icons/text-dir-righttoleft-invert.png");
  background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/text-dir-righttoleft-invert.svg");
  background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/text-dir-righttoleft-invert.svg");
  background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/text-dir-righttoleft-invert.png");
}
.oo-ui-icon-textStyle {
  background-image: url("themes/mediawiki/images/icons/text-style.png");
  background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/text-style.svg");
  background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/text-style.svg");
  background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/text-style.png");
}
.oo-ui-icon-textStyle-invert {
  background-image: url("themes/mediawiki/images/icons/text-style-invert.png");
  background-image: -webkit-linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/text-style-invert.svg");
  background-image: linear-gradient(transparent, transparent), /* @embed */ url("themes/mediawiki/images/icons/text-style-invert.svg");
  background-image: -o-linear-gradient(transparent, transparent), url("themes/mediawiki/images/icons/text-style-invert.png");
}
| brix/cdnjs | ajax/libs/oojs-ui/0.12.4/oojs-ui-mediawiki-icons-editing-styling.rtl.css | CSS | mit | 76,751 |
#!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import CScript, OP_1NEGATE, OP_NOP2, OP_DROP
from binascii import hexlify, unhexlify
import cStringIO
import time
def cltv_invalidate(tx):
    '''Make input 0 of `tx` fail CHECKLOCKTIMEVERIFY (BIP65).

    Rewrites the input's scriptSig so it begins with
    <-1> CHECKLOCKTIMEVERIFY DROP, which fails CLTV once the new
    rules are enforced.
    '''
    failing_prefix = [OP_1NEGATE, OP_NOP2, OP_DROP]
    original_ops = list(CScript(tx.vin[0].scriptSig))
    tx.vin[0].scriptSig = CScript(failing_prefix + original_ops)
'''
This test is meant to exercise BIP65 (CHECKLOCKTIMEVERIFY)
Connect to a single node.
Mine 2 (version 3) blocks (save the coinbases for later).
Generate 98 more version 3 blocks, verify the node accepts.
Mine 749 version 4 blocks, verify the node accepts.
Check that the new CLTV rules are not enforced on the 750th version 4 block.
Check that the new CLTV rules are enforced on the 751st version 4 block.
Mine 199 new version blocks.
Mine 1 old-version block.
Mine 1 new version block.
Mine 1 old version block, see that the node rejects.
'''
class BIP65Test(ComparisonTestFramework):
    '''Exercise BIP65 (CHECKLOCKTIMEVERIFY) soft-fork activation on one node.

    See the module-level docstring above for the block schedule this test
    drives the node through.
    '''

    def __init__(self):
        # Single node; the comparison framework talks to it over P2P.
        self.num_nodes = 1

    def setup_network(self):
        # Must set the blockversion for this test
        self.nodes = start_nodes(1, self.options.tmpdir,
                                 extra_args=[['-debug', '-whitelist=127.0.0.1', '-blockversion=3']],
                                 binary=[self.options.testbinary])

    def run_test(self):
        test = TestManager(self, self.options.tmpdir)
        test.add_all_connections(self.nodes)
        NetworkThread().start() # Start up network handling in another thread
        test.run()

    def create_transaction(self, node, coinbase, to_address, amount):
        # Build, wallet-sign and deserialize a transaction spending output 0
        # of the given block's coinbase to `to_address`.
        from_txid = node.getblock(coinbase)['tx'][0]
        inputs = [{ "txid" : from_txid, "vout" : 0}]
        outputs = { to_address : amount }
        rawtx = node.createrawtransaction(inputs, outputs)
        signresult = node.signrawtransaction(rawtx)
        tx = CTransaction()
        f = cStringIO.StringIO(unhexlify(signresult['hex']))
        tx.deserialize(f)
        return tx

    def get_tests(self):
        # Mine 2 blocks whose coinbases will be spent later in the test.
        self.coinbase_blocks = self.nodes[0].setgenerate(True, 2)
        # The trailing "L" makes the string a Python 2 long literal for
        # int(s, 0).
        self.tip = int ("0x" + self.nodes[0].getbestblockhash() + "L", 0)
        self.nodeaddress = self.nodes[0].getnewaddress()
        self.last_block_time = time.time()

        ''' 98 more version 3 blocks '''
        test_blocks = []
        for i in xrange(98):
            block = create_block(self.tip, create_coinbase(2), self.last_block_time + 1)
            block.nVersion = 3
            block.rehash()
            block.solve()
            test_blocks.append([block, True])
            self.last_block_time += 1
            self.tip = block.sha256
        yield TestInstance(test_blocks, sync_every_block=False)

        ''' Mine 749 version 4 blocks '''
        test_blocks = []
        for i in xrange(749):
            block = create_block(self.tip, create_coinbase(2), self.last_block_time + 1)
            block.nVersion = 4
            block.rehash()
            block.solve()
            test_blocks.append([block, True])
            self.last_block_time += 1
            self.tip = block.sha256
        yield TestInstance(test_blocks, sync_every_block=False)

        '''
        Check that the new CLTV rules are not enforced in the 750th
        version 4 block.
        '''
        # The CLTV-invalid spend must still be accepted pre-activation.
        spendtx = self.create_transaction(self.nodes[0],
                self.coinbase_blocks[0], self.nodeaddress, 1.0)
        cltv_invalidate(spendtx)
        spendtx.rehash()

        block = create_block(self.tip, create_coinbase(2), self.last_block_time + 1)
        block.nVersion = 4
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()

        self.last_block_time += 1
        self.tip = block.sha256
        yield TestInstance([[block, True]])

        '''
        Check that the new CLTV rules are enforced in the 751st version 4
        block.
        '''
        spendtx = self.create_transaction(self.nodes[0],
                self.coinbase_blocks[1], self.nodeaddress, 1.0)
        cltv_invalidate(spendtx)
        spendtx.rehash()

        block = create_block(self.tip, create_coinbase(1), self.last_block_time + 1)
        block.nVersion = 4
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()
        self.last_block_time += 1
        # self.tip is deliberately NOT advanced: this block is expected to
        # be rejected, so subsequent blocks build on the last valid tip.
        yield TestInstance([[block, False]])

        ''' Mine 199 new version blocks on last valid tip '''
        test_blocks = []
        for i in xrange(199):
            block = create_block(self.tip, create_coinbase(1), self.last_block_time + 1)
            block.nVersion = 4
            block.rehash()
            block.solve()
            test_blocks.append([block, True])
            self.last_block_time += 1
            self.tip = block.sha256
        yield TestInstance(test_blocks, sync_every_block=False)

        ''' Mine 1 old version block '''
        block = create_block(self.tip, create_coinbase(1), self.last_block_time + 1)
        block.nVersion = 3
        block.rehash()
        block.solve()
        self.last_block_time += 1
        self.tip = block.sha256
        yield TestInstance([[block, True]])

        ''' Mine 1 new version block '''
        block = create_block(self.tip, create_coinbase(1), self.last_block_time + 1)
        block.nVersion = 4
        block.rehash()
        block.solve()
        self.last_block_time += 1
        self.tip = block.sha256
        yield TestInstance([[block, True]])

        ''' Mine 1 old version block, should be invalid '''
        block = create_block(self.tip, create_coinbase(1), self.last_block_time + 1)
        block.nVersion = 3
        block.rehash()
        block.solve()
        self.last_block_time += 1
        # Tip again left unchanged: an old-version block is now invalid.
        yield TestInstance([[block, False]])
# Allow running this test directly from the command line.
if __name__ == '__main__':
    BIP65Test().main()
| octocoin-project/octocoin | qa/rpc-tests/bip65-cltv-p2p.py | Python | mit | 6,411 |
/*! MVW-Injection (0.2.5). (C) 2015 Xavier Boubert. MIT @license: en.wikipedia.org/wiki/MIT_License */
(function(root) {
  'use strict';

  // Singleton dependency-injection container. Interfaces are registered by
  // name; each interface holds named factories that are lazily instantiated,
  // with their own dependencies resolved recursively on first use.
  var DependencyInjection = new (function DependencyInjection() {
    var _this = this,
        _interfaces = {};

    // Normalize a factory into array form [depName1, ..., factoryFn].
    // A bare function has its dependency names parsed from its own argument
    // list (after stripping comments from its source text); an array is
    // shallow-copied so the caller's array is never mutated later.
    function _formatFactoryFunction(factoryFunction) {
      if (typeof factoryFunction == 'function') {
        var funcString = factoryFunction
          .toString()
          // remove comments
          .replace(/((\/\/.*$)|(\/\*[\s\S]*?\*\/))/mg, '');

        var matches = funcString.match(/^function\s*[^\(]*\s*\(\s*([^\)]*)\)/m);

        if (matches === null || matches.length < 2) {
          // No parseable argument list: the factory takes no injections.
          factoryFunction = [factoryFunction];
        }
        else {
          factoryFunction = matches[1]
            .replace(/\s/g, '')
            .split(',')
            .filter(function(arg) {
              return arg.trim().length > 0;
            })
            .concat(factoryFunction);
        }

        return factoryFunction;
      }
      else {
        var factoryArrayCopy = [];
        for (var i = 0; i < factoryFunction.length; i++) {
          factoryArrayCopy.push(factoryFunction[i]);
        }
        factoryFunction = factoryArrayCopy;
      }

      return factoryFunction;
    }

    // Per-interface resolver, exposed as DependencyInjection.injector[name].
    function Injector(instanceName) {

      // Resolve `dependencies` (an array of names) into their instantiated
      // factory results. Lookup order: `customDependencies` first, then each
      // interface that `name` is allowed to inject from. Factory results are
      // memoized in place (factory.instantiated). When `noError` is truthy,
      // a missing dependency returns false instead of throwing.
      function _getInjections(dependencies, name, customDependencies, noError) {
        var interfaces = _interfaces[name].interfacesSupported,
            injections = [],
            i,
            j;

        for (i = 0; i < dependencies.length; i++) {
          var factory = null;

          if (customDependencies && typeof customDependencies[dependencies[i]] != 'undefined') {
            factory = customDependencies[dependencies[i]];
          }
          else {
            for (j = 0; j < interfaces.length; j++) {
              if (!_interfaces[interfaces[j]]) {
                if (noError) {
                  return false;
                }
                throw new Error('DependencyInjection: "' + interfaces[j] + '" interface is not registered.');
              }

              factory = _interfaces[interfaces[j]].factories[dependencies[i]];

              if (factory) {
                factory.interfaceName = interfaces[j];
                break;
              }
            }
          }

          if (factory) {
            if (!factory.instantiated) {
              // First use: split the factory into its dependency names and
              // body, resolve those dependencies recursively, then replace
              // `result` with the factory's return value.
              var deps = _formatFactoryFunction(factory.result);
              factory.result = deps.pop();

              var factoryInjections = _getInjections(deps, factory.interfaceName);

              factory.result = factory.result.apply(_this, factoryInjections);
              factory.instantiated = true;
            }

            injections.push(factory.result);
          }
          else {
            if (noError) {
              return false;
            }
            throw new Error('DependencyInjection: "' + dependencies[i] + '" is not registered or accessible in ' + name + '.');
          }
        }

        return injections;
      }

      // Return a single factory's instantiated result, or false when
      // `noError` is set and the name cannot be resolved.
      this.get = function(factoryName, noError) {
        var injections = _getInjections([factoryName], instanceName, null, noError);
        if (injections.length) {
          return injections[0];
        }

        return false;
      };

      // Invoke `func` with `thisArg`, injecting its declared dependencies.
      // `customDependencies` ({interfaceName: {factoryName: value}}) takes
      // precedence over registered factories for this call only.
      this.invoke = function(thisArg, func, customDependencies) {
        var dependencies = _formatFactoryFunction(func);
        func = dependencies.pop();

        if (customDependencies) {
          // Flatten to {factoryName: pseudo-factory} records matching the
          // internal storage format used by _getInjections.
          var formatcustomDependencies = {},
              interfaceName,
              factory;

          for (interfaceName in customDependencies) {
            for (factory in customDependencies[interfaceName]) {
              formatcustomDependencies[factory] = {
                interfaceName: interfaceName,
                instantiated: false,
                result: customDependencies[interfaceName][factory]
              };
            }
          }

          customDependencies = formatcustomDependencies;
        }

        var injections = _getInjections(dependencies, instanceName, customDependencies);

        return func.apply(thisArg, injections);
      };
    }

    this.injector = {};

    // Register interface `name`, allowed to inject from itself plus each of
    // `canInjectInterfaces`. Creates DependencyInjection[name](factoryName,
    // factoryFunction, replaceIfExists) for registering factories, and
    // DependencyInjection.injector[name] for resolving them. Registering an
    // already-used name is a no-op.
    this.registerInterface = function(name, canInjectInterfaces) {
      if (_this[name]) {
        return _this;
      }

      _interfaces[name] = {
        interfacesSupported: (canInjectInterfaces || []).concat(name),
        factories: {}
      };

      _this.injector[name] = new Injector(name);

      _this[name] = function DependencyInjectionFactory(factoryName, factoryFunction, replaceIfExists) {
        // Keep the first registration unless replacement is requested.
        if (!replaceIfExists && _interfaces[name].factories[factoryName]) {
          return _this;
        }

        _interfaces[name].factories[factoryName] = {
          instantiated: false,
          result: factoryFunction
        };

        return _this;
      };

      return _this;
    };
  })();

  // Export for CommonJS when available, otherwise attach to the global.
  if (typeof module != 'undefined' && typeof module.exports != 'undefined') {
    module.exports = DependencyInjection;
  }
  else {
    root.DependencyInjection = DependencyInjection;
  }
})(this);
| dlueth/cdnjs | ajax/libs/mvw-injection/0.2.5/dependency-injection.js | JavaScript | mit | 5,110 |
/**
* Copyright 2013-2014, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
* @providesModule ReactCompositeComponent
*/
"use strict";
var ReactComponent = require("./ReactComponent");
var ReactContext = require("./ReactContext");
var ReactCurrentOwner = require("./ReactCurrentOwner");
var ReactElement = require("./ReactElement");
var ReactElementValidator = require("./ReactElementValidator");
var ReactEmptyComponent = require("./ReactEmptyComponent");
var ReactErrorUtils = require("./ReactErrorUtils");
var ReactLegacyElement = require("./ReactLegacyElement");
var ReactOwner = require("./ReactOwner");
var ReactPerf = require("./ReactPerf");
var ReactPropTransferer = require("./ReactPropTransferer");
var ReactPropTypeLocations = require("./ReactPropTypeLocations");
var ReactPropTypeLocationNames = require("./ReactPropTypeLocationNames");
var ReactUpdates = require("./ReactUpdates");
var assign = require("./Object.assign");
var instantiateReactComponent = require("./instantiateReactComponent");
var invariant = require("./invariant");
var keyMirror = require("./keyMirror");
var keyOf = require("./keyOf");
var monitorCodeUse = require("./monitorCodeUse");
var mapObject = require("./mapObject");
var shouldUpdateReactComponent = require("./shouldUpdateReactComponent");
var warning = require("./warning");
var MIXINS_KEY = keyOf({mixins: null});
/**
 * Policies that describe methods in `ReactCompositeComponentInterface`.
 * Produced with keyMirror, so each value mirrors its own key name; the tags
 * are compared with `===` in validateMethodOverride below.
 */
var SpecPolicy = keyMirror({
  /**
   * These methods may be defined only once by the class specification or mixin.
   */
  DEFINE_ONCE: null,
  /**
   * These methods may be defined by both the class specification and mixins.
   * Subsequent definitions will be chained. These methods must return void.
   */
  DEFINE_MANY: null,
  /**
   * These methods are overriding the base ReactCompositeComponent class.
   */
  OVERRIDE_BASE: null,
  /**
   * These methods are similar to DEFINE_MANY, except we assume they return
   * objects. We try to merge the keys of the return values of all the mixed in
   * functions. If there is a key conflict we throw.
   */
  DEFINE_MANY_MERGED: null
});
var injectedMixins = [];
/**
 * Composite components are higher-level components that compose other composite
 * or native components.
 *
 * To create a new type of `ReactCompositeComponent`, pass a specification of
 * your new class to `React.createClass`. The only requirement of your class
 * specification is that you implement a `render` method.
 *
 *   var MyComponent = React.createClass({
 *     render: function() {
 *       return <div>Hello World</div>;
 *     }
 *   });
 *
 * The class specification supports a specific protocol of methods that have
 * special meaning (e.g. `render`). See `ReactCompositeComponentInterface` for
 * the more comprehensive protocol. Any other properties and methods in the
 * class specification will be available on the prototype.
 *
 * @interface ReactCompositeComponentInterface
 * @internal
 */
var ReactCompositeComponentInterface = {
  // Each entry maps a spec key to the SpecPolicy tag that governs how
  // multiple definitions (spec + mixins) of that key are handled.

  /**
   * An array of Mixin objects to include when defining your component.
   *
   * @type {array}
   * @optional
   */
  mixins: SpecPolicy.DEFINE_MANY,

  /**
   * An object containing properties and methods that should be defined on
   * the component's constructor instead of its prototype (static methods).
   *
   * @type {object}
   * @optional
   */
  statics: SpecPolicy.DEFINE_MANY,

  /**
   * Definition of prop types for this component.
   *
   * @type {object}
   * @optional
   */
  propTypes: SpecPolicy.DEFINE_MANY,

  /**
   * Definition of context types for this component.
   *
   * @type {object}
   * @optional
   */
  contextTypes: SpecPolicy.DEFINE_MANY,

  /**
   * Definition of context types this component sets for its children.
   *
   * @type {object}
   * @optional
   */
  childContextTypes: SpecPolicy.DEFINE_MANY,

  // ==== Definition methods ====

  /**
   * Invoked when the component is mounted. Values in the mapping will be set on
   * `this.props` if that prop is not specified (i.e. using an `in` check).
   *
   * This method is invoked before `getInitialState` and therefore cannot rely
   * on `this.state` or use `this.setState`.
   *
   * @return {object}
   * @optional
   */
  getDefaultProps: SpecPolicy.DEFINE_MANY_MERGED,

  /**
   * Invoked once before the component is mounted. The return value will be used
   * as the initial value of `this.state`.
   *
   *   getInitialState: function() {
   *     return {
   *       isOn: false,
   *       fooBaz: new BazFoo()
   *     }
   *   }
   *
   * @return {object}
   * @optional
   */
  getInitialState: SpecPolicy.DEFINE_MANY_MERGED,

  /**
   * @return {object}
   * @optional
   */
  getChildContext: SpecPolicy.DEFINE_MANY_MERGED,

  /**
   * Uses props from `this.props` and state from `this.state` to render the
   * structure of the component.
   *
   * No guarantees are made about when or how often this method is invoked, so
   * it must not have side effects.
   *
   *   render: function() {
   *     var name = this.props.name;
   *     return <div>Hello, {name}!</div>;
   *   }
   *
   * @return {ReactComponent}
   * @nosideeffects
   * @required
   */
  render: SpecPolicy.DEFINE_ONCE,

  // ==== Delegate methods ====

  /**
   * Invoked when the component is initially created and about to be mounted.
   * This may have side effects, but any external subscriptions or data created
   * by this method must be cleaned up in `componentWillUnmount`.
   *
   * @optional
   */
  componentWillMount: SpecPolicy.DEFINE_MANY,

  /**
   * Invoked when the component has been mounted and has a DOM representation.
   * However, there is no guarantee that the DOM node is in the document.
   *
   * Use this as an opportunity to operate on the DOM when the component has
   * been mounted (initialized and rendered) for the first time.
   *
   * @param {DOMElement} rootNode DOM element representing the component.
   * @optional
   */
  componentDidMount: SpecPolicy.DEFINE_MANY,

  /**
   * Invoked before the component receives new props.
   *
   * Use this as an opportunity to react to a prop transition by updating the
   * state using `this.setState`. Current props are accessed via `this.props`.
   *
   *   componentWillReceiveProps: function(nextProps, nextContext) {
   *     this.setState({
   *       likesIncreasing: nextProps.likeCount > this.props.likeCount
   *     });
   *   }
   *
   * NOTE: There is no equivalent `componentWillReceiveState`. An incoming prop
   * transition may cause a state change, but the opposite is not true. If you
   * need it, you are probably looking for `componentWillUpdate`.
   *
   * @param {object} nextProps
   * @optional
   */
  componentWillReceiveProps: SpecPolicy.DEFINE_MANY,

  /**
   * Invoked while deciding if the component should be updated as a result of
   * receiving new props, state and/or context.
   *
   * Use this as an opportunity to `return false` when you're certain that the
   * transition to the new props/state/context will not require a component
   * update.
   *
   *   shouldComponentUpdate: function(nextProps, nextState, nextContext) {
   *     return !equal(nextProps, this.props) ||
   *       !equal(nextState, this.state) ||
   *       !equal(nextContext, this.context);
   *   }
   *
   * @param {object} nextProps
   * @param {?object} nextState
   * @param {?object} nextContext
   * @return {boolean} True if the component should update.
   * @optional
   */
  shouldComponentUpdate: SpecPolicy.DEFINE_ONCE,

  /**
   * Invoked when the component is about to update due to a transition from
   * `this.props`, `this.state` and `this.context` to `nextProps`, `nextState`
   * and `nextContext`.
   *
   * Use this as an opportunity to perform preparation before an update occurs.
   *
   * NOTE: You **cannot** use `this.setState()` in this method.
   *
   * @param {object} nextProps
   * @param {?object} nextState
   * @param {?object} nextContext
   * @param {ReactReconcileTransaction} transaction
   * @optional
   */
  componentWillUpdate: SpecPolicy.DEFINE_MANY,

  /**
   * Invoked when the component's DOM representation has been updated.
   *
   * Use this as an opportunity to operate on the DOM when the component has
   * been updated.
   *
   * @param {object} prevProps
   * @param {?object} prevState
   * @param {?object} prevContext
   * @param {DOMElement} rootNode DOM element representing the component.
   * @optional
   */
  componentDidUpdate: SpecPolicy.DEFINE_MANY,

  /**
   * Invoked when the component is about to be removed from its parent and have
   * its DOM representation destroyed.
   *
   * Use this as an opportunity to deallocate any external resources.
   *
   * NOTE: There is no `componentDidUnmount` since your component will have been
   * destroyed by that point.
   *
   * @optional
   */
  componentWillUnmount: SpecPolicy.DEFINE_MANY,

  // ==== Advanced methods ====

  /**
   * Updates the component's currently mounted DOM representation.
   *
   * By default, this implements React's rendering and reconciliation algorithm.
   * Sophisticated clients may wish to override this.
   *
   * @param {ReactReconcileTransaction} transaction
   * @internal
   * @overridable
   */
  updateComponent: SpecPolicy.OVERRIDE_BASE
};
/**
* Mapping from class specification keys to special processing functions.
*
* Although these are declared like instance properties in the specification
* when defining classes using `React.createClass`, they are actually static
* and are accessible on the constructor instead of the prototype. Despite
* being static, they must be defined outside of the "statics" key under
* which all other static methods are defined.
*/
var RESERVED_SPEC_KEYS = {
  // Copies the spec's displayName onto the constructor (used in warnings).
  displayName: function(Constructor, displayName) {
    Constructor.displayName = displayName;
  },
  // Mixes each listed mixin into the class, in order.
  mixins: function(Constructor, mixins) {
    if (mixins) {
      for (var i = 0; i < mixins.length; i++) {
        mixSpecIntoComponent(Constructor, mixins[i]);
      }
    }
  },
  // Validates the map's values are functions, then merges it into any
  // childContextTypes already on the constructor.
  childContextTypes: function(Constructor, childContextTypes) {
    validateTypeDef(
      Constructor,
      childContextTypes,
      ReactPropTypeLocations.childContext
    );
    Constructor.childContextTypes = assign(
      {},
      Constructor.childContextTypes,
      childContextTypes
    );
  },
  // Same validation + merge, for contextTypes.
  contextTypes: function(Constructor, contextTypes) {
    validateTypeDef(
      Constructor,
      contextTypes,
      ReactPropTypeLocations.context
    );
    Constructor.contextTypes = assign(
      {},
      Constructor.contextTypes,
      contextTypes
    );
  },
  /**
   * Special case getDefaultProps which should move into statics but requires
   * automatic merging.
   */
  getDefaultProps: function(Constructor, getDefaultProps) {
    if (Constructor.getDefaultProps) {
      // Chain with the existing implementation, merging both results.
      Constructor.getDefaultProps = createMergedResultFunction(
        Constructor.getDefaultProps,
        getDefaultProps
      );
    } else {
      Constructor.getDefaultProps = getDefaultProps;
    }
  },
  // Same validation + merge, for propTypes.
  propTypes: function(Constructor, propTypes) {
    validateTypeDef(
      Constructor,
      propTypes,
      ReactPropTypeLocations.prop
    );
    Constructor.propTypes = assign(
      {},
      Constructor.propTypes,
      propTypes
    );
  },
  // Copies static members onto the constructor itself.
  statics: function(Constructor, statics) {
    mixStaticSpecIntoComponent(Constructor, statics);
  }
};
/**
 * Builds a hint naming the owner component's render method, suitable for
 * appending to warning/error messages.
 *
 * @param {object} component Component/element carrying an optional `_owner`.
 * @return {string} '' when no owner display name is known, otherwise
 *   ' Check the render method of `<Name>`.'
 */
function getDeclarationErrorAddendum(component) {
  var ownerComponent = component._owner || null;
  if (!ownerComponent || !ownerComponent.constructor) {
    return '';
  }
  var ownerName = ownerComponent.constructor.displayName;
  if (!ownerName) {
    return '';
  }
  return ' Check the render method of `' + ownerName + '`.';
}
// Asserts that every own entry of a propTypes/contextTypes/childContextTypes
// map is a function (a checker, usually from React.PropTypes). `location`
// selects the human-readable location name used in the error message.
function validateTypeDef(Constructor, typeDef, location) {
  for (var propName in typeDef) {
    if (typeDef.hasOwnProperty(propName)) {
      // The NODE_ENV ternary keeps the verbose message out of production
      // builds while still asserting the condition.
      ("production" !== process.env.NODE_ENV ? invariant(
        typeof typeDef[propName] == 'function',
        '%s: %s type `%s` is invalid; it must be a function, usually from ' +
        'React.PropTypes.',
        Constructor.displayName || 'ReactCompositeComponent',
        ReactPropTypeLocationNames[location],
        propName
      ) : invariant(typeof typeDef[propName] == 'function'));
    }
  }
}
// Enforces SpecPolicy when a spec or mixin defines `name` on the prototype:
// base-class methods may only be overridden when tagged OVERRIDE_BASE, and
// defining a method twice is only legal for DEFINE_MANY /
// DEFINE_MANY_MERGED methods.
function validateMethodOverride(proto, name) {
  var specPolicy = ReactCompositeComponentInterface.hasOwnProperty(name) ?
    ReactCompositeComponentInterface[name] :
    null;

  // Disallow overriding of base class methods unless explicitly allowed.
  if (ReactCompositeComponentMixin.hasOwnProperty(name)) {
    ("production" !== process.env.NODE_ENV ? invariant(
      specPolicy === SpecPolicy.OVERRIDE_BASE,
      'ReactCompositeComponentInterface: You are attempting to override ' +
      '`%s` from your class specification. Ensure that your method names ' +
      'do not overlap with React methods.',
      name
    ) : invariant(specPolicy === SpecPolicy.OVERRIDE_BASE));
  }

  // Disallow defining methods more than once unless explicitly allowed.
  if (proto.hasOwnProperty(name)) {
    ("production" !== process.env.NODE_ENV ? invariant(
      specPolicy === SpecPolicy.DEFINE_MANY ||
      specPolicy === SpecPolicy.DEFINE_MANY_MERGED,
      'ReactCompositeComponentInterface: You are attempting to define ' +
      '`%s` on your component more than once. This conflict may be due ' +
      'to a mixin.',
      name
    ) : invariant(specPolicy === SpecPolicy.DEFINE_MANY ||
      specPolicy === SpecPolicy.DEFINE_MANY_MERGED));
  }
}
function validateLifeCycleOnReplaceState(instance) {
var compositeLifeCycleState = instance._compositeLifeCycleState;
("production" !== process.env.NODE_ENV ? invariant(
instance.isMounted() ||
compositeLifeCycleState === CompositeLifeCycle.MOUNTING,
'replaceState(...): Can only update a mounted or mounting component.'
) : invariant(instance.isMounted() ||
compositeLifeCycleState === CompositeLifeCycle.MOUNTING));
("production" !== process.env.NODE_ENV ? invariant(
ReactCurrentOwner.current == null,
'replaceState(...): Cannot update during an existing state transition ' +
'(such as within `render`). Render methods should be a pure function ' +
'of props and state.'
) : invariant(ReactCurrentOwner.current == null));
("production" !== process.env.NODE_ENV ? invariant(compositeLifeCycleState !== CompositeLifeCycle.UNMOUNTING,
'replaceState(...): Cannot update while unmounting component. This ' +
'usually means you called setState() on an unmounted component.'
) : invariant(compositeLifeCycleState !== CompositeLifeCycle.UNMOUNTING));
}
/**
* Mixin helper which handles policy validation and reserved
* specification keys when building `ReactCompositeComponent` classses.
*/
function mixSpecIntoComponent(Constructor, spec) {
if (!spec) {
return;
}
("production" !== process.env.NODE_ENV ? invariant(
!ReactLegacyElement.isValidFactory(spec),
'ReactCompositeComponent: You\'re attempting to ' +
'use a component class as a mixin. Instead, just use a regular object.'
) : invariant(!ReactLegacyElement.isValidFactory(spec)));
("production" !== process.env.NODE_ENV ? invariant(
!ReactElement.isValidElement(spec),
'ReactCompositeComponent: You\'re attempting to ' +
'use a component as a mixin. Instead, just use a regular object.'
) : invariant(!ReactElement.isValidElement(spec)));
var proto = Constructor.prototype;
// By handling mixins before any other properties, we ensure the same
// chaining order is applied to methods with DEFINE_MANY policy, whether
// mixins are listed before or after these methods in the spec.
if (spec.hasOwnProperty(MIXINS_KEY)) {
RESERVED_SPEC_KEYS.mixins(Constructor, spec.mixins);
}
for (var name in spec) {
if (!spec.hasOwnProperty(name)) {
continue;
}
if (name === MIXINS_KEY) {
// We have already handled mixins in a special case above
continue;
}
var property = spec[name];
validateMethodOverride(proto, name);
if (RESERVED_SPEC_KEYS.hasOwnProperty(name)) {
RESERVED_SPEC_KEYS[name](Constructor, property);
} else {
// Setup methods on prototype:
// The following member methods should not be automatically bound:
// 1. Expected ReactCompositeComponent methods (in the "interface").
// 2. Overridden methods (that were mixed in).
var isCompositeComponentMethod =
ReactCompositeComponentInterface.hasOwnProperty(name);
var isAlreadyDefined = proto.hasOwnProperty(name);
var markedDontBind = property && property.__reactDontBind;
var isFunction = typeof property === 'function';
var shouldAutoBind =
isFunction &&
!isCompositeComponentMethod &&
!isAlreadyDefined &&
!markedDontBind;
if (shouldAutoBind) {
if (!proto.__reactAutoBindMap) {
proto.__reactAutoBindMap = {};
}
proto.__reactAutoBindMap[name] = property;
proto[name] = property;
} else {
if (isAlreadyDefined) {
var specPolicy = ReactCompositeComponentInterface[name];
// These cases should already be caught by validateMethodOverride
("production" !== process.env.NODE_ENV ? invariant(
isCompositeComponentMethod && (
specPolicy === SpecPolicy.DEFINE_MANY_MERGED ||
specPolicy === SpecPolicy.DEFINE_MANY
),
'ReactCompositeComponent: Unexpected spec policy %s for key %s ' +
'when mixing in component specs.',
specPolicy,
name
) : invariant(isCompositeComponentMethod && (
specPolicy === SpecPolicy.DEFINE_MANY_MERGED ||
specPolicy === SpecPolicy.DEFINE_MANY
)));
// For methods which are defined more than once, call the existing
// methods before calling the new property, merging if appropriate.
if (specPolicy === SpecPolicy.DEFINE_MANY_MERGED) {
proto[name] = createMergedResultFunction(proto[name], property);
} else if (specPolicy === SpecPolicy.DEFINE_MANY) {
proto[name] = createChainedFunction(proto[name], property);
}
} else {
proto[name] = property;
if ("production" !== process.env.NODE_ENV) {
// Add verbose displayName to the function, which helps when looking
// at profiling tools.
if (typeof property === 'function' && spec.displayName) {
proto[name].displayName = spec.displayName + '_' + name;
}
}
}
}
}
}
}
function mixStaticSpecIntoComponent(Constructor, statics) {
if (!statics) {
return;
}
for (var name in statics) {
var property = statics[name];
if (!statics.hasOwnProperty(name)) {
continue;
}
var isReserved = name in RESERVED_SPEC_KEYS;
("production" !== process.env.NODE_ENV ? invariant(
!isReserved,
'ReactCompositeComponent: You are attempting to define a reserved ' +
'property, `%s`, that shouldn\'t be on the "statics" key. Define it ' +
'as an instance property instead; it will still be accessible on the ' +
'constructor.',
name
) : invariant(!isReserved));
var isInherited = name in Constructor;
("production" !== process.env.NODE_ENV ? invariant(
!isInherited,
'ReactCompositeComponent: You are attempting to define ' +
'`%s` on your component more than once. This conflict may be ' +
'due to a mixin.',
name
) : invariant(!isInherited));
Constructor[name] = property;
}
}
/**
* Merge two objects, but throw if both contain the same key.
*
* @param {object} one The first object, which is mutated.
* @param {object} two The second object
* @return {object} one after it has been mutated to contain everything in two.
*/
function mergeObjectsWithNoDuplicateKeys(one, two) {
("production" !== process.env.NODE_ENV ? invariant(
one && two && typeof one === 'object' && typeof two === 'object',
'mergeObjectsWithNoDuplicateKeys(): Cannot merge non-objects'
) : invariant(one && two && typeof one === 'object' && typeof two === 'object'));
mapObject(two, function(value, key) {
("production" !== process.env.NODE_ENV ? invariant(
one[key] === undefined,
'mergeObjectsWithNoDuplicateKeys(): ' +
'Tried to merge two objects with the same key: `%s`. This conflict ' +
'may be due to a mixin; in particular, this may be caused by two ' +
'getInitialState() or getDefaultProps() methods returning objects ' +
'with clashing keys.',
key
) : invariant(one[key] === undefined));
one[key] = value;
});
return one;
}
/**
* Creates a function that invokes two functions and merges their return values.
*
* @param {function} one Function to invoke first.
* @param {function} two Function to invoke second.
* @return {function} Function that invokes the two argument functions.
* @private
*/
function createMergedResultFunction(one, two) {
return function mergedResult() {
var a = one.apply(this, arguments);
var b = two.apply(this, arguments);
if (a == null) {
return b;
} else if (b == null) {
return a;
}
return mergeObjectsWithNoDuplicateKeys(a, b);
};
}
/**
* Creates a function that invokes two functions and ignores their return vales.
*
* @param {function} one Function to invoke first.
* @param {function} two Function to invoke second.
* @return {function} Function that invokes the two argument functions.
* @private
*/
function createChainedFunction(one, two) {
return function chainedFunction() {
one.apply(this, arguments);
two.apply(this, arguments);
};
}
/**
* `ReactCompositeComponent` maintains an auxiliary life cycle state in
* `this._compositeLifeCycleState` (which can be null).
*
* This is different from the life cycle state maintained by `ReactComponent` in
* `this._lifeCycleState`. The following diagram shows how the states overlap in
* time. There are times when the CompositeLifeCycle is null - at those times it
* is only meaningful to look at ComponentLifeCycle alone.
*
* Top Row: ReactComponent.ComponentLifeCycle
* Low Row: ReactComponent.CompositeLifeCycle
*
* +-------+---------------------------------+--------+
* | UN | MOUNTED | UN |
* |MOUNTED| | MOUNTED|
* +-------+---------------------------------+--------+
* | ^--------+ +-------+ +--------^ |
* | | | | | | | |
* | 0--|MOUNTING|-0-|RECEIVE|-0-| UN |--->0 |
* | | | |PROPS | |MOUNTING| |
* | | | | | | | |
* | | | | | | | |
* | +--------+ +-------+ +--------+ |
* | | | |
* +-------+---------------------------------+--------+
*/
var CompositeLifeCycle = keyMirror({
/**
* Components in the process of being mounted respond to state changes
* differently.
*/
MOUNTING: null,
/**
* Components in the process of being unmounted are guarded against state
* changes.
*/
UNMOUNTING: null,
/**
* Components that are mounted and receiving new props respond to state
* changes differently.
*/
RECEIVING_PROPS: null
});
/**
* @lends {ReactCompositeComponent.prototype}
*/
var ReactCompositeComponentMixin = {
/**
* Base constructor for all composite component.
*
* @param {ReactElement} element
* @final
* @internal
*/
construct: function(element) {
// Children can be either an array or more than one argument
ReactComponent.Mixin.construct.apply(this, arguments);
ReactOwner.Mixin.construct.apply(this, arguments);
this.state = null;
this._pendingState = null;
// This is the public post-processed context. The real context and pending
// context lives on the element.
this.context = null;
this._compositeLifeCycleState = null;
},
/**
* Checks whether or not this composite component is mounted.
* @return {boolean} True if mounted, false otherwise.
* @protected
* @final
*/
isMounted: function() {
return ReactComponent.Mixin.isMounted.call(this) &&
this._compositeLifeCycleState !== CompositeLifeCycle.MOUNTING;
},
/**
* Initializes the component, renders markup, and registers event listeners.
*
* @param {string} rootID DOM ID of the root node.
* @param {ReactReconcileTransaction|ReactServerRenderingTransaction} transaction
* @param {number} mountDepth number of components in the owner hierarchy
* @return {?string} Rendered markup to be inserted into the DOM.
* @final
* @internal
*/
mountComponent: ReactPerf.measure(
'ReactCompositeComponent',
'mountComponent',
function(rootID, transaction, mountDepth) {
ReactComponent.Mixin.mountComponent.call(
this,
rootID,
transaction,
mountDepth
);
this._compositeLifeCycleState = CompositeLifeCycle.MOUNTING;
if (this.__reactAutoBindMap) {
this._bindAutoBindMethods();
}
this.context = this._processContext(this._currentElement._context);
this.props = this._processProps(this.props);
this.state = this.getInitialState ? this.getInitialState() : null;
("production" !== process.env.NODE_ENV ? invariant(
typeof this.state === 'object' && !Array.isArray(this.state),
'%s.getInitialState(): must return an object or null',
this.constructor.displayName || 'ReactCompositeComponent'
) : invariant(typeof this.state === 'object' && !Array.isArray(this.state)));
this._pendingState = null;
this._pendingForceUpdate = false;
if (this.componentWillMount) {
this.componentWillMount();
// When mounting, calls to `setState` by `componentWillMount` will set
// `this._pendingState` without triggering a re-render.
if (this._pendingState) {
this.state = this._pendingState;
this._pendingState = null;
}
}
this._renderedComponent = instantiateReactComponent(
this._renderValidatedComponent(),
this._currentElement.type // The wrapping type
);
// Done with mounting, `setState` will now trigger UI changes.
this._compositeLifeCycleState = null;
var markup = this._renderedComponent.mountComponent(
rootID,
transaction,
mountDepth + 1
);
if (this.componentDidMount) {
transaction.getReactMountReady().enqueue(this.componentDidMount, this);
}
return markup;
}
),
/**
* Releases any resources allocated by `mountComponent`.
*
* @final
* @internal
*/
unmountComponent: function() {
this._compositeLifeCycleState = CompositeLifeCycle.UNMOUNTING;
if (this.componentWillUnmount) {
this.componentWillUnmount();
}
this._compositeLifeCycleState = null;
this._renderedComponent.unmountComponent();
this._renderedComponent = null;
ReactComponent.Mixin.unmountComponent.call(this);
// Some existing components rely on this.props even after they've been
// destroyed (in event handlers).
// TODO: this.props = null;
// TODO: this.state = null;
},
/**
* Sets a subset of the state. Always use this or `replaceState` to mutate
* state. You should treat `this.state` as immutable.
*
* There is no guarantee that `this.state` will be immediately updated, so
* accessing `this.state` after calling this method may return the old value.
*
* There is no guarantee that calls to `setState` will run synchronously,
* as they may eventually be batched together. You can provide an optional
* callback that will be executed when the call to setState is actually
* completed.
*
* @param {object} partialState Next partial state to be merged with state.
* @param {?function} callback Called after state is updated.
* @final
* @protected
*/
setState: function(partialState, callback) {
("production" !== process.env.NODE_ENV ? invariant(
typeof partialState === 'object' || partialState == null,
'setState(...): takes an object of state variables to update.'
) : invariant(typeof partialState === 'object' || partialState == null));
if ("production" !== process.env.NODE_ENV){
("production" !== process.env.NODE_ENV ? warning(
partialState != null,
'setState(...): You passed an undefined or null state object; ' +
'instead, use forceUpdate().'
) : null);
}
// Merge with `_pendingState` if it exists, otherwise with existing state.
this.replaceState(
assign({}, this._pendingState || this.state, partialState),
callback
);
},
/**
* Replaces all of the state. Always use this or `setState` to mutate state.
* You should treat `this.state` as immutable.
*
* There is no guarantee that `this.state` will be immediately updated, so
* accessing `this.state` after calling this method may return the old value.
*
* @param {object} completeState Next state.
* @param {?function} callback Called after state is updated.
* @final
* @protected
*/
replaceState: function(completeState, callback) {
validateLifeCycleOnReplaceState(this);
this._pendingState = completeState;
if (this._compositeLifeCycleState !== CompositeLifeCycle.MOUNTING) {
// If we're in a componentWillMount handler, don't enqueue a rerender
// because ReactUpdates assumes we're in a browser context (which is wrong
// for server rendering) and we're about to do a render anyway.
// TODO: The callback here is ignored when setState is called from
// componentWillMount. Either fix it or disallow doing so completely in
// favor of getInitialState.
ReactUpdates.enqueueUpdate(this, callback);
}
},
/**
* Filters the context object to only contain keys specified in
* `contextTypes`, and asserts that they are valid.
*
* @param {object} context
* @return {?object}
* @private
*/
_processContext: function(context) {
var maskedContext = null;
var contextTypes = this.constructor.contextTypes;
if (contextTypes) {
maskedContext = {};
for (var contextName in contextTypes) {
maskedContext[contextName] = context[contextName];
}
if ("production" !== process.env.NODE_ENV) {
this._checkPropTypes(
contextTypes,
maskedContext,
ReactPropTypeLocations.context
);
}
}
return maskedContext;
},
/**
* @param {object} currentContext
* @return {object}
* @private
*/
_processChildContext: function(currentContext) {
var childContext = this.getChildContext && this.getChildContext();
var displayName = this.constructor.displayName || 'ReactCompositeComponent';
if (childContext) {
("production" !== process.env.NODE_ENV ? invariant(
typeof this.constructor.childContextTypes === 'object',
'%s.getChildContext(): childContextTypes must be defined in order to ' +
'use getChildContext().',
displayName
) : invariant(typeof this.constructor.childContextTypes === 'object'));
if ("production" !== process.env.NODE_ENV) {
this._checkPropTypes(
this.constructor.childContextTypes,
childContext,
ReactPropTypeLocations.childContext
);
}
for (var name in childContext) {
("production" !== process.env.NODE_ENV ? invariant(
name in this.constructor.childContextTypes,
'%s.getChildContext(): key "%s" is not defined in childContextTypes.',
displayName,
name
) : invariant(name in this.constructor.childContextTypes));
}
return assign({}, currentContext, childContext);
}
return currentContext;
},
/**
* Processes props by setting default values for unspecified props and
* asserting that the props are valid. Does not mutate its argument; returns
* a new props object with defaults merged in.
*
* @param {object} newProps
* @return {object}
* @private
*/
_processProps: function(newProps) {
if ("production" !== process.env.NODE_ENV) {
var propTypes = this.constructor.propTypes;
if (propTypes) {
this._checkPropTypes(propTypes, newProps, ReactPropTypeLocations.prop);
}
}
return newProps;
},
/**
* Assert that the props are valid
*
* @param {object} propTypes Map of prop name to a ReactPropType
* @param {object} props
* @param {string} location e.g. "prop", "context", "child context"
* @private
*/
_checkPropTypes: function(propTypes, props, location) {
// TODO: Stop validating prop types here and only use the element
// validation.
var componentName = this.constructor.displayName;
for (var propName in propTypes) {
if (propTypes.hasOwnProperty(propName)) {
var error =
propTypes[propName](props, propName, componentName, location);
if (error instanceof Error) {
// We may want to extend this logic for similar errors in
// renderComponent calls, so I'm abstracting it away into
// a function to minimize refactoring in the future
var addendum = getDeclarationErrorAddendum(this);
("production" !== process.env.NODE_ENV ? warning(false, error.message + addendum) : null);
}
}
}
},
/**
* If any of `_pendingElement`, `_pendingState`, or `_pendingForceUpdate`
* is set, update the component.
*
* @param {ReactReconcileTransaction} transaction
* @internal
*/
performUpdateIfNecessary: function(transaction) {
var compositeLifeCycleState = this._compositeLifeCycleState;
// Do not trigger a state transition if we are in the middle of mounting or
// receiving props because both of those will already be doing this.
if (compositeLifeCycleState === CompositeLifeCycle.MOUNTING ||
compositeLifeCycleState === CompositeLifeCycle.RECEIVING_PROPS) {
return;
}
if (this._pendingElement == null &&
this._pendingState == null &&
!this._pendingForceUpdate) {
return;
}
var nextContext = this.context;
var nextProps = this.props;
var nextElement = this._currentElement;
if (this._pendingElement != null) {
nextElement = this._pendingElement;
nextContext = this._processContext(nextElement._context);
nextProps = this._processProps(nextElement.props);
this._pendingElement = null;
this._compositeLifeCycleState = CompositeLifeCycle.RECEIVING_PROPS;
if (this.componentWillReceiveProps) {
this.componentWillReceiveProps(nextProps, nextContext);
}
}
this._compositeLifeCycleState = null;
var nextState = this._pendingState || this.state;
this._pendingState = null;
var shouldUpdate =
this._pendingForceUpdate ||
!this.shouldComponentUpdate ||
this.shouldComponentUpdate(nextProps, nextState, nextContext);
if ("production" !== process.env.NODE_ENV) {
if (typeof shouldUpdate === "undefined") {
console.warn(
(this.constructor.displayName || 'ReactCompositeComponent') +
'.shouldComponentUpdate(): Returned undefined instead of a ' +
'boolean value. Make sure to return true or false.'
);
}
}
if (shouldUpdate) {
this._pendingForceUpdate = false;
// Will set `this.props`, `this.state` and `this.context`.
this._performComponentUpdate(
nextElement,
nextProps,
nextState,
nextContext,
transaction
);
} else {
// If it's determined that a component should not update, we still want
// to set props and state.
this._currentElement = nextElement;
this.props = nextProps;
this.state = nextState;
this.context = nextContext;
// Owner cannot change because shouldUpdateReactComponent doesn't allow
// it. TODO: Remove this._owner completely.
this._owner = nextElement._owner;
}
},
/**
* Merges new props and state, notifies delegate methods of update and
* performs update.
*
* @param {ReactElement} nextElement Next element
* @param {object} nextProps Next public object to set as properties.
* @param {?object} nextState Next object to set as state.
* @param {?object} nextContext Next public object to set as context.
* @param {ReactReconcileTransaction} transaction
* @private
*/
_performComponentUpdate: function(
nextElement,
nextProps,
nextState,
nextContext,
transaction
) {
var prevElement = this._currentElement;
var prevProps = this.props;
var prevState = this.state;
var prevContext = this.context;
if (this.componentWillUpdate) {
this.componentWillUpdate(nextProps, nextState, nextContext);
}
this._currentElement = nextElement;
this.props = nextProps;
this.state = nextState;
this.context = nextContext;
// Owner cannot change because shouldUpdateReactComponent doesn't allow
// it. TODO: Remove this._owner completely.
this._owner = nextElement._owner;
this.updateComponent(
transaction,
prevElement
);
if (this.componentDidUpdate) {
transaction.getReactMountReady().enqueue(
this.componentDidUpdate.bind(this, prevProps, prevState, prevContext),
this
);
}
},
receiveComponent: function(nextElement, transaction) {
if (nextElement === this._currentElement &&
nextElement._owner != null) {
// Since elements are immutable after the owner is rendered,
// we can do a cheap identity compare here to determine if this is a
// superfluous reconcile. It's possible for state to be mutable but such
// change should trigger an update of the owner which would recreate
// the element. We explicitly check for the existence of an owner since
// it's possible for a element created outside a composite to be
// deeply mutated and reused.
return;
}
ReactComponent.Mixin.receiveComponent.call(
this,
nextElement,
transaction
);
},
/**
* Updates the component's currently mounted DOM representation.
*
* By default, this implements React's rendering and reconciliation algorithm.
* Sophisticated clients may wish to override this.
*
* @param {ReactReconcileTransaction} transaction
* @param {ReactElement} prevElement
* @internal
* @overridable
*/
updateComponent: ReactPerf.measure(
'ReactCompositeComponent',
'updateComponent',
function(transaction, prevParentElement) {
ReactComponent.Mixin.updateComponent.call(
this,
transaction,
prevParentElement
);
var prevComponentInstance = this._renderedComponent;
var prevElement = prevComponentInstance._currentElement;
var nextElement = this._renderValidatedComponent();
if (shouldUpdateReactComponent(prevElement, nextElement)) {
prevComponentInstance.receiveComponent(nextElement, transaction);
} else {
// These two IDs are actually the same! But nothing should rely on that.
var thisID = this._rootNodeID;
var prevComponentID = prevComponentInstance._rootNodeID;
prevComponentInstance.unmountComponent();
this._renderedComponent = instantiateReactComponent(
nextElement,
this._currentElement.type
);
var nextMarkup = this._renderedComponent.mountComponent(
thisID,
transaction,
this._mountDepth + 1
);
ReactComponent.BackendIDOperations.dangerouslyReplaceNodeWithMarkupByID(
prevComponentID,
nextMarkup
);
}
}
),
/**
* Forces an update. This should only be invoked when it is known with
* certainty that we are **not** in a DOM transaction.
*
* You may want to call this when you know that some deeper aspect of the
* component's state has changed but `setState` was not called.
*
* This will not invoke `shouldUpdateComponent`, but it will invoke
* `componentWillUpdate` and `componentDidUpdate`.
*
* @param {?function} callback Called after update is complete.
* @final
* @protected
*/
forceUpdate: function(callback) {
var compositeLifeCycleState = this._compositeLifeCycleState;
("production" !== process.env.NODE_ENV ? invariant(
this.isMounted() ||
compositeLifeCycleState === CompositeLifeCycle.MOUNTING,
'forceUpdate(...): Can only force an update on mounted or mounting ' +
'components.'
) : invariant(this.isMounted() ||
compositeLifeCycleState === CompositeLifeCycle.MOUNTING));
("production" !== process.env.NODE_ENV ? invariant(
compositeLifeCycleState !== CompositeLifeCycle.UNMOUNTING &&
ReactCurrentOwner.current == null,
'forceUpdate(...): Cannot force an update while unmounting component ' +
'or within a `render` function.'
) : invariant(compositeLifeCycleState !== CompositeLifeCycle.UNMOUNTING &&
ReactCurrentOwner.current == null));
this._pendingForceUpdate = true;
ReactUpdates.enqueueUpdate(this, callback);
},
/**
* @private
*/
_renderValidatedComponent: ReactPerf.measure(
'ReactCompositeComponent',
'_renderValidatedComponent',
function() {
var renderedComponent;
var previousContext = ReactContext.current;
ReactContext.current = this._processChildContext(
this._currentElement._context
);
ReactCurrentOwner.current = this;
try {
renderedComponent = this.render();
if (renderedComponent === null || renderedComponent === false) {
renderedComponent = ReactEmptyComponent.getEmptyComponent();
ReactEmptyComponent.registerNullComponentID(this._rootNodeID);
} else {
ReactEmptyComponent.deregisterNullComponentID(this._rootNodeID);
}
} finally {
ReactContext.current = previousContext;
ReactCurrentOwner.current = null;
}
("production" !== process.env.NODE_ENV ? invariant(
ReactElement.isValidElement(renderedComponent),
'%s.render(): A valid ReactComponent must be returned. You may have ' +
'returned undefined, an array or some other invalid object.',
this.constructor.displayName || 'ReactCompositeComponent'
) : invariant(ReactElement.isValidElement(renderedComponent)));
return renderedComponent;
}
),
/**
* @private
*/
_bindAutoBindMethods: function() {
for (var autoBindKey in this.__reactAutoBindMap) {
if (!this.__reactAutoBindMap.hasOwnProperty(autoBindKey)) {
continue;
}
var method = this.__reactAutoBindMap[autoBindKey];
this[autoBindKey] = this._bindAutoBindMethod(ReactErrorUtils.guard(
method,
this.constructor.displayName + '.' + autoBindKey
));
}
},
/**
* Binds a method to the component.
*
* @param {function} method Method to be bound.
* @private
*/
_bindAutoBindMethod: function(method) {
var component = this;
var boundMethod = method.bind(component);
if ("production" !== process.env.NODE_ENV) {
boundMethod.__reactBoundContext = component;
boundMethod.__reactBoundMethod = method;
boundMethod.__reactBoundArguments = null;
var componentName = component.constructor.displayName;
var _bind = boundMethod.bind;
boundMethod.bind = function(newThis ) {for (var args=[],$__0=1,$__1=arguments.length;$__0<$__1;$__0++) args.push(arguments[$__0]);
// User is trying to bind() an autobound method; we effectively will
// ignore the value of "this" that the user is trying to use, so
// let's warn.
if (newThis !== component && newThis !== null) {
monitorCodeUse('react_bind_warning', { component: componentName });
console.warn(
'bind(): React component methods may only be bound to the ' +
'component instance. See ' + componentName
);
} else if (!args.length) {
monitorCodeUse('react_bind_warning', { component: componentName });
console.warn(
'bind(): You are binding a component method to the component. ' +
'React does this for you automatically in a high-performance ' +
'way, so you can safely remove this call. See ' + componentName
);
return boundMethod;
}
var reboundMethod = _bind.apply(boundMethod, arguments);
reboundMethod.__reactBoundContext = component;
reboundMethod.__reactBoundMethod = method;
reboundMethod.__reactBoundArguments = args;
return reboundMethod;
};
}
return boundMethod;
}
};
var ReactCompositeComponentBase = function() {};
assign(
ReactCompositeComponentBase.prototype,
ReactComponent.Mixin,
ReactOwner.Mixin,
ReactPropTransferer.Mixin,
ReactCompositeComponentMixin
);
/**
* Module for creating composite components.
*
* @class ReactCompositeComponent
* @extends ReactComponent
* @extends ReactOwner
* @extends ReactPropTransferer
*/
var ReactCompositeComponent = {
LifeCycle: CompositeLifeCycle,
Base: ReactCompositeComponentBase,
/**
* Creates a composite component class given a class specification.
*
* @param {object} spec Class specification (which must define `render`).
* @return {function} Component constructor function.
* @public
*/
createClass: function(spec) {
var Constructor = function(props) {
// This constructor is overridden by mocks. The argument is used
// by mocks to assert on what gets mounted. This will later be used
// by the stand-alone class implementation.
};
Constructor.prototype = new ReactCompositeComponentBase();
Constructor.prototype.constructor = Constructor;
injectedMixins.forEach(
mixSpecIntoComponent.bind(null, Constructor)
);
mixSpecIntoComponent(Constructor, spec);
// Initialize the defaultProps property after all mixins have been merged
if (Constructor.getDefaultProps) {
Constructor.defaultProps = Constructor.getDefaultProps();
}
("production" !== process.env.NODE_ENV ? invariant(
Constructor.prototype.render,
'createClass(...): Class specification must implement a `render` method.'
) : invariant(Constructor.prototype.render));
if ("production" !== process.env.NODE_ENV) {
if (Constructor.prototype.componentShouldUpdate) {
monitorCodeUse(
'react_component_should_update_warning',
{ component: spec.displayName }
);
console.warn(
(spec.displayName || 'A component') + ' has a method called ' +
'componentShouldUpdate(). Did you mean shouldComponentUpdate()? ' +
'The name is phrased as a question because the function is ' +
'expected to return a value.'
);
}
}
// Reduce time spent doing lookups by setting these on the prototype.
for (var methodName in ReactCompositeComponentInterface) {
if (!Constructor.prototype[methodName]) {
Constructor.prototype[methodName] = null;
}
}
if ("production" !== process.env.NODE_ENV) {
return ReactLegacyElement.wrapFactory(
ReactElementValidator.createFactory(Constructor)
);
}
return ReactLegacyElement.wrapFactory(
ReactElement.createFactory(Constructor)
);
},
injection: {
injectMixin: function(mixin) {
injectedMixins.push(mixin);
}
}
};
module.exports = ReactCompositeComponent;
| demns/todomvc-perf-comparison | todomvc/react/node_modules/react/lib/ReactCompositeComponent.js | JavaScript | mit | 48,272 |
<?php
/**
* System messages translation for CodeIgniter(tm)
*
* @author CodeIgniter community
* @author Mutasim Ridlo, S.Kom
* @copyright Copyright (c) 2014 - 2015, British Columbia Institute of Technology (http://bcit.ca/)
* @license http://opensource.org/licenses/MIT MIT License
* @link http://codeigniter.com
*/
defined('BASEPATH') OR exit('No direct script access allowed');
$lang['ftp_no_connection'] = 'Tidak dapat menemukan ID koneksi yang sah. Pastikan Anda terhubung sebelum melakukan rutinitas berkas.';
$lang['ftp_unable_to_connect'] = 'Tidak dapat terhubung ke server FTP Anda menggunakan nama host yang disediakan.';
$lang['ftp_unable_to_login'] = 'Tidak dapat masuk ke server FTP Anda. Silakan periksa nama pengguna dan password Anda.';
$lang['ftp_unable_to_mkdir'] = 'Tidak dapat membuat direktori yang telah Anda tentukan.';
$lang['ftp_unable_to_changedir'] = 'Tidak dapat mengubah direktori.';
$lang['ftp_unable_to_chmod'] = 'Tidak dapat mengatur hak akses berkas. Silakan periksa jalur Anda.';
$lang['ftp_unable_to_upload'] = 'Tidak dapat mengunggah berkas yang ditentukan. Silakan periksa jalur Anda.';
$lang['ftp_unable_to_download'] = 'Tidak dapat mengunduh berkas yang ditentukan. Silakan periksa jalur Anda.';
$lang['ftp_no_source_file'] = 'Tidak dapat menemukan sumber berkas. Silakan periksa jalur Anda.';
$lang['ftp_unable_to_rename'] = 'Tidak dapat mengubah nama berkas.';
$lang['ftp_unable_to_delete'] = 'Tidak dapat menghapus berkas.';
$lang['ftp_unable_to_move'] = 'Tidak dapat memindahkan berkas. Pastikan direktori tujuan ada.'; | sbrodin/FolletXmasGifts | system/language/indonesian/ftp_lang.php | PHP | mit | 1,570 |
ALTER TABLE db_version CHANGE COLUMN required_z0792_s0103_02_mangos_command required_z0801_s0114_02_mangos_command bit;
DELETE FROM command WHERE name IN ('ticket');
INSERT INTO command (name, security, help) VALUES
('ticket',2,'Syntax: .ticket on\r\n .ticket off\r\n .ticket #num\r\n .ticket $character_name\r\n .ticket respond #num $response\r\n .ticket respond $character_name $response\r\n\r\non/off for GMs to show or not a new ticket directly, $character_name to show ticket of this character, #num to show ticket #num.');
| vanilla-wow/MaNGOSZero | sql/updates/z0801_s0114_02_mangos_command.sql | SQL | gpl-2.0 | 565 |
// SPDX-License-Identifier: GPL-2.0+
/*
* vsp1_drv.c -- R-Car VSP1 Driver
*
* Copyright (C) 2013-2015 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/videodev2.h>
#include <media/rcar-fcp.h>
#include <media/v4l2-subdev.h>
#include "vsp1.h"
#include "vsp1_brx.h"
#include "vsp1_clu.h"
#include "vsp1_dl.h"
#include "vsp1_drm.h"
#include "vsp1_hgo.h"
#include "vsp1_hgt.h"
#include "vsp1_hsit.h"
#include "vsp1_lif.h"
#include "vsp1_lut.h"
#include "vsp1_pipe.h"
#include "vsp1_rwpf.h"
#include "vsp1_sru.h"
#include "vsp1_uds.h"
#include "vsp1_uif.h"
#include "vsp1_video.h"
/* -----------------------------------------------------------------------------
* Interrupt Handling
*/
/*
 * vsp1_irq_handler - Shared interrupt handler for all WPF instances
 * @irq: the IRQ number (unused, the line is shared)
 * @data: the struct vsp1_device registered with devm_request_irq()
 *
 * Reads and acknowledges the per-WPF interrupt status registers and signals
 * frame completion to the pipeline for every WPF that reported a DFE event.
 */
static irqreturn_t vsp1_irq_handler(int irq, void *data)
{
	/* The two interrupt sources this driver cares about. */
	u32 mask = VI6_WFP_IRQ_STA_DFE | VI6_WFP_IRQ_STA_FRE;
	struct vsp1_device *vsp1 = data;
	irqreturn_t ret = IRQ_NONE;
	unsigned int i;
	u32 status;
	for (i = 0; i < vsp1->info->wpf_count; ++i) {
		struct vsp1_rwpf *wpf = vsp1->wpf[i];
		if (wpf == NULL)
			continue;
		status = vsp1_read(vsp1, VI6_WPF_IRQ_STA(i));
		/* Acknowledge the handled sources by clearing their bits. */
		vsp1_write(vsp1, VI6_WPF_IRQ_STA(i), ~status & mask);
		if (status & VI6_WFP_IRQ_STA_DFE) {
			/* Frame end: complete the pipeline for this WPF. */
			vsp1_pipeline_frame_end(wpf->entity.pipe);
			ret = IRQ_HANDLED;
		}
	}
	return ret;
}
/* -----------------------------------------------------------------------------
* Entities
*/
/*
 * vsp1_create_sink_links - Create links from all sources to the given sink
 *
 * This function creates media links from all valid sources to the given sink
 * pad. Links that would be invalid according to the VSP1 hardware capabilities
 * are skipped. Those include all links
 *
 * - from a UDS to a UDS (UDS entities can't be chained)
 * - from an entity to itself (no loops are allowed)
 *
 * Furthermore, the BRS can't be connected to histogram generators, but no
 * special check is currently needed as all VSP instances that include a BRS
 * have no histogram generator.
 *
 * Return 0 on success or a negative error code from media_create_pad_link().
 */
static int vsp1_create_sink_links(struct vsp1_device *vsp1,
				  struct vsp1_entity *sink)
{
	struct media_entity *entity = &sink->subdev.entity;
	struct vsp1_entity *source;
	unsigned int pad;
	int ret;
	list_for_each_entry(source, &vsp1->entities, list_dev) {
		u32 flags;
		/* Same-type links cover both no-loop and no-UDS-chaining. */
		if (source->type == sink->type)
			continue;
		/* These entity types can never act as sources. */
		if (source->type == VSP1_ENTITY_HGO ||
		    source->type == VSP1_ENTITY_HGT ||
		    source->type == VSP1_ENTITY_LIF ||
		    source->type == VSP1_ENTITY_WPF)
			continue;
		/* Enable the default RPFn -> WPFn passthrough link only. */
		flags = source->type == VSP1_ENTITY_RPF &&
			sink->type == VSP1_ENTITY_WPF &&
			source->index == sink->index
		      ? MEDIA_LNK_FL_ENABLED : 0;
		for (pad = 0; pad < entity->num_pads; ++pad) {
			if (!(entity->pads[pad].flags & MEDIA_PAD_FL_SINK))
				continue;
			ret = media_create_pad_link(&source->subdev.entity,
						    source->source_pad,
						    entity, pad, flags);
			if (ret < 0)
				return ret;
			if (flags & MEDIA_LNK_FL_ENABLED)
				source->sink = sink;
		}
	}
	return 0;
}
/*
 * vsp1_uapi_create_links - Create all media controller links exposed to
 * userspace
 *
 * Creates the subdev-to-subdev links through vsp1_create_sink_links(), plus
 * the fixed links between histogram subdevs and their video nodes, WPF to
 * LIF, and the video device nodes attached to the RPFs and WPFs.
 *
 * Return 0 on success or a negative error code otherwise.
 */
static int vsp1_uapi_create_links(struct vsp1_device *vsp1)
{
	struct vsp1_entity *entity;
	unsigned int i;
	int ret;
	list_for_each_entry(entity, &vsp1->entities, list_dev) {
		/* LIFs and RPFs have no subdev sources to link from. */
		if (entity->type == VSP1_ENTITY_LIF ||
		    entity->type == VSP1_ENTITY_RPF)
			continue;
		ret = vsp1_create_sink_links(vsp1, entity);
		if (ret < 0)
			return ret;
	}
	/* Histogram subdevs feed their capture video nodes permanently. */
	if (vsp1->hgo) {
		ret = media_create_pad_link(&vsp1->hgo->histo.entity.subdev.entity,
					    HISTO_PAD_SOURCE,
					    &vsp1->hgo->histo.video.entity, 0,
					    MEDIA_LNK_FL_ENABLED |
					    MEDIA_LNK_FL_IMMUTABLE);
		if (ret < 0)
			return ret;
	}
	if (vsp1->hgt) {
		ret = media_create_pad_link(&vsp1->hgt->histo.entity.subdev.entity,
					    HISTO_PAD_SOURCE,
					    &vsp1->hgt->histo.video.entity, 0,
					    MEDIA_LNK_FL_ENABLED |
					    MEDIA_LNK_FL_IMMUTABLE);
		if (ret < 0)
			return ret;
	}
	/* WPFn -> LIFn, disabled by default. */
	for (i = 0; i < vsp1->info->lif_count; ++i) {
		if (!vsp1->lif[i])
			continue;
		ret = media_create_pad_link(&vsp1->wpf[i]->entity.subdev.entity,
					    RWPF_PAD_SOURCE,
					    &vsp1->lif[i]->entity.subdev.entity,
					    LIF_PAD_SINK, 0);
		if (ret < 0)
			return ret;
	}
	/* Output video nodes feed the RPFs; immutable. */
	for (i = 0; i < vsp1->info->rpf_count; ++i) {
		struct vsp1_rwpf *rpf = vsp1->rpf[i];
		ret = media_create_pad_link(&rpf->video->video.entity, 0,
					    &rpf->entity.subdev.entity,
					    RWPF_PAD_SINK,
					    MEDIA_LNK_FL_ENABLED |
					    MEDIA_LNK_FL_IMMUTABLE);
		if (ret < 0)
			return ret;
	}
	for (i = 0; i < vsp1->info->wpf_count; ++i) {
		/*
		 * Connect the video device to the WPF. All connections are
		 * immutable.
		 */
		struct vsp1_rwpf *wpf = vsp1->wpf[i];
		ret = media_create_pad_link(&wpf->entity.subdev.entity,
					    RWPF_PAD_SOURCE,
					    &wpf->video->video.entity, 0,
					    MEDIA_LNK_FL_IMMUTABLE |
					    MEDIA_LNK_FL_ENABLED);
		if (ret < 0)
			return ret;
	}
	return 0;
}
/*
 * vsp1_destroy_entities - Tear down everything vsp1_create_entities() set up
 *
 * Safe to call on a partially-initialized device: entity and video lists are
 * walked with the _safe iterators and the media/DRM cleanup calls mirror the
 * registration paths taken depending on vsp1->info->uapi.
 */
static void vsp1_destroy_entities(struct vsp1_device *vsp1)
{
	struct vsp1_entity *entity, *_entity;
	struct vsp1_video *video, *_video;
	list_for_each_entry_safe(entity, _entity, &vsp1->entities, list_dev) {
		list_del(&entity->list_dev);
		vsp1_entity_destroy(entity);
	}
	list_for_each_entry_safe(video, _video, &vsp1->videos, list) {
		list_del(&video->list);
		vsp1_video_cleanup(video);
	}
	v4l2_device_unregister(&vsp1->v4l2_dev);
	if (vsp1->info->uapi)
		media_device_unregister(&vsp1->media_dev);
	media_device_cleanup(&vsp1->media_dev);
	/* The DRM pipeline is only initialized when the uapi is disabled. */
	if (!vsp1->info->uapi)
		vsp1_drm_cleanup(vsp1);
}
/*
 * vsp1_create_entities - Instantiate all hardware entities and register them
 *
 * Initializes the media device, creates one driver entity per hardware block
 * reported by vsp1->info (BRS/BRU/CLU/HSI/HST/HGO/HGT/LIF/LUT/RPF/SRU/UDS/
 * UIF/WPF), registers every subdev, then either exposes the userspace API
 * (links + device nodes + media device) or initializes the DRM pipeline.
 *
 * On any failure the partially-created state is destroyed through
 * vsp1_destroy_entities() before returning.
 *
 * Return 0 on success or a negative error code otherwise.
 */
static int vsp1_create_entities(struct vsp1_device *vsp1)
{
	struct media_device *mdev = &vsp1->media_dev;
	struct v4l2_device *vdev = &vsp1->v4l2_dev;
	struct vsp1_entity *entity;
	unsigned int i;
	int ret;
	mdev->dev = vsp1->dev;
	mdev->hw_revision = vsp1->version;
	strscpy(mdev->model, vsp1->info->model, sizeof(mdev->model));
	snprintf(mdev->bus_info, sizeof(mdev->bus_info), "platform:%s",
		 dev_name(mdev->dev));
	media_device_init(mdev);
	vsp1->media_ops.link_setup = vsp1_entity_link_setup;
	/*
	 * Don't perform link validation when the userspace API is disabled as
	 * the pipeline is configured internally by the driver in that case, and
	 * its configuration can thus be trusted.
	 */
	if (vsp1->info->uapi)
		vsp1->media_ops.link_validate = v4l2_subdev_link_validate;
	vdev->mdev = mdev;
	ret = v4l2_device_register(vsp1->dev, vdev);
	if (ret < 0) {
		dev_err(vsp1->dev, "V4L2 device registration failed (%d)\n",
			ret);
		goto done;
	}
	/* Instantiate all the entities. */
	if (vsp1_feature(vsp1, VSP1_HAS_BRS)) {
		vsp1->brs = vsp1_brx_create(vsp1, VSP1_ENTITY_BRS);
		if (IS_ERR(vsp1->brs)) {
			ret = PTR_ERR(vsp1->brs);
			goto done;
		}
		list_add_tail(&vsp1->brs->entity.list_dev, &vsp1->entities);
	}
	if (vsp1_feature(vsp1, VSP1_HAS_BRU)) {
		vsp1->bru = vsp1_brx_create(vsp1, VSP1_ENTITY_BRU);
		if (IS_ERR(vsp1->bru)) {
			ret = PTR_ERR(vsp1->bru);
			goto done;
		}
		list_add_tail(&vsp1->bru->entity.list_dev, &vsp1->entities);
	}
	if (vsp1_feature(vsp1, VSP1_HAS_CLU)) {
		vsp1->clu = vsp1_clu_create(vsp1);
		if (IS_ERR(vsp1->clu)) {
			ret = PTR_ERR(vsp1->clu);
			goto done;
		}
		list_add_tail(&vsp1->clu->entity.list_dev, &vsp1->entities);
	}
	/* HSI and HST converters are present on every instance. */
	vsp1->hsi = vsp1_hsit_create(vsp1, true);
	if (IS_ERR(vsp1->hsi)) {
		ret = PTR_ERR(vsp1->hsi);
		goto done;
	}
	list_add_tail(&vsp1->hsi->entity.list_dev, &vsp1->entities);
	vsp1->hst = vsp1_hsit_create(vsp1, false);
	if (IS_ERR(vsp1->hst)) {
		ret = PTR_ERR(vsp1->hst);
		goto done;
	}
	list_add_tail(&vsp1->hst->entity.list_dev, &vsp1->entities);
	/* Histogram generators are only usable through the userspace API. */
	if (vsp1_feature(vsp1, VSP1_HAS_HGO) && vsp1->info->uapi) {
		vsp1->hgo = vsp1_hgo_create(vsp1);
		if (IS_ERR(vsp1->hgo)) {
			ret = PTR_ERR(vsp1->hgo);
			goto done;
		}
		list_add_tail(&vsp1->hgo->histo.entity.list_dev,
			      &vsp1->entities);
	}
	if (vsp1_feature(vsp1, VSP1_HAS_HGT) && vsp1->info->uapi) {
		vsp1->hgt = vsp1_hgt_create(vsp1);
		if (IS_ERR(vsp1->hgt)) {
			ret = PTR_ERR(vsp1->hgt);
			goto done;
		}
		list_add_tail(&vsp1->hgt->histo.entity.list_dev,
			      &vsp1->entities);
	}
	/*
	 * The LIFs are only supported when used in conjunction with the DU, in
	 * which case the userspace API is disabled. If the userspace API is
	 * enabled skip the LIFs, even when present.
	 */
	if (!vsp1->info->uapi) {
		for (i = 0; i < vsp1->info->lif_count; ++i) {
			struct vsp1_lif *lif;
			lif = vsp1_lif_create(vsp1, i);
			if (IS_ERR(lif)) {
				ret = PTR_ERR(lif);
				goto done;
			}
			vsp1->lif[i] = lif;
			list_add_tail(&lif->entity.list_dev, &vsp1->entities);
		}
	}
	if (vsp1_feature(vsp1, VSP1_HAS_LUT)) {
		vsp1->lut = vsp1_lut_create(vsp1);
		if (IS_ERR(vsp1->lut)) {
			ret = PTR_ERR(vsp1->lut);
			goto done;
		}
		list_add_tail(&vsp1->lut->entity.list_dev, &vsp1->entities);
	}
	for (i = 0; i < vsp1->info->rpf_count; ++i) {
		struct vsp1_rwpf *rpf;
		rpf = vsp1_rpf_create(vsp1, i);
		if (IS_ERR(rpf)) {
			ret = PTR_ERR(rpf);
			goto done;
		}
		vsp1->rpf[i] = rpf;
		list_add_tail(&rpf->entity.list_dev, &vsp1->entities);
		/* Each RPF gets an output video node when uapi is enabled. */
		if (vsp1->info->uapi) {
			struct vsp1_video *video = vsp1_video_create(vsp1, rpf);
			if (IS_ERR(video)) {
				ret = PTR_ERR(video);
				goto done;
			}
			list_add_tail(&video->list, &vsp1->videos);
		}
	}
	if (vsp1_feature(vsp1, VSP1_HAS_SRU)) {
		vsp1->sru = vsp1_sru_create(vsp1);
		if (IS_ERR(vsp1->sru)) {
			ret = PTR_ERR(vsp1->sru);
			goto done;
		}
		list_add_tail(&vsp1->sru->entity.list_dev, &vsp1->entities);
	}
	for (i = 0; i < vsp1->info->uds_count; ++i) {
		struct vsp1_uds *uds;
		uds = vsp1_uds_create(vsp1, i);
		if (IS_ERR(uds)) {
			ret = PTR_ERR(uds);
			goto done;
		}
		vsp1->uds[i] = uds;
		list_add_tail(&uds->entity.list_dev, &vsp1->entities);
	}
	for (i = 0; i < vsp1->info->uif_count; ++i) {
		struct vsp1_uif *uif;
		uif = vsp1_uif_create(vsp1, i);
		if (IS_ERR(uif)) {
			ret = PTR_ERR(uif);
			goto done;
		}
		vsp1->uif[i] = uif;
		list_add_tail(&uif->entity.list_dev, &vsp1->entities);
	}
	for (i = 0; i < vsp1->info->wpf_count; ++i) {
		struct vsp1_rwpf *wpf;
		wpf = vsp1_wpf_create(vsp1, i);
		if (IS_ERR(wpf)) {
			ret = PTR_ERR(wpf);
			goto done;
		}
		vsp1->wpf[i] = wpf;
		list_add_tail(&wpf->entity.list_dev, &vsp1->entities);
		/* Each WPF gets a capture video node when uapi is enabled. */
		if (vsp1->info->uapi) {
			struct vsp1_video *video = vsp1_video_create(vsp1, wpf);
			if (IS_ERR(video)) {
				ret = PTR_ERR(video);
				goto done;
			}
			list_add_tail(&video->list, &vsp1->videos);
		}
	}
	/* Register all subdevs. */
	list_for_each_entry(entity, &vsp1->entities, list_dev) {
		ret = v4l2_device_register_subdev(&vsp1->v4l2_dev,
						  &entity->subdev);
		if (ret < 0)
			goto done;
	}
	/*
	 * Create links and register subdev nodes if the userspace API is
	 * enabled or initialize the DRM pipeline otherwise.
	 */
	if (vsp1->info->uapi) {
		ret = vsp1_uapi_create_links(vsp1);
		if (ret < 0)
			goto done;
		ret = v4l2_device_register_subdev_nodes(&vsp1->v4l2_dev);
		if (ret < 0)
			goto done;
		ret = media_device_register(mdev);
	} else {
		ret = vsp1_drm_init(vsp1);
	}
done:
	if (ret < 0)
		vsp1_destroy_entities(vsp1);
	return ret;
}
/*
 * vsp1_reset_wpf - Soft-reset a single WPF instance
 * @vsp1: the VSP1 device
 * @index: index of the WPF to reset
 *
 * If the WPF is active, issue a software reset and poll the status register
 * until the WPF goes idle, sleeping 1-2ms between polls for at most ten
 * attempts.
 *
 * Return 0 on success (including when the WPF was already idle) or
 * -ETIMEDOUT if the WPF did not become idle in time.
 */
int vsp1_reset_wpf(struct vsp1_device *vsp1, unsigned int index)
{
	unsigned int attempt;

	/* Nothing to do if the WPF is already idle. */
	if (!(vsp1_read(vsp1, VI6_STATUS) & VI6_STATUS_SYS_ACT(index)))
		return 0;

	vsp1_write(vsp1, VI6_SRESET, VI6_SRESET_SRTS(index));

	for (attempt = 0; attempt < 10; ++attempt) {
		if (!(vsp1_read(vsp1, VI6_STATUS) & VI6_STATUS_SYS_ACT(index)))
			return 0;

		usleep_range(1000, 2000);
	}

	dev_err(vsp1->dev, "failed to reset wpf.%u\n", index);
	return -ETIMEDOUT;
}
/*
 * vsp1_device_init - Bring the hardware to a known idle state
 *
 * Resets every WPF, programs the clock stop wait counters and routes all
 * data path router (DPR) nodes to "unused" so no stale routing survives a
 * runtime resume, then sets up the display list manager.
 *
 * Return 0 on success or a negative error code from vsp1_reset_wpf().
 */
static int vsp1_device_init(struct vsp1_device *vsp1)
{
	unsigned int i;
	int ret;
	/* Reset any channel that might be running. */
	for (i = 0; i < vsp1->info->wpf_count; ++i) {
		ret = vsp1_reset_wpf(vsp1, i);
		if (ret < 0)
			return ret;
	}
	vsp1_write(vsp1, VI6_CLK_DCSWT, (8 << VI6_CLK_DCSWT_CSTPW_SHIFT) |
		   (8 << VI6_CLK_DCSWT_CSTRW_SHIFT));
	/* Disconnect every routable block from the data path. */
	for (i = 0; i < vsp1->info->rpf_count; ++i)
		vsp1_write(vsp1, VI6_DPR_RPF_ROUTE(i), VI6_DPR_NODE_UNUSED);
	for (i = 0; i < vsp1->info->uds_count; ++i)
		vsp1_write(vsp1, VI6_DPR_UDS_ROUTE(i), VI6_DPR_NODE_UNUSED);
	for (i = 0; i < vsp1->info->uif_count; ++i)
		vsp1_write(vsp1, VI6_DPR_UIF_ROUTE(i), VI6_DPR_NODE_UNUSED);
	vsp1_write(vsp1, VI6_DPR_SRU_ROUTE, VI6_DPR_NODE_UNUSED);
	vsp1_write(vsp1, VI6_DPR_LUT_ROUTE, VI6_DPR_NODE_UNUSED);
	vsp1_write(vsp1, VI6_DPR_CLU_ROUTE, VI6_DPR_NODE_UNUSED);
	vsp1_write(vsp1, VI6_DPR_HST_ROUTE, VI6_DPR_NODE_UNUSED);
	vsp1_write(vsp1, VI6_DPR_HSI_ROUTE, VI6_DPR_NODE_UNUSED);
	vsp1_write(vsp1, VI6_DPR_BRU_ROUTE, VI6_DPR_NODE_UNUSED);
	if (vsp1_feature(vsp1, VSP1_HAS_BRS))
		vsp1_write(vsp1, VI6_DPR_ILV_BRS_ROUTE, VI6_DPR_NODE_UNUSED);
	/* Park the histogram sampling points on unused nodes as well. */
	vsp1_write(vsp1, VI6_DPR_HGO_SMPPT, (7 << VI6_DPR_SMPPT_TGW_SHIFT) |
		   (VI6_DPR_NODE_UNUSED << VI6_DPR_SMPPT_PT_SHIFT));
	vsp1_write(vsp1, VI6_DPR_HGT_SMPPT, (7 << VI6_DPR_SMPPT_TGW_SHIFT) |
		   (VI6_DPR_NODE_UNUSED << VI6_DPR_SMPPT_PT_SHIFT));
	vsp1_dlm_setup(vsp1);
	return 0;
}
/*
* vsp1_device_get - Acquire the VSP1 device
*
* Make sure the device is not suspended and initialize it if needed.
*
* Return 0 on success or a negative error code otherwise.
*/
int vsp1_device_get(struct vsp1_device *vsp1)
{
	int ret;
	ret = pm_runtime_get_sync(vsp1->dev);
	if (ret < 0) {
		/*
		 * pm_runtime_get_sync() increments the usage counter even on
		 * failure; drop it without triggering an idle transition to
		 * keep the count balanced.
		 */
		pm_runtime_put_noidle(vsp1->dev);
		return ret;
	}
	return 0;
}
/*
* vsp1_device_put - Release the VSP1 device
*
* Decrement the VSP1 reference count and cleanup the device if the last
* reference is released.
*/
void vsp1_device_put(struct vsp1_device *vsp1)
{
	/* Synchronous put so the device may suspend immediately. */
	pm_runtime_put_sync(vsp1->dev);
}
/* -----------------------------------------------------------------------------
* Power Management
*/
/* System sleep: quiesce video pipelines then force a runtime suspend. */
static int __maybe_unused vsp1_pm_suspend(struct device *dev)
{
	struct vsp1_device *vsp1 = dev_get_drvdata(dev);
	/*
	 * When used as part of a display pipeline, the VSP is stopped and
	 * restarted explicitly by the DU.
	 */
	if (!vsp1->drm)
		vsp1_video_suspend(vsp1);
	pm_runtime_force_suspend(vsp1->dev);
	return 0;
}
/* System resume: mirror of vsp1_pm_suspend(), in reverse order. */
static int __maybe_unused vsp1_pm_resume(struct device *dev)
{
	struct vsp1_device *vsp1 = dev_get_drvdata(dev);
	pm_runtime_force_resume(vsp1->dev);
	/*
	 * When used as part of a display pipeline, the VSP is stopped and
	 * restarted explicitly by the DU.
	 */
	if (!vsp1->drm)
		vsp1_video_resume(vsp1);
	return 0;
}
/* Runtime suspend: only the optional FCP needs to be disabled. */
static int __maybe_unused vsp1_pm_runtime_suspend(struct device *dev)
{
	struct vsp1_device *vsp1 = dev_get_drvdata(dev);
	rcar_fcp_disable(vsp1->fcp);
	return 0;
}
/*
 * Runtime resume: reinitialize the hardware and re-enable the FCP.
 * vsp1->info is NULL during the very first resume in probe (before the
 * version register has been read), in which case device init is skipped.
 */
static int __maybe_unused vsp1_pm_runtime_resume(struct device *dev)
{
	struct vsp1_device *vsp1 = dev_get_drvdata(dev);
	int ret;
	if (vsp1->info) {
		ret = vsp1_device_init(vsp1);
		if (ret < 0)
			return ret;
	}
	return rcar_fcp_enable(vsp1->fcp);
}
/* System sleep and runtime PM callbacks registered with the driver core. */
static const struct dev_pm_ops vsp1_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(vsp1_pm_suspend, vsp1_pm_resume)
	SET_RUNTIME_PM_OPS(vsp1_pm_runtime_suspend, vsp1_pm_runtime_resume, NULL)
};
/* -----------------------------------------------------------------------------
* Platform Driver
*/
/*
 * Per-model capability table. The entry whose .version matches the model
 * field of the VI6_IP_VERSION register (see vsp1_probe()) describes the
 * feature set, block counts and whether the userspace API is exposed.
 */
static const struct vsp1_device_info vsp1_device_infos[] = {
	{
		.version = VI6_IP_VERSION_MODEL_VSPS_H2,
		.model = "VSP1-S",
		.gen = 2,
		.features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_HGO
			  | VSP1_HAS_HGT | VSP1_HAS_LUT | VSP1_HAS_SRU
			  | VSP1_HAS_WPF_VFLIP,
		.rpf_count = 5,
		.uds_count = 3,
		.wpf_count = 4,
		.num_bru_inputs = 4,
		.uapi = true,
	}, {
		.version = VI6_IP_VERSION_MODEL_VSPR_H2,
		.model = "VSP1-R",
		.gen = 2,
		.features = VSP1_HAS_BRU | VSP1_HAS_SRU | VSP1_HAS_WPF_VFLIP,
		.rpf_count = 5,
		.uds_count = 3,
		.wpf_count = 4,
		.num_bru_inputs = 4,
		.uapi = true,
	}, {
		.version = VI6_IP_VERSION_MODEL_VSPD_GEN2,
		.model = "VSP1-D",
		.gen = 2,
		.features = VSP1_HAS_BRU | VSP1_HAS_HGO | VSP1_HAS_LUT,
		.lif_count = 1,
		.rpf_count = 4,
		.uds_count = 1,
		.wpf_count = 1,
		.num_bru_inputs = 4,
		.uapi = true,
	}, {
		.version = VI6_IP_VERSION_MODEL_VSPS_M2,
		.model = "VSP1-S",
		.gen = 2,
		.features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_HGO
			  | VSP1_HAS_HGT | VSP1_HAS_LUT | VSP1_HAS_SRU
			  | VSP1_HAS_WPF_VFLIP,
		.rpf_count = 5,
		.uds_count = 1,
		.wpf_count = 4,
		.num_bru_inputs = 4,
		.uapi = true,
	}, {
		.version = VI6_IP_VERSION_MODEL_VSPS_V2H,
		.model = "VSP1V-S",
		.gen = 2,
		.features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_LUT
			  | VSP1_HAS_SRU | VSP1_HAS_WPF_VFLIP,
		.rpf_count = 4,
		.uds_count = 1,
		.wpf_count = 4,
		.num_bru_inputs = 4,
		.uapi = true,
	}, {
		.version = VI6_IP_VERSION_MODEL_VSPD_V2H,
		.model = "VSP1V-D",
		.gen = 2,
		.features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_LUT,
		.lif_count = 1,
		.rpf_count = 4,
		.uds_count = 1,
		.wpf_count = 1,
		.num_bru_inputs = 4,
		.uapi = true,
	}, {
		.version = VI6_IP_VERSION_MODEL_VSPI_GEN3,
		.model = "VSP2-I",
		.gen = 3,
		.features = VSP1_HAS_CLU | VSP1_HAS_HGO | VSP1_HAS_HGT
			  | VSP1_HAS_LUT | VSP1_HAS_SRU | VSP1_HAS_WPF_HFLIP
			  | VSP1_HAS_WPF_VFLIP,
		.rpf_count = 1,
		.uds_count = 1,
		.wpf_count = 1,
		.uapi = true,
	}, {
		.version = VI6_IP_VERSION_MODEL_VSPBD_GEN3,
		.model = "VSP2-BD",
		.gen = 3,
		.features = VSP1_HAS_BRU | VSP1_HAS_WPF_VFLIP,
		.rpf_count = 5,
		.wpf_count = 1,
		.num_bru_inputs = 5,
		.uapi = true,
	}, {
		.version = VI6_IP_VERSION_MODEL_VSPBC_GEN3,
		.model = "VSP2-BC",
		.gen = 3,
		.features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_HGO
			  | VSP1_HAS_LUT | VSP1_HAS_WPF_VFLIP,
		.rpf_count = 5,
		.wpf_count = 1,
		.num_bru_inputs = 5,
		.uapi = true,
	}, {
		.version = VI6_IP_VERSION_MODEL_VSPBS_GEN3,
		.model = "VSP2-BS",
		.gen = 3,
		.features = VSP1_HAS_BRS | VSP1_HAS_WPF_VFLIP,
		.rpf_count = 2,
		.wpf_count = 1,
		.uapi = true,
	}, {
		/* Display-oriented models below have no uapi (DRM/DU use). */
		.version = VI6_IP_VERSION_MODEL_VSPD_GEN3,
		.model = "VSP2-D",
		.gen = 3,
		.features = VSP1_HAS_BRU | VSP1_HAS_WPF_VFLIP | VSP1_HAS_EXT_DL,
		.lif_count = 1,
		.rpf_count = 5,
		.uif_count = 1,
		.wpf_count = 2,
		.num_bru_inputs = 5,
	}, {
		.version = VI6_IP_VERSION_MODEL_VSPD_V3,
		.model = "VSP2-D",
		.gen = 3,
		.features = VSP1_HAS_BRS | VSP1_HAS_BRU,
		.lif_count = 1,
		.rpf_count = 5,
		.uif_count = 1,
		.wpf_count = 1,
		.num_bru_inputs = 5,
	}, {
		.version = VI6_IP_VERSION_MODEL_VSPDL_GEN3,
		.model = "VSP2-DL",
		.gen = 3,
		.features = VSP1_HAS_BRS | VSP1_HAS_BRU | VSP1_HAS_EXT_DL,
		.lif_count = 2,
		.rpf_count = 5,
		.uif_count = 2,
		.wpf_count = 2,
		.num_bru_inputs = 5,
	},
};
static int vsp1_probe(struct platform_device *pdev)
{
struct vsp1_device *vsp1;
struct device_node *fcp_node;
struct resource *irq;
struct resource *io;
unsigned int i;
int ret;
vsp1 = devm_kzalloc(&pdev->dev, sizeof(*vsp1), GFP_KERNEL);
if (vsp1 == NULL)
return -ENOMEM;
vsp1->dev = &pdev->dev;
INIT_LIST_HEAD(&vsp1->entities);
INIT_LIST_HEAD(&vsp1->videos);
platform_set_drvdata(pdev, vsp1);
/* I/O and IRQ resources (clock managed by the clock PM domain). */
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
vsp1->mmio = devm_ioremap_resource(&pdev->dev, io);
if (IS_ERR(vsp1->mmio))
return PTR_ERR(vsp1->mmio);
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!irq) {
dev_err(&pdev->dev, "missing IRQ\n");
return -EINVAL;
}
ret = devm_request_irq(&pdev->dev, irq->start, vsp1_irq_handler,
IRQF_SHARED, dev_name(&pdev->dev), vsp1);
if (ret < 0) {
dev_err(&pdev->dev, "failed to request IRQ\n");
return ret;
}
/* FCP (optional). */
fcp_node = of_parse_phandle(pdev->dev.of_node, "renesas,fcp", 0);
if (fcp_node) {
vsp1->fcp = rcar_fcp_get(fcp_node);
of_node_put(fcp_node);
if (IS_ERR(vsp1->fcp)) {
dev_dbg(&pdev->dev, "FCP not found (%ld)\n",
PTR_ERR(vsp1->fcp));
return PTR_ERR(vsp1->fcp);
}
/*
* When the FCP is present, it handles all bus master accesses
* for the VSP and must thus be used in place of the VSP device
* to map DMA buffers.
*/
vsp1->bus_master = rcar_fcp_get_device(vsp1->fcp);
} else {
vsp1->bus_master = vsp1->dev;
}
/* Configure device parameters based on the version register. */
pm_runtime_enable(&pdev->dev);
ret = vsp1_device_get(vsp1);
if (ret < 0)
goto done;
vsp1->version = vsp1_read(vsp1, VI6_IP_VERSION);
vsp1_device_put(vsp1);
for (i = 0; i < ARRAY_SIZE(vsp1_device_infos); ++i) {
if ((vsp1->version & VI6_IP_VERSION_MODEL_MASK) ==
vsp1_device_infos[i].version) {
vsp1->info = &vsp1_device_infos[i];
break;
}
}
if (!vsp1->info) {
dev_err(&pdev->dev, "unsupported IP version 0x%08x\n",
vsp1->version);
ret = -ENXIO;
goto done;
}
dev_dbg(&pdev->dev, "IP version 0x%08x\n", vsp1->version);
/* Instantiate entities. */
ret = vsp1_create_entities(vsp1);
if (ret < 0) {
dev_err(&pdev->dev, "failed to create entities\n");
goto done;
}
done:
if (ret)
pm_runtime_disable(&pdev->dev);
return ret;
}
/* Undo vsp1_probe(): tear down entities, release the FCP, disable PM. */
static int vsp1_remove(struct platform_device *pdev)
{
	struct vsp1_device *vsp1 = platform_get_drvdata(pdev);
	vsp1_destroy_entities(vsp1);
	rcar_fcp_put(vsp1->fcp);
	pm_runtime_disable(&pdev->dev);
	return 0;
}
/* Device tree match table; covers both VSP1 (Gen2) and VSP2 (Gen3). */
static const struct of_device_id vsp1_of_match[] = {
	{ .compatible = "renesas,vsp1" },
	{ .compatible = "renesas,vsp2" },
	{ },
};
MODULE_DEVICE_TABLE(of, vsp1_of_match);
/* Platform driver glue and module metadata. */
static struct platform_driver vsp1_platform_driver = {
	.probe		= vsp1_probe,
	.remove		= vsp1_remove,
	.driver	= {
		.name	= "vsp1",
		.pm	= &vsp1_pm_ops,
		.of_match_table = vsp1_of_match,
	},
};
module_platform_driver(vsp1_platform_driver);
MODULE_ALIAS("vsp1");
MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
MODULE_DESCRIPTION("Renesas VSP1 Driver");
MODULE_LICENSE("GPL");
| apopple/linux | drivers/media/platform/vsp1/vsp1_drv.c | C | gpl-2.0 | 21,913 |
/*
* Copyright (C) 2012-2018 Team Kodi
* This file is part of Kodi - https://kodi.tv
*
* SPDX-License-Identifier: GPL-2.0-or-later
* See LICENSES/README.md for more information.
*/
#pragma once
#include "FileItem.h"
#include <string>
#include <Neptune/Source/Core/NptReferences.h>
#include <Neptune/Source/Core/NptStrings.h>
#include <Neptune/Source/Core/NptTypes.h>
// Forward declarations keep this header light-weight; full definitions are
// only needed by the implementation files.
class CUPnPServer;
class CFileItem;
class CThumbLoader;
class PLT_DeviceData;
class PLT_HttpRequestContext;
class PLT_MediaItemResource;
class PLT_MediaObject;
class NPT_String;
namespace MUSIC_INFO {
class CMusicInfoTag;
}
class CVideoInfoTag;
namespace UPNP
{
// Identifies which UPnP role a conversion is performed for; several helpers
// below adjust their behavior based on this.
enum UPnPService {
  UPnPServiceNone = 0,
  UPnPClient,
  UPnPContentDirectory,
  UPnPPlayer,
  UPnPRenderer
};
// Functor matching media item resources by protocol and (optionally) content
// type; usable with NPT list find operations via operator().
class CResourceFinder {
public:
  CResourceFinder(const char* protocol, const char* content = NULL);
  bool operator()(const PLT_MediaItemResource& resource) const;
private:
  NPT_String m_Protocol;
  NPT_String m_Content;
};
// Bit flags describing workarounds for known-buggy UPnP client devices.
enum EClientQuirks
{
  ECLIENTQUIRKS_NONE = 0x0
  /* Client requires folder's to be marked as storageFolders as vendor type (360)*/
  , ECLIENTQUIRKS_ONLYSTORAGEFOLDER = 0x01
  /* Client can't handle subtypes for videoItems (360) */
  , ECLIENTQUIRKS_BASICVIDEOCLASS = 0x02
  /* Client requires album to be set to [Unknown Series] to show title (WMP) */
  , ECLIENTQUIRKS_UNKNOWNSERIES = 0x04
};
// Derives the quirk set from the HTTP request context of the current client.
EClientQuirks GetClientQuirks(const PLT_HttpRequestContext* context);
// Bit flags describing workarounds for known-buggy media controllers.
enum EMediaControllerQuirks
{
  EMEDIACONTROLLERQUIRKS_NONE = 0x00
  /* Media Controller expects MIME type video/x-mkv instead of video/x-matroska (Samsung) */
  , EMEDIACONTROLLERQUIRKS_X_MKV = 0x01
};
EMediaControllerQuirks GetMediaControllerQuirks(const PLT_DeviceData *device);
// MIME type helpers; the optional context allows per-client overrides.
const char* GetMimeTypeFromExtension(const char* extension, const PLT_HttpRequestContext* context = NULL);
NPT_String GetMimeType(const CFileItem& item, const PLT_HttpRequestContext* context = NULL);
NPT_String GetMimeType(const char* filename, const PLT_HttpRequestContext* context = NULL);
const NPT_String GetProtocolInfo(const CFileItem& item, const char* protocol, const PLT_HttpRequestContext* context = NULL);
const std::string& CorrectAllItemsSortHack(const std::string &item);
// Tag <-> UPnP object conversion, both directions, for music and video.
NPT_Result PopulateTagFromObject(MUSIC_INFO::CMusicInfoTag& tag,
                                 PLT_MediaObject& object,
                                 PLT_MediaItemResource* resource = NULL,
                                 UPnPService service = UPnPServiceNone);
NPT_Result PopulateTagFromObject(CVideoInfoTag& tag,
                                 PLT_MediaObject& object,
                                 PLT_MediaItemResource* resource = NULL,
                                 UPnPService service = UPnPServiceNone);
NPT_Result PopulateObjectFromTag(MUSIC_INFO::CMusicInfoTag& tag,
                                 PLT_MediaObject& object,
                                 NPT_String* file_path,
                                 PLT_MediaItemResource* resource,
                                 EClientQuirks quirks,
                                 UPnPService service = UPnPServiceNone);
NPT_Result PopulateObjectFromTag(CVideoInfoTag& tag,
                                 PLT_MediaObject& object,
                                 NPT_String* file_path,
                                 PLT_MediaItemResource* resource,
                                 EClientQuirks quirks,
                                 UPnPService service = UPnPServiceNone);
// Builds a UPnP media object from a file item and vice versa.
PLT_MediaObject* BuildObject(CFileItem& item,
                             NPT_String& file_path,
                             bool with_count,
                             NPT_Reference<CThumbLoader>& thumb_loader,
                             const PLT_HttpRequestContext* context = NULL,
                             CUPnPServer* upnp_server = NULL,
                             UPnPService upnp_service = UPnPServiceNone);
CFileItemPtr BuildObject(PLT_MediaObject* entry,
                         UPnPService upnp_service = UPnPServiceNone);
bool GetResource(const PLT_MediaObject* entry, CFileItem& item);
CFileItemPtr GetFileItem(const NPT_String& uri, const NPT_String& meta);
}
| scbash/xbmc | xbmc/network/upnp/UPnPInternal.h | C | gpl-2.0 | 4,650 |
/*
* carlu - userspace testing utility for ar9170 devices
*
* Firmware parsers
*
* Copyright 2009-2011 Christian Lamparter <chunkeey@googlemail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include "libusb.h"
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "carlu.h"
#include "usb.h"
#include "debug.h"
/*
 * carlu_fw_check - Validate the firmware's OTUS descriptor
 * @ar: device context holding the loaded firmware image
 *
 * Locates the OTUS descriptor in the firmware, verifies it advertises the
 * mandatory dummy feature bit, warns when the image is flagged unusable and
 * logs the firmware API version.
 *
 * Return 0 on success, -EINVAL if no descriptor is found, -EIO if the
 * descriptor is invalid.
 */
int carlu_fw_check(struct carlu *ar)
{
	struct carl9170fw_otus_desc *otus_desc;

	otus_desc = carlfw_find_desc(ar->fw, (uint8_t *) OTUS_MAGIC,
				     sizeof(*otus_desc),
				     CARL9170FW_OTUS_DESC_CUR_VER);
	if (!otus_desc) {
		err("No valid OTUS descriptor found.\n");
		return -EINVAL;
	}

	if (!carl9170fw_supports(otus_desc->feature_set, CARL9170FW_DUMMY_FEATURE)) {
		err("Invalid Firmware Descriptor.\n");
		return -EIO;
	}

	/* Fix log message typo: "unuseable" -> "unusable". */
	if (carl9170fw_supports(otus_desc->feature_set, CARL9170FW_UNUSABLE))
		dbg("Firmware is marked as unusable.\n");

	info("Firmware Version: %d.\n", otus_desc->api_ver);

	return 0;
}
/*
 * carlusb_fw_check - Validate the USB-side firmware descriptor and cache
 * transfer parameters
 *
 * Verifies the OTUS descriptor and its USB capability bits, then copies the
 * DMA chunking, stream mode and miniboot parameters into @ar for later use
 * by the USB transport.
 *
 * Return 0 on success or a negative error code describing the failure.
 */
int carlusb_fw_check(struct carlu *ar)
{
	struct carl9170fw_otus_desc *otus_desc;
	otus_desc = carlfw_find_desc(ar->fw, (uint8_t *) OTUS_MAGIC,
				     sizeof(*otus_desc),
				     CARL9170FW_OTUS_DESC_CUR_VER);
	if (!otus_desc) {
		err("No valid USB descriptor found.\n");
		return -ENODATA;
	}
	if (!carl9170fw_supports(otus_desc->feature_set, CARL9170FW_DUMMY_FEATURE)) {
		err("Invalid Firmware Descriptor.\n");
		return -EINVAL;
	}
	if (!carl9170fw_supports(otus_desc->feature_set, CARL9170FW_USB_INIT_FIRMWARE)) {
		err("Firmware does not know how to initialize USB core.\n");
		return -EOPNOTSUPP;
	}
	/* Tx stream mode requires extra headroom for the stream header. */
	if (carl9170fw_supports(otus_desc->feature_set, CARL9170FW_USB_DOWN_STREAM)) {
		dbg("Enabled tx stream mode.\n");
		ar->tx_stream = true;
		ar->extra_headroom = sizeof(struct ar9170_stream);
	}
	if (carl9170fw_supports(otus_desc->feature_set, CARL9170FW_USB_UP_STREAM)) {
		dbg("Enabled rx stream mode.\n");
		ar->rx_stream = true;
	}
	if (carl9170fw_supports(otus_desc->feature_set, CARL9170FW_USB_RESP_EP2))
		dbg("Firmware sends traps over EP2.\n");
	/* Descriptor fields are little-endian; convert before caching. */
	ar->dma_chunk_size = le16_to_cpu(otus_desc->tx_frag_len);
	ar->dma_chunks = otus_desc->tx_descs;
	ar->rx_max = le16_to_cpu(otus_desc->rx_max_frame_len);
	if (carl9170fw_supports(otus_desc->feature_set, CARL9170FW_MINIBOOT))
		ar->miniboot_size = le16_to_cpu(otus_desc->miniboot_size);
	return 0;
}
/*
 * carlu_fw_info - Print the firmware build date, if present
 *
 * The MOTD descriptor is optional; nothing is printed when it is absent.
 * The year is encoded as a 3-digit offset printed after a literal '2'
 * (hence the "2%.3d" format), yielding dates in the 2000s.
 */
void carlu_fw_info(struct carlu *ar)
{
	struct carl9170fw_motd_desc *motd_desc;
	unsigned int fw_date;
	motd_desc = carlfw_find_desc(ar->fw, (uint8_t *) MOTD_MAGIC,
				     sizeof(*motd_desc),
				     CARL9170FW_MOTD_DESC_CUR_VER);
	if (motd_desc) {
		fw_date = le32_to_cpu(motd_desc->fw_year_month_day);
		info("Firmware Date: 2%.3d-%.2d-%.2d\n",
		     CARL9170FW_GET_YEAR(fw_date), CARL9170FW_GET_MONTH(fw_date),
		     CARL9170FW_GET_DAY(fw_date));
	}
}
| PennartLoettring/Poettrix | rootfs/usr/lib/firmware/carl9170fw/tools/carlu/src/fw.c | C | gpl-2.0 | 3,649 |
// Copyright 2009 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include "AudioCommon/Mixer.h"
#include "Common/CommonTypes.h"
// Base class for audio output backends. Backends override the virtual
// methods; the default implementations are inert no-ops, so an instance of
// the base class is a valid "null" sound stream.
class SoundStream
{
protected:
  // Owned mixer, created at a fixed 48 kHz sample rate.
  std::unique_ptr<Mixer> m_mixer;
public:
  SoundStream() : m_mixer(new Mixer(48000)) {}
  virtual ~SoundStream() {}
  // Backends shadow this to report whether they can run on this system.
  static bool isValid() { return false; }
  Mixer* GetMixer() const { return m_mixer.get(); }
  virtual bool Init() { return false; }
  virtual void SetVolume(int) {}
  virtual void SoundLoop() {}
  virtual void Update() {}
  virtual bool SetRunning(bool running) { return false; }
};
| degasus/dolphin | Source/Core/AudioCommon/SoundStream.h | C | gpl-2.0 | 643 |
/*
* Copyright (C) 2013 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <stdbool.h>
#include <stddef.h>
#include <string.h>
#include "hal-log.h"
#include "hal.h"
#include "hal-msg.h"
#include "hal-ipc.h"
/* Callbacks registered through init(); NULL until init() succeeds and
 * again after cleanup(). Non-NULL doubles as the "interface ready" flag. */
static const btav_callbacks_t *cbs = NULL;
/* True once init() has stored the callback table. */
static bool interface_ready(void)
{
	return !!cbs;
}
/* IPC notification: A2DP connection state changed for a remote device. */
static void handle_conn_state(void *buf, uint16_t len, int fd)
{
	struct hal_ev_a2dp_conn_state *ev = buf;

	if (!cbs->connection_state_cb)
		return;

	cbs->connection_state_cb(ev->state, (bt_bdaddr_t *) (ev->bdaddr));
}
/* IPC notification: A2DP audio (streaming) state changed. */
static void handle_audio_state(void *buf, uint16_t len, int fd)
{
	struct hal_ev_a2dp_audio_state *ev = buf;

	if (!cbs->audio_state_cb)
		return;

	cbs->audio_state_cb(ev->state, (bt_bdaddr_t *) (ev->bdaddr));
}
/*
 * IPC notification: A2DP audio configuration. The audio_config_cb member
 * only exists in the Android 5.0+ HAL interface, hence the version guard;
 * on older platforms the event is silently ignored.
 */
static void handle_audio_config(void *buf, uint16_t len, int fd)
{
#if ANDROID_VERSION >= PLATFORM_VER(5, 0, 0)
	struct hal_ev_a2dp_audio_config *ev = buf;
	if (cbs->audio_config_cb)
		cbs->audio_config_cb((bt_bdaddr_t *)(ev->bdaddr),
					ev->sample_rate, ev->channel_count);
#endif
}
/*
 * handlers will be called from notification thread context,
 * index in table equals to 'opcode - HAL_MINIMUM_EVENT'
 *
 * The second field marks variable-length payloads (all fixed-size here),
 * the third is the expected payload size checked by the IPC layer.
 */
static const struct hal_ipc_handler ev_handlers[] = {
	/* HAL_EV_A2DP_CONN_STATE */
	{ handle_conn_state, false, sizeof(struct hal_ev_a2dp_conn_state) },
	/* HAL_EV_A2DP_AUDIO_STATE */
	{ handle_audio_state, false, sizeof(struct hal_ev_a2dp_audio_state) },
	/* HAL_EV_A2DP_AUDIO_CONFIG */
	{ handle_audio_config, false, sizeof(struct hal_ev_a2dp_audio_config) },
};
/*
 * a2dp_connect - Request an A2DP connection to the given remote device
 *
 * Forwards the request to the daemon over IPC. The result is delivered
 * asynchronously through the connection state callback.
 */
static bt_status_t a2dp_connect(bt_bdaddr_t *bd_addr)
{
	struct hal_cmd_a2dp_connect cmd;
	DBG("");
	if (!interface_ready())
		return BT_STATUS_NOT_READY;
	memcpy(cmd.bdaddr, bd_addr, sizeof(cmd.bdaddr));
	return hal_ipc_cmd(HAL_SERVICE_ID_A2DP, HAL_OP_A2DP_CONNECT,
				sizeof(cmd), &cmd, NULL, NULL, NULL);
}
/*
 * disconnect - Request tear-down of the A2DP connection to a remote device
 *
 * Mirror of a2dp_connect(); completion is reported via the connection
 * state callback.
 */
static bt_status_t disconnect(bt_bdaddr_t *bd_addr)
{
	struct hal_cmd_a2dp_disconnect cmd;
	DBG("");
	if (!interface_ready())
		return BT_STATUS_NOT_READY;
	memcpy(cmd.bdaddr, bd_addr, sizeof(cmd.bdaddr));
	return hal_ipc_cmd(HAL_SERVICE_ID_A2DP, HAL_OP_A2DP_DISCONNECT,
				sizeof(cmd), &cmd, NULL, NULL, NULL);
}
/*
 * init - Register the A2DP service with the daemon
 * @callbacks: caller-provided notification callbacks, stored in @cbs
 *
 * Registers the IPC event handlers first, then asks the daemon to start the
 * A2DP module; on failure both registrations are rolled back so init() can
 * be retried. Returns BT_STATUS_DONE if already initialized.
 */
static bt_status_t init(btav_callbacks_t *callbacks)
{
	struct hal_cmd_register_module cmd;
	int ret;
	DBG("");
	if (interface_ready())
		return BT_STATUS_DONE;
	cbs = callbacks;
	hal_ipc_register(HAL_SERVICE_ID_A2DP, ev_handlers,
				sizeof(ev_handlers)/sizeof(ev_handlers[0]));
	cmd.service_id = HAL_SERVICE_ID_A2DP;
	cmd.mode = HAL_MODE_DEFAULT;
	cmd.max_clients = 1;
	ret = hal_ipc_cmd(HAL_SERVICE_ID_CORE, HAL_OP_REGISTER_MODULE,
				sizeof(cmd), &cmd, NULL, NULL, NULL);
	if (ret != BT_STATUS_SUCCESS) {
		/* Roll back so a later init() attempt starts clean. */
		cbs = NULL;
		hal_ipc_unregister(HAL_SERVICE_ID_A2DP);
	}
	return ret;
}
/*
 * cleanup - Unregister the A2DP service and drop the callbacks
 *
 * No-op when the interface was never initialized. Clearing @cbs last marks
 * the interface as not ready again.
 */
static void cleanup(void)
{
	struct hal_cmd_unregister_module cmd;
	DBG("");
	if (!interface_ready())
		return;
	cmd.service_id = HAL_SERVICE_ID_A2DP;
	hal_ipc_cmd(HAL_SERVICE_ID_CORE, HAL_OP_UNREGISTER_MODULE,
				sizeof(cmd), &cmd, NULL, NULL, NULL);
	hal_ipc_unregister(HAL_SERVICE_ID_A2DP);
	cbs = NULL;
}
/* The btav_interface_t vtable handed out to the Android framework. */
static btav_interface_t iface = {
	.size = sizeof(iface),
	.init = init,
	.connect = a2dp_connect,
	.disconnect = disconnect,
	.cleanup = cleanup
};
/* Entry point returning the (singleton) A2DP interface vtable. */
btav_interface_t *bt_get_a2dp_interface(void)
{
	return &iface;
}
/**
* Copyright 2016 The AMP HTML Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS-IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Entry point into AMP for compilation with babel. Just loads amp.js and
// Babel's helpers.
import '../third_party/babel/custom-babel-helpers';
import './amp-shadow';
| DistroScale/amphtml | src/amp-shadow-babel.js | JavaScript | apache-2.0 | 799 |
@import Foundation;

// Trimmed-down stand-in declarations mirroring a few Core Data types;
// only the members referenced by the importing code are declared.

@interface NSEntityDescription : NSObject
@end

@interface NSManagedObjectContext : NSObject
@end

@interface NSManagedObject : NSObject
// Initializer returning a covariant (__kindof) instance so subclasses
// come back typed as the subclass.
- (__kindof NSManagedObject *)initWithEntity:(NSEntityDescription *)entity
insertIntoManagedObjectContext:(NSManagedObjectContext *)context;
@property(nonatomic, readonly, strong) NSEntityDescription *entity;
+ (NSEntityDescription *)entity;
@end
| apple/swift | test/SILGen/Inputs/usr/include/CoreData.h | C | apache-2.0 | 421 |
cask "prezi-video" do
  version "1.13.0"
  sha256 "477a3d199b1f108e3e1e394a93787fde89f499dea273937c0c1f5fd410b66410"

  # `version` is interpolated into the download URL, so a routine upgrade
  # only touches `version` and `sha256`.
  url "https://desktopassets.prezi.com/mac/prezi-video/releases/Prezi_Video_#{version}.dmg"
  name "Prezi Video"
  desc "Lets you interact with your content live as you stream or record"
  homepage "https://prezi.com/video/"

  pkg "Install Prezi Video.pkg"

  # The pkg installs both the app and a CoreMediaIO virtual-camera plugin,
  # so uninstall quits the app, unloads the vcam assistant launchd job,
  # forgets both package receipts and deletes the installed payloads.
  uninstall quit:      "com.prezi.PreziCast",
            launchctl: "com.prezi.prezivideo.vcam.assistant",
            pkgutil:   [
              "com.prezi.PreziCast",
              "com.prezi.prezivideo.vcam.plugin",
            ],
            delete:    [
              "/Applications/Prezi Video.app",
              "/Library/CoreMediaIO/Plug-Ins/DAL/PreziAR.plugin",
            ]

  # User-level leftovers removed by `brew uninstall --zap`.
  zap trash: [
    "~/Library/Application Support/com.prezi.PreziCast",
    "~/Library/Preferences/com.prezi.PreziCast.plist",
    "~/Library/Preferences/com.prezi.PreziVideo.vcam",
  ]
end
| danielbayley/homebrew-cask | Casks/prezi-video.rb | Ruby | bsd-2-clause | 955 |
<?php
/*
* CKFinder
* ========
* http://ckfinder.com
* Copyright (C) 2007-2012, CKSource - Frederico Knabben. All rights reserved.
*
* The software, this file and its contents are subject to the CKFinder
* License. Please read the license.txt file before using, installing, copying,
* modifying or distribute this file or part of its contents. The contents of
* this file is part of the Source Code of CKFinder.
*/
if (!defined('IN_CKFINDER')) exit;
/**
* @package CKFinder
* @subpackage Utils
* @copyright CKSource - Frederico Knabben
*/
/**
* @package CKFinder
* @subpackage Utils
* @copyright CKSource - Frederico Knabben
*/
class CKFinder_Connector_Utils_Misc
{
    /**
     * Returns a localized error message for the given error number.
     *
     * The language is taken from the "langCode" request parameter when it
     * matches ^[a-z\-]+$ (the whitelist also prevents path traversal into
     * the include below) and the corresponding language file exists;
     * otherwise English is used.
     *
     * @param int|string $number error number; falsy means "no error"
     * @param string $arg value substituted for the %1 placeholder
     * @return string empty string when $number is falsy
     */
    public static function getErrorMessage($number, $arg = "") {
        $langCode = 'en';
        if (!empty($_GET['langCode']) && preg_match("/^[a-z\-]+$/", $_GET['langCode'])) {
            if (file_exists(CKFINDER_CONNECTOR_LANG_PATH . "/" . $_GET['langCode'] . ".php"))
                $langCode = $_GET['langCode'];
        }
        // The language file populates $GLOBALS['CKFLang'].
        include CKFINDER_CONNECTOR_LANG_PATH . "/" . $langCode . ".php";
        if ($number) {
            if (!empty ($GLOBALS['CKFLang']['Errors'][$number])) {
                $errorMessage = str_replace("%1", $arg, $GLOBALS['CKFLang']['Errors'][$number]);
            } else {
                // Unknown code: fall back to the generic message with the
                // number itself substituted for %1.
                $errorMessage = str_replace("%1", $number, $GLOBALS['CKFLang']['ErrorUnknown']);
            }
        } else {
            $errorMessage = "";
        }
        return $errorMessage;
    }
/**
* Simulate the encodeURIComponent() function available in JavaScript
* @param string $str
* @return string
*/
public static function encodeURIComponent($str)
{
$revert = array('%21'=>'!', '%2A'=>'*', '%27'=>"'", '%28'=>'(', '%29'=>')');
return strtr(rawurlencode($str), $revert);
}
/**
* Convert any value to boolean, strings like "false", "FalSE" and "off" are also considered as false
*
* @static
* @access public
* @param mixed $value
* @return boolean
*/
public static function booleanValue($value)
{
if (strcasecmp("false", $value) == 0 || strcasecmp("off", $value) == 0 || !$value) {
return false;
} else {
return true;
}
}
    /**
     * Drop-in replacement for imagecopyresampled() that delivers almost
     * identical results much faster (see the linked php.net user note).
     *
     * Strategy: pre-shrink with the cheap imagecopyresized() to an
     * intermediate image $quality times the target size, then run the true
     * resampler only on that small intermediate. $quality 1 means
     * nearest-neighbour only; 5 and above falls through to a plain
     * imagecopyresampled() call.
     *
     * @link http://pl.php.net/manual/en/function.imagecopyresampled.php
     *
     * @static
     * @access public
     * @param resource $dst_image destination GD image, modified in place
     * @param resource $src_image source GD image
     * @param int $dst_x
     * @param int $dst_y
     * @param int $src_x
     * @param int $src_y
     * @param int $dst_w
     * @param int $dst_h
     * @param int $src_w
     * @param int $src_h
     * @param int $quality speed/fidelity trade-off, 1 (fastest) .. 5 (best)
     * @return boolean false when either image resource is empty
     */
    public static function fastImageCopyResampled (&$dst_image, $src_image, $dst_x, $dst_y, $src_x, $src_y, $dst_w, $dst_h, $src_w, $src_h, $quality = 3)
    {
        if (empty($src_image) || empty($dst_image)) {
            return false;
        }
        if ($quality <= 1) {
            // Nearest-neighbour only: resize into a throwaway buffer, then
            // copy the exact target area over.
            $temp = imagecreatetruecolor ($dst_w + 1, $dst_h + 1);
            imagecopyresized ($temp, $src_image, $dst_x, $dst_y, $src_x, $src_y, $dst_w + 1, $dst_h + 1, $src_w, $src_h);
            imagecopyresized ($dst_image, $temp, 0, 0, 0, 0, $dst_w, $dst_h, $dst_w, $dst_h);
            imagedestroy ($temp);
        } elseif ($quality < 5 && (($dst_w * $quality) < $src_w || ($dst_h * $quality) < $src_h)) {
            // Cheap pre-shrink to $quality x the target size, then one true
            // resample from the (much smaller) intermediate image.
            $tmp_w = $dst_w * $quality;
            $tmp_h = $dst_h * $quality;
            $temp = imagecreatetruecolor ($tmp_w + 1, $tmp_h + 1);
            imagecopyresized ($temp, $src_image, 0, 0, $src_x, $src_y, $tmp_w + 1, $tmp_h + 1, $src_w, $src_h);
            imagecopyresampled ($dst_image, $temp, $dst_x, $dst_y, 0, 0, $dst_w, $dst_h, $tmp_w, $tmp_h);
            imagedestroy ($temp);
        } else {
            // No shortcut applies (upscaling or quality >= 5): defer to the
            // stock GD resampler.
            imagecopyresampled ($dst_image, $src_image, $dst_x, $dst_y, $src_x, $src_y, $dst_w, $dst_h, $src_w, $src_h);
        }
        return true;
    }
    /**
     * Tries to raise PHP's memory_limit enough to load an image of the
     * given dimensions with GD, based on a heuristic estimate of the
     * memory imagecreatefrom*() will need.
     *
     * @link http://pl.php.net/manual/pl/function.imagecreatefromjpeg.php
     * function posted by e dot a dot schultz at gmail dot com
     *
     * @static
     * @access public
     * @param int $imageWidth
     * @param int $imageHeight
     * @param int $imageBits  bits per channel
     * @param int $imageChannels
     * @return boolean false when the limit is insufficient and could not be raised
     */
    public static function setMemoryForImage($imageWidth, $imageHeight, $imageBits, $imageChannels)
    {
        $MB = 1048576; // number of bytes in 1M
        $K64 = 65536; // number of bytes in 64K
        $TWEAKFACTOR = 2.4; // Or whatever works for you
        // Estimate: raw pixel data plus 64K overhead, padded by the tweak
        // factor, plus 3MB of slack.
        $memoryNeeded = round( ( $imageWidth * $imageHeight
                                           * $imageBits
                                           * $imageChannels / 8
                             + $K64
                           ) * $TWEAKFACTOR
                     ) + 3*$MB;
        //ini_get('memory_limit') only works if compiled with "--enable-memory-limit" also
        //Default memory limit is 8MB so well stick with that.
        //To find out what yours is, view your php.ini file.
        $memoryLimit = CKFinder_Connector_Utils_Misc::returnBytes(@ini_get('memory_limit'))/$MB;
        // There are no memory limits, nothing to do
        // NOTE(review): with memory_limit = "-1" returnBytes() yields the
        // string "-1" (bytes), so after the /$MB division this equality
        // never matches and the unlimited case falls through — confirm.
        if ($memoryLimit == -1) {
            return true;
        }
        if (!$memoryLimit) {
            $memoryLimit = 8;
        }
        $memoryLimitMB = $memoryLimit * $MB;
        if (function_exists('memory_get_usage')) {
            if (memory_get_usage() + $memoryNeeded > $memoryLimitMB) {
                $newLimit = $memoryLimit + ceil( ( memory_get_usage()
                                                + $memoryNeeded
                                                - $memoryLimitMB
                                                ) / $MB
                                            );
                if (@ini_set( 'memory_limit', $newLimit . 'M' ) === false) {
                    return false;
                }
            }
        } else {
            // memory_get_usage() unavailable: assume 3MB is already in use.
            if ($memoryNeeded + 3*$MB > $memoryLimitMB) {
                $newLimit = $memoryLimit + ceil(( 3*$MB
                                                + $memoryNeeded
                                                - $memoryLimitMB
                                                ) / $MB
                                            );
                if (false === @ini_set( 'memory_limit', $newLimit . 'M' )) {
                    return false;
                }
            }
        }
        return true;
    }
/**
* convert shorthand php.ini notation into bytes, much like how the PHP source does it
* @link http://pl.php.net/manual/en/function.ini-get.php
*
* @static
* @access public
* @param string $val
* @return int
*/
public static function returnBytes($val) {
$val = trim($val);
if (!$val) {
return 0;
}
$last = strtolower($val[strlen($val)-1]);
switch($last) {
// The 'G' modifier is available since PHP 5.1.0
case 'g':
$val *= 1024;
case 'm':
$val *= 1024;
case 'k':
$val *= 1024;
}
return $val;
}
/**
* Checks if a value exists in an array (case insensitive)
*
* @static
* @access public
* @param string $needle
* @param array $haystack
* @return boolean
*/
public static function inArrayCaseInsensitive($needle, $haystack)
{
if (!$haystack || !is_array($haystack)) {
return false;
}
$lcase = array();
foreach ($haystack as $key => $val) {
$lcase[$key] = strtolower($val);
}
return in_array($needle, $lcase);
}
/**
* UTF-8 compatible version of basename()
*
* @static
* @access public
* @param string $file
* @return string
*/
public static function mbBasename($file)
{
$explode = explode('/', str_replace("\\", "/", $file));
return end($explode);
}
/**
* Checks whether the string is valid UTF8
* @static
* @access public
* @param string $string
* @return boolean
*/
public static function isValidUTF8($string)
{
if (strlen($string) == 0) {
return true;
}
return (preg_match('/^./us', $string) == 1);
}
    /**
     * Creates a GD truecolor image from a BMP file (GD has no native BMP
     * loader). Handles uncompressed 24-, 8- and 4-bit bitmaps; other
     * depths and non-BMP input return false. Bitmaps larger than roughly
     * 2048x1536 at 24 bit are refused up front.
     *
     * Source: http://pl.php.net/imagecreate
     * (optimized for speed and memory usage, but yet not very efficient)
     *
     * @static
     * @access public
     * @param string $filename
     * @return resource|false GD image resource, or false on failure
     */
    public static function imageCreateFromBmp($filename)
    {
        //20 seconds seems to be a reasonable value to not kill a server and process images up to 1680x1050
        @set_time_limit(20);
        if (false === ($f1 = fopen($filename, "rb"))) {
            return false;
        }
        // BITMAPFILEHEADER (14 bytes): magic "BM" == 19778, size, offset.
        // NOTE(review): the early returns below leave $f1 open; PHP closes
        // it at request shutdown, but an explicit fclose() would be cleaner.
        $FILE = unpack("vfile_type/Vfile_size/Vreserved/Vbitmap_offset", fread($f1, 14));
        if ($FILE['file_type'] != 19778) {
            return false;
        }
        // BITMAPINFOHEADER (40 bytes).
        $BMP = unpack('Vheader_size/Vwidth/Vheight/vplanes/vbits_per_pixel'.
            '/Vcompression/Vsize_bitmap/Vhoriz_resolution'.
            '/Vvert_resolution/Vcolors_used/Vcolors_important', fread($f1, 40));
        $BMP['colors'] = pow(2,$BMP['bits_per_pixel']);
        if ($BMP['size_bitmap'] == 0) {
            $BMP['size_bitmap'] = $FILE['file_size'] - $FILE['bitmap_offset'];
        }
        $BMP['bytes_per_pixel'] = $BMP['bits_per_pixel']/8;
        $BMP['bytes_per_pixel2'] = ceil($BMP['bytes_per_pixel']);
        // BMP rows are padded to a multiple of 4 bytes; 'decal' is the
        // number of padding bytes appended to each row.
        $BMP['decal'] = ($BMP['width']*$BMP['bytes_per_pixel']/4);
        $BMP['decal'] -= floor($BMP['width']*$BMP['bytes_per_pixel']/4);
        $BMP['decal'] = 4-(4*$BMP['decal']);
        if ($BMP['decal'] == 4) {
            $BMP['decal'] = 0;
        }
        $PALETTE = array();
        if ($BMP['colors'] < 16777216) {
            // Indexed image: read the color table (4 bytes per entry).
            $PALETTE = unpack('V'.$BMP['colors'], fread($f1, $BMP['colors']*4));
        }
        //2048x1536px@24bit don't even try to process larger files as it will probably fail
        if ($BMP['size_bitmap'] > 3 * 2048 * 1536) {
            return false;
        }
        $IMG = fread($f1, $BMP['size_bitmap']);
        fclose($f1);
        $VIDE = chr(0);
        $res = imagecreatetruecolor($BMP['width'],$BMP['height']);
        $P = 0;
        // Rows are stored bottom-up, so painting starts at the last row.
        $Y = $BMP['height']-1;
        $line_length = $BMP['bytes_per_pixel']*$BMP['width'];
        if ($BMP['bits_per_pixel'] == 24) {
            while ($Y >= 0)
            {
                $X=0;
                $temp = unpack( "C*", substr($IMG, $P, $line_length));
                while ($X < $BMP['width'])
                {
                    // Bytes are B,G,R; pack into a 0xRRGGBB int for GD.
                    $offset = $X*3;
                    imagesetpixel($res, $X++, $Y, ($temp[$offset+3] << 16) + ($temp[$offset+2] << 8) + $temp[$offset+1]);
                }
                $Y--;
                $P += $line_length + $BMP['decal'];
            }
        }
        elseif ($BMP['bits_per_pixel'] == 8)
        {
            while ($Y >= 0)
            {
                $X=0;
                $temp = unpack( "C*", substr($IMG, $P, $line_length));
                while ($X < $BMP['width'])
                {
                    // One palette index per byte (unpack() arrays are 1-based).
                    imagesetpixel($res, $X++, $Y, $PALETTE[$temp[$X] +1]);
                }
                $Y--;
                $P += $line_length + $BMP['decal'];
            }
        }
        elseif ($BMP['bits_per_pixel'] == 4)
        {
            while ($Y >= 0)
            {
                $X=0;
                $i = 1;
                $low = true;
                $temp = unpack( "C*", substr($IMG, $P, $line_length));
                while ($X < $BMP['width'])
                {
                    // Two pixels per byte: high nibble first, then low nibble.
                    if ($low) {
                        $index = $temp[$i] >> 4;
                    }
                    else {
                        $index = $temp[$i++] & 0x0F;
                    }
                    $low = !$low;
                    imagesetpixel($res, $X++, $Y, $PALETTE[$index +1]);
                }
                $Y--;
                $P += $line_length + $BMP['decal'];
            }
        }
        elseif ($BMP['bits_per_pixel'] == 1)
        {
            // NOTE(review): this 1-bit branch decodes a single palette entry
            // but contains no row/column loop and never calls imagesetpixel(),
            // so monochrome BMPs come back as a blank image — looks broken,
            // confirm before relying on 1-bpp support.
            $COLOR = unpack("n",$VIDE.substr($IMG,floor($P),1));
            if (($P*8)%8 == 0) $COLOR[1] = $COLOR[1] >>7;
            elseif (($P*8)%8 == 1) $COLOR[1] = ($COLOR[1] & 0x40)>>6;
            elseif (($P*8)%8 == 2) $COLOR[1] = ($COLOR[1] & 0x20)>>5;
            elseif (($P*8)%8 == 3) $COLOR[1] = ($COLOR[1] & 0x10)>>4;
            elseif (($P*8)%8 == 4) $COLOR[1] = ($COLOR[1] & 0x8)>>3;
            elseif (($P*8)%8 == 5) $COLOR[1] = ($COLOR[1] & 0x4)>>2;
            elseif (($P*8)%8 == 6) $COLOR[1] = ($COLOR[1] & 0x2)>>1;
            elseif (($P*8)%8 == 7) $COLOR[1] = ($COLOR[1] & 0x1);
            $COLOR[1] = $PALETTE[$COLOR[1]+1];
        }
        else {
            // Unsupported bit depth (16/32-bit, RLE-compressed, ...).
            return false;
        }
        return $res;
    }
}
| nkhanhquoc/waterfactory | source/backend/web/js/ckfinder/core/connector/php/php5/Utils/Misc.php | PHP | mit | 12,722 |
// Angular Material demo module for md-menu position modes.
// NOTE: DI here is by parameter name ($mdIconProvider, $mdDialog), so the
// injected parameter names must not be renamed.
angular
  .module('menuDemoPosition', ['ngMaterial'])
  .config(function($mdIconProvider) {
    // Register the SVG icon sets referenced from the demo template
    // (24px view box).
    $mdIconProvider
      .iconSet("call", 'img/icons/sets/communication-icons.svg', 24)
      .iconSet("social", 'img/icons/sets/social-icons.svg', 24);
  })
  .controller('PositionDemoCtrl', function DemoCtrl($mdDialog) {
    // The DOM event that opened the menu, remembered so the dialog below
    // can be anchored to it via targetEvent(); cleared after use.
    var originatorEv;

    this.openMenu = function($mdOpenMenu, ev) {
      originatorEv = ev;
      $mdOpenMenu(ev);
    };

    this.announceClick = function(index) {
      $mdDialog.show(
        $mdDialog.alert()
          .title('You clicked!')
          .textContent('You clicked the menu item at index ' + index)
          .ok('Nice')
          .targetEvent(originatorEv)
      );
      originatorEv = null;
    };
  });
| jelbourn/material | src/components/menu/demoMenuPositionModes/script.js | JavaScript | mit | 747 |
/* GLIB - Library of useful routines for C programming
* Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
*
* gpoll.c: poll(2) abstraction
* Copyright 1998 Owen Taylor
* Copyright 2008 Red Hat, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
/*
* Modified by the GLib Team and others 1997-2000. See the AUTHORS
* file for a list of people on the GLib Team. See the ChangeLog
* files for a list of changes. These files are distributed with
* GLib at ftp://ftp.gtk.org/pub/gtk/.
*/
/*
* MT safe
*/
#include "config.h"
/* Uncomment the next line (and the corresponding line in gmain.c) to
* enable debugging printouts if the environment variable
* G_MAIN_POLL_DEBUG is set to some value.
*/
/* #define G_MAIN_POLL_DEBUG */
#ifdef _WIN32
/* Always enable debugging printout on Windows, as it is more often
* needed there...
*/
#define G_MAIN_POLL_DEBUG
#endif
#include "glib.h"
#include <sys/types.h>
#include <time.h>
#include <stdlib.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif /* HAVE_SYS_TIME_H */
#ifdef GLIB_HAVE_SYS_POLL_H
# include <sys/poll.h>
# undef events /* AIX 4.1.5 & 4.3.2 define this for SVR3,4 compatibility */
# undef revents /* AIX 4.1.5 & 4.3.2 define this for SVR3,4 compatibility */
/* The poll() emulation on OS/X doesn't handle fds=NULL, nfds=0,
* so we prefer our own poll emulation.
*/
#if defined(_POLL_EMUL_H_) || defined(BROKEN_POLL)
#undef HAVE_POLL
#endif
#endif /* GLIB_HAVE_SYS_POLL_H */
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif /* HAVE_UNISTD_H */
#include <errno.h>
#ifdef G_OS_WIN32
#define STRICT
#include <windows.h>
#endif /* G_OS_WIN32 */
#include "galias.h"
#ifdef G_MAIN_POLL_DEBUG
extern gboolean _g_main_poll_debug;
#endif
#ifdef HAVE_POLL
/* SunOS has poll, but doesn't provide a prototype. */
# if defined (sun) && !defined (__SVR4)
extern gint poll (struct pollfd *fds, guint nfsd, gint timeout);
# endif /* !sun */
/**
* g_poll:
* @fds: file descriptors to poll
* @nfds: the number of file descriptors in @fds
* @timeout: amount of time to wait, in milliseconds, or -1 to wait forever
*
* Polls @fds, as with the poll() system call, but portably. (On
* systems that don't have poll(), it is emulated using select().)
* This is used internally by #GMainContext, but it can be called
* directly if you need to block until a file descriptor is ready, but
* don't want to run the full main loop.
*
* Each element of @fds is a #GPollFD describing a single file
* descriptor to poll. The %fd field indicates the file descriptor,
* and the %events field indicates the events to poll for. On return,
* the %revents fields will be filled with the events that actually
* occurred.
*
* On POSIX systems, the file descriptors in @fds can be any sort of
* file descriptor, but the situation is much more complicated on
* Windows. If you need to use g_poll() in code that has to run on
* Windows, the easiest solution is to construct all of your
* #GPollFD<!-- -->s with g_io_channel_win32_make_pollfd().
*
* Return value: the number of entries in @fds whose %revents fields
* were filled in, or 0 if the operation timed out, or -1 on error or
* if the call was interrupted.
*
* Since: 2.20
**/
gint
g_poll (GPollFD *fds,
	guint nfds,
	gint timeout)
{
  /* The cast relies on GPollFD matching the layout of struct pollfd,
   * so the array can be handed to the native poll() directly. */
  return poll ((struct pollfd *)fds, nfds, timeout);
}
#else /* !HAVE_POLL */
#ifdef G_OS_WIN32
/* One round of waiting on Windows: waits on up to nhandles object handles
 * and, when poll_msgs is set, on the thread's message queue as well, then
 * translates whatever fired into the revents fields of fds.  When called
 * with timeout == 0 it recurses (minus the handle that fired) to pick up
 * further already-signalled handles, so one g_poll() can report several
 * fds at once.
 *
 * Returns the number of GPollFDs whose revents were filled in, 0 on
 * timeout or I/O completion, -1 on failure. */
static int
poll_rest (gboolean poll_msgs,
	   HANDLE *handles,
	   gint nhandles,
	   GPollFD *fds,
	   guint nfds,
	   gint timeout)
{
  DWORD ready;
  GPollFD *f;
  int recursed_result;

  if (poll_msgs)
    {
      /* Wait for either messages or handles
       * -> Use MsgWaitForMultipleObjectsEx
       */
      if (_g_main_poll_debug)
	g_print (" MsgWaitForMultipleObjectsEx(%d, %d)\n", nhandles, timeout);

      ready = MsgWaitForMultipleObjectsEx (nhandles, handles, timeout,
					   QS_ALLINPUT, MWMO_ALERTABLE);

      if (ready == WAIT_FAILED)
	{
	  gchar *emsg = g_win32_error_message (GetLastError ());
	  g_warning ("MsgWaitForMultipleObjectsEx failed: %s", emsg);
	  g_free (emsg);
	}
    }
  else if (nhandles == 0)
    {
      /* No handles to wait for, just the timeout */
      if (timeout == INFINITE)
	ready = WAIT_FAILED;
      else
	{
	  SleepEx (timeout, TRUE);
	  ready = WAIT_TIMEOUT;
	}
    }
  else
    {
      /* Wait for just handles
       * -> Use WaitForMultipleObjectsEx
       */
      if (_g_main_poll_debug)
	g_print (" WaitForMultipleObjectsEx(%d, %d)\n", nhandles, timeout);

      ready = WaitForMultipleObjectsEx (nhandles, handles, FALSE, timeout, TRUE);
      if (ready == WAIT_FAILED)
	{
	  gchar *emsg = g_win32_error_message (GetLastError ());
	  g_warning ("WaitForMultipleObjectsEx failed: %s", emsg);
	  g_free (emsg);
	}
    }

  if (_g_main_poll_debug)
    g_print (" wait returns %ld%s\n",
	     ready,
	     (ready == WAIT_FAILED ? " (WAIT_FAILED)" :
	      (ready == WAIT_TIMEOUT ? " (WAIT_TIMEOUT)" :
	       (poll_msgs && ready == WAIT_OBJECT_0 + nhandles ? " (msg)" : ""))));

  if (ready == WAIT_FAILED)
    return -1;
  else if (ready == WAIT_TIMEOUT ||
	   ready == WAIT_IO_COMPLETION)
    return 0;
  else if (poll_msgs && ready == WAIT_OBJECT_0 + nhandles)
    {
      /* The message-queue pseudo-handle fired: flag every G_IO_IN
       * G_WIN32_MSG_HANDLE entry. */
      for (f = fds; f < &fds[nfds]; ++f)
	if (f->fd == G_WIN32_MSG_HANDLE && f->events & G_IO_IN)
	  f->revents |= G_IO_IN;

      /* If we have a timeout, or no handles to poll, be satisfied
       * with just noticing we have messages waiting.
       */
      if (timeout != 0 || nhandles == 0)
	return 1;

      /* If no timeout and handles to poll, recurse to poll them,
       * too.
       */
      recursed_result = poll_rest (FALSE, handles, nhandles, fds, nfds, 0);
      return (recursed_result == -1) ? -1 : 1 + recursed_result;
    }
  else if (ready >= WAIT_OBJECT_0 && ready < WAIT_OBJECT_0 + nhandles)
    {
      for (f = fds; f < &fds[nfds]; ++f)
	{
	  if ((HANDLE) f->fd == handles[ready - WAIT_OBJECT_0])
	    {
	      f->revents = f->events;
	      if (_g_main_poll_debug)
		g_print (" got event %p\n", (HANDLE) f->fd);
	    }
	}

      /* If no timeout and polling several handles, recurse to poll
       * the rest of them.
       */
      if (timeout == 0 && nhandles > 1)
	{
	  /* Remove the handle that fired */
	  /* NOTE(review): the comparison "ready < nhandles - 1" implicitly
	   * relies on WAIT_OBJECT_0 being 0 — confirm for this SDK. */
	  int i;
	  if (ready < nhandles - 1)
	    for (i = ready - WAIT_OBJECT_0 + 1; i < nhandles; i++)
	      handles[i-1] = handles[i];
	  nhandles--;
	  recursed_result = poll_rest (FALSE, handles, nhandles, fds, nfds, 0);
	  return (recursed_result == -1) ? -1 : 1 + recursed_result;
	}
      return 1;
    }

  return 0;
}
/* Win32 implementation of g_poll(): gathers the unique object handles
 * from fds (G_WIN32_MSG_HANDLE entries mean "poll the thread's message
 * queue"), then delegates the actual waiting to poll_rest(), first with
 * a zero timeout to harvest everything already signalled, then with the
 * real timeout if nothing was ready. */
gint
g_poll (GPollFD *fds,
	guint nfds,
	gint timeout)
{
  HANDLE handles[MAXIMUM_WAIT_OBJECTS];
  gboolean poll_msgs = FALSE;
  GPollFD *f;
  gint nhandles = 0;
  int retval;

  if (_g_main_poll_debug)
    g_print ("g_poll: waiting for");

  for (f = fds; f < &fds[nfds]; ++f)
    if (f->fd == G_WIN32_MSG_HANDLE && (f->events & G_IO_IN))
      {
	if (_g_main_poll_debug && !poll_msgs)
	  g_print (" MSG");
	poll_msgs = TRUE;
      }
    else if (f->fd > 0)
      {
	/* Don't add the same handle several times into the array, as
	 * docs say that is not allowed, even if it actually does seem
	 * to work.
	 */
	gint i;

	for (i = 0; i < nhandles; i++)
	  if (handles[i] == (HANDLE) f->fd)
	    break;

	if (i == nhandles)
	  {
	    if (nhandles == MAXIMUM_WAIT_OBJECTS)
	      {
		g_warning ("Too many handles to wait for!\n");
		break;
	      }
	    else
	      {
		if (_g_main_poll_debug)
		  g_print (" %p", (HANDLE) f->fd);
		handles[nhandles++] = (HANDLE) f->fd;
	      }
	  }
      }

  if (_g_main_poll_debug)
    g_print ("\n");

  /* Clear all revents before waiting. */
  for (f = fds; f < &fds[nfds]; ++f)
    f->revents = 0;

  if (timeout == -1)
    timeout = INFINITE;

  /* Polling for several things? */
  if (nhandles > 1 || (nhandles > 0 && poll_msgs))
    {
      /* First check if one or several of them are immediately
       * available
       */
      retval = poll_rest (poll_msgs, handles, nhandles, fds, nfds, 0);

      /* If not, and we have a significant timeout, poll again with
       * timeout then. Note that this will return indication for only
       * one event, or only for messages. We ignore timeouts less than
       * ten milliseconds as they are mostly pointless on Windows, the
       * MsgWaitForMultipleObjectsEx() call will timeout right away
       * anyway.
       */
      if (retval == 0 && (timeout == INFINITE || timeout >= 10))
	retval = poll_rest (poll_msgs, handles, nhandles, fds, nfds, timeout);
    }
  else
    {
      /* Just polling for one thing, so no need to check first if
       * available immediately
       */
      retval = poll_rest (poll_msgs, handles, nhandles, fds, nfds, timeout);
    }

  /* On failure report no events at all. */
  if (retval == -1)
    for (f = fds; f < &fds[nfds]; ++f)
      f->revents = 0;

  return retval;
}
#else /* !G_OS_WIN32 */
/* The following implementation of poll() comes from the GNU C Library.
* Copyright (C) 1994, 1996, 1997 Free Software Foundation, Inc.
*/
#include <string.h> /* for bzero on BSD systems */
#ifdef HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif /* HAVE_SYS_SELECT_H */
#ifdef G_OS_BEOS
#undef NO_FD_SET
#endif /* G_OS_BEOS */
#ifndef NO_FD_SET
# define SELECT_MASK fd_set
#else /* !NO_FD_SET */
# ifndef _AIX
typedef long fd_mask;
# endif /* _AIX */
# ifdef _IBMR2
# define SELECT_MASK void
# else /* !_IBMR2 */
# define SELECT_MASK int
# endif /* !_IBMR2 */
#endif /* !NO_FD_SET */
/* select()-based fallback implementation of g_poll() for platforms
 * without a usable poll(): mirrors the G_IO_IN/OUT/PRI event bits into
 * the three select() fd sets and translates the results back. */
gint
g_poll (GPollFD *fds,
	guint nfds,
	gint timeout)
{
  struct timeval tv;
  SELECT_MASK rset, wset, xset;
  GPollFD *f;
  int ready;
  int maxfd = 0;

  FD_ZERO (&rset);
  FD_ZERO (&wset);
  FD_ZERO (&xset);

  for (f = fds; f < &fds[nfds]; ++f)
    if (f->fd >= 0)
      {
	if (f->events & G_IO_IN)
	  FD_SET (f->fd, &rset);
	if (f->events & G_IO_OUT)
	  FD_SET (f->fd, &wset);
	if (f->events & G_IO_PRI)
	  FD_SET (f->fd, &xset);
	if (f->fd > maxfd && (f->events & (G_IO_IN|G_IO_OUT|G_IO_PRI)))
	  maxfd = f->fd;
      }

  /* Millisecond timeout -> struct timeval; -1 means block forever
   * (NULL timeout pointer below). */
  tv.tv_sec = timeout / 1000;
  tv.tv_usec = (timeout % 1000) * 1000;

  ready = select (maxfd + 1, &rset, &wset, &xset,
		  timeout == -1 ? NULL : &tv);
  if (ready > 0)
    for (f = fds; f < &fds[nfds]; ++f)
      {
	f->revents = 0;
	if (f->fd >= 0)
	  {
	    if (FD_ISSET (f->fd, &rset))
	      f->revents |= G_IO_IN;
	    if (FD_ISSET (f->fd, &wset))
	      f->revents |= G_IO_OUT;
	    if (FD_ISSET (f->fd, &xset))
	      f->revents |= G_IO_PRI;
	  }
      }

  return ready;
}
#endif /* !G_OS_WIN32 */
#endif /* !HAVE_POLL */
#define __G_POLL_C__
#include "galiasdef.c"
| xbmc/atv2 | xbmc/lib/libmms/glib-2.20.4/glib/gpoll.c | C | gpl-2.0 | 11,289 |
/*!
* OOjs UI v0.9.7
* https://www.mediawiki.org/wiki/OOjs_UI
*
* Copyright 2011–2015 OOjs Team and other contributors.
* Released under the MIT license
* http://oojs.mit-license.org
*
* Date: 2015-04-03T21:10:27Z
*/
/* Alert/communication icon classes.  Each rule embeds a raster icon via
 * the ResourceLoader "@embed" annotation; the -ltr suffixed images have
 * direction-specific artwork. */
.oo-ui-icon-bell {
	background-image: /* @embed */ url(themes/mediawiki/images/icons/bell.png);
}
.oo-ui-icon-bellOn {
	background-image: /* @embed */ url(themes/mediawiki/images/icons/bellOn-ltr.png);
}
.oo-ui-icon-eye {
	background-image: /* @embed */ url(themes/mediawiki/images/icons/eye.png);
}
.oo-ui-icon-eyeClosed {
	background-image: /* @embed */ url(themes/mediawiki/images/icons/eyeClosed.png);
}
.oo-ui-icon-message {
	background-image: /* @embed */ url(themes/mediawiki/images/icons/message-ltr.png);
}
.oo-ui-icon-signature {
	background-image: /* @embed */ url(themes/mediawiki/images/icons/signature-ltr.png);
}
.oo-ui-icon-speechBubble {
	background-image: /* @embed */ url(themes/mediawiki/images/icons/speechBubble-ltr.png);
}
.oo-ui-icon-speechBubbleAdd {
	background-image: /* @embed */ url(themes/mediawiki/images/icons/speechBubbleAdd-ltr.png);
}
.oo-ui-icon-speechBubbles {
	background-image: /* @embed */ url(themes/mediawiki/images/icons/speechBubbles-ltr.png);
}
| jdloft/toolbar-proto | lib/oojs-ui/oojs-ui-mediawiki-icons-alerts.raster.css | CSS | gpl-2.0 | 1,218 |
/// <reference path="fourslash.ts" />
// @BaselineFile: getEmitOutputTsxFile_React.baseline
// @declaration: true
// @sourceMap: true
// @jsx: react
// @Filename: inputFile1.ts
// @emitThisFile: true
////// regular ts file
//// var t: number = 5;
//// class Bar {
//// x : string;
//// y : number
//// }
//// /*1*/
// @Filename: inputFile2.tsx
// @emitThisFile: true
//// declare var React: any;
//// var y = "my div";
//// var x = <div name= {y} />
//// /*2*/
// Both virtual files must compile without errors at the two markers
// before the emit output is baselined.
goTo.marker("1");
verify.numberOfErrorsInCurrentFile(0);
goTo.marker("2");
verify.numberOfErrorsInCurrentFile(0);
verify.baselineGetEmitOutput();
/*
* Copyright 2000-2012 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jetbrains.jps.model.serialization;
import com.intellij.openapi.application.PathManager;
import com.intellij.openapi.util.io.FileUtil;
import com.intellij.testFramework.PlatformTestUtil;
import org.jdom.Element;
import org.jetbrains.jps.model.JpsDummyElement;
import org.jetbrains.jps.model.JpsEncodingConfigurationService;
import org.jetbrains.jps.model.JpsEncodingProjectConfiguration;
import org.jetbrains.jps.model.artifact.JpsArtifactService;
import org.jetbrains.jps.model.java.*;
import org.jetbrains.jps.model.library.JpsLibrary;
import org.jetbrains.jps.model.library.JpsOrderRootType;
import org.jetbrains.jps.model.library.sdk.JpsSdkReference;
import org.jetbrains.jps.model.module.*;
import org.jetbrains.jps.model.serialization.library.JpsLibraryTableSerializer;
import org.jetbrains.jps.model.serialization.module.JpsModuleRootModelSerializer;
import java.io.File;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
/**
* @author nik
*/
public class JpsProjectSerializationTest extends JpsSerializationTestCase {
public static final String SAMPLE_PROJECT_PATH = "/jps/model-serialization/testData/sampleProject";
  /**
   * End-to-end load of the sample project: verifies project name and base dir,
   * the three modules, the libraries, util's dependency list (module SDK,
   * source, two libraries), the inherited project SDK of "main" and the
   * output urls of "xxx".
   */
  public void testLoadProject() {
    loadProject(SAMPLE_PROJECT_PATH);
    String baseDirPath = getTestDataFileAbsolutePath(SAMPLE_PROJECT_PATH);
    assertTrue(FileUtil.filesEqual(new File(baseDirPath), JpsModelSerializationDataService.getBaseDirectory(myProject)));
    assertEquals("sampleProjectName", myProject.getName());
    List<JpsModule> modules = myProject.getModules();
    assertEquals(3, modules.size());
    JpsModule main = modules.get(0);
    assertEquals("main", main.getName());
    JpsModule util = modules.get(1);
    assertEquals("util", util.getName());
    JpsModule xxx = modules.get(2);
    assertEquals("xxx", xxx.getName());
    assertTrue(FileUtil.filesEqual(new File(baseDirPath, "util"), JpsModelSerializationDataService.getBaseDirectory(util)));
    List<JpsLibrary> libraries = myProject.getLibraryCollection().getLibraries();
    assertEquals(3, libraries.size());
    List<JpsDependencyElement> dependencies = util.getDependenciesList().getDependencies();
    assertEquals(4, dependencies.size());
    // util's first dependency is a module-level JDK reference ("1.5").
    JpsSdkDependency sdkDependency = assertInstanceOf(dependencies.get(0), JpsSdkDependency.class);
    assertSame(JpsJavaSdkType.INSTANCE, sdkDependency.getSdkType());
    JpsSdkReference<?> reference = sdkDependency.getSdkReference();
    assertNotNull(reference);
    assertEquals("1.5", reference.getSdkName());
    assertInstanceOf(dependencies.get(1), JpsModuleSourceDependency.class);
    assertInstanceOf(dependencies.get(2), JpsLibraryDependency.class);
    assertInstanceOf(dependencies.get(3), JpsLibraryDependency.class);
    // "main" inherits the project SDK ("1.6") rather than naming its own.
    JpsSdkDependency inheritedSdkDependency = assertInstanceOf(main.getDependenciesList().getDependencies().get(0), JpsSdkDependency.class);
    JpsSdkReference<?> projectSdkReference = inheritedSdkDependency.getSdkReference();
    assertNotNull(projectSdkReference);
    assertEquals("1.6", projectSdkReference.getSdkName());
    assertEquals(getUrl("xxx/output"), JpsJavaExtensionService.getInstance().getOutputUrl(xxx, true));
    assertEquals(getUrl("xxx/output"), JpsJavaExtensionService.getInstance().getOutputUrl(xxx, false));
  }
  /** An .ipr-based project takes its name from the file and its base dir from the parent directory. */
  public void testFileBasedProjectNameAndBaseDir() {
    String relativePath = "/jps/model-serialization/testData/run-configurations/run-configurations.ipr";
    String absolutePath = getTestDataFileAbsolutePath(relativePath);
    loadProject(relativePath);
    assertEquals("run-configurations", myProject.getName());
    assertTrue(FileUtil.filesEqual(new File(absolutePath).getParentFile(), JpsModelSerializationDataService.getBaseDirectory(myProject)));
  }

  /** A directory-based (.idea) project is named after its directory. */
  public void testDirectoryBasedProjectName() {
    loadProject("/jps/model-serialization/testData/run-configurations-dir");
    assertEquals("run-configurations-dir", myProject.getName());
  }

  /** An .iml stored under .idea is still found and its source root loaded. */
  public void testImlUnderDotIdea() {
    loadProject("/jps/model-serialization/testData/imlUnderDotIdea");
    JpsModule module = assertOneElement(myProject.getModules());
    JpsModuleSourceRoot root = assertOneElement(module.getSourceRoots());
    assertEquals(getUrl("src"), root.getUrl());
  }
  /** A project SDK entry with no explicit type attribute defaults to the Java SDK type. */
  public void testProjectSdkWithoutType() {
    loadProject("/jps/model-serialization/testData/projectSdkWithoutType/projectSdkWithoutType.ipr");
    JpsSdkReference<JpsDummyElement> reference = myProject.getSdkReferencesTable().getSdkReference(JpsJavaSdkType.INSTANCE);
    assertNotNull(reference);
    assertEquals("1.6", reference.getSdkName());
  }

  /** An unknown dependency scope value falls back to COMPILE instead of failing the load. */
  public void testInvalidDependencyScope() {
    loadProject("/jps/model-serialization/testData/invalidDependencyScope/invalidDependencyScope.ipr");
    JpsModule module = assertOneElement(myProject.getModules());
    List<JpsDependencyElement> dependencies = module.getDependenciesList().getDependencies();
    assertEquals(3, dependencies.size());
    JpsJavaDependencyExtension extension = JpsJavaExtensionService.getInstance().getDependencyExtension(dependencies.get(2));
    assertNotNull(extension);
    assertEquals(JpsJavaDependencyScope.COMPILE, extension.getScope());
  }
  /** Two module-level libraries with the same name stay distinct entries with their own roots. */
  public void testDuplicatedModuleLibrary() {
    loadProject("/jps/model-serialization/testData/duplicatedModuleLibrary/duplicatedModuleLibrary.ipr");
    JpsModule module = assertOneElement(myProject.getModules());
    List<JpsDependencyElement> dependencies = module.getDependenciesList().getDependencies();
    assertEquals(4, dependencies.size());
    JpsLibrary lib1 = assertInstanceOf(dependencies.get(2), JpsLibraryDependency.class).getLibrary();
    assertNotNull(lib1);
    assertSameElements(lib1.getRootUrls(JpsOrderRootType.COMPILED), getUrl("data/lib1"));
    JpsLibrary lib2 = assertInstanceOf(dependencies.get(3), JpsLibraryDependency.class).getLibrary();
    assertNotSame(lib1, lib2);
    assertNotNull(lib2);
    assertSameElements(lib2.getRootUrls(JpsOrderRootType.COMPILED), getUrl("data/lib2"));
  }

  /** A project whose content lives in a nested .idea directory still loads its Java extension. */
  public void testDotIdeaUnderDotIdea() {
    loadProject("/jps/model-serialization/testData/matryoshka/.idea");
    JpsJavaProjectExtension extension = JpsJavaExtensionService.getInstance().getProjectExtension(myProject);
    assertNotNull(extension);
    assertEquals(getUrl("out"), extension.getOutputUrl());
  }
  /** Project-wide and per-directory encodings are loaded; directory settings apply to files below them. */
  public void testLoadEncoding() {
    loadProject(SAMPLE_PROJECT_PATH);
    JpsEncodingConfigurationService service = JpsEncodingConfigurationService.getInstance();
    assertEquals("UTF-8", service.getProjectEncoding(myModel));
    JpsEncodingProjectConfiguration configuration = service.getEncodingConfiguration(myProject);
    assertNotNull(configuration);
    assertEquals("UTF-8", configuration.getProjectEncoding());
    assertEquals("windows-1251", configuration.getEncoding(new File(getAbsolutePath("util"))));
    // Files under a configured directory inherit its encoding.
    assertEquals("windows-1251", configuration.getEncoding(new File(getAbsolutePath("util/foo/bar/file.txt"))));
    assertEquals("UTF-8", configuration.getEncoding(new File(getAbsolutePath("other"))));
  }

  /** Resource roots round-trip: load, verify kinds/flags, then re-save and compare. */
  public void testResourceRoots() {
    String projectPath = "/jps/model-serialization/testData/resourceRoots/";
    loadProject(projectPath + "resourceRoots.ipr");
    JpsModule module = assertOneElement(myProject.getModules());
    List<JpsModuleSourceRoot> roots = module.getSourceRoots();
    assertSame(JavaSourceRootType.SOURCE, roots.get(0).getRootType());
    checkResourceRoot(roots.get(1), false, "");
    checkResourceRoot(roots.get(2), true, "");
    checkResourceRoot(roots.get(3), true, "foo");
    doTestSaveModule(module, projectPath + "resourceRoots.iml");
  }
// Asserts that the given root is a Java resource root whose "for generated
// sources" flag and relative output path match the expected values.
private static void checkResourceRoot(JpsModuleSourceRoot root, boolean forGenerated, String relativeOutput) {
  assertSame(JavaResourceRootType.RESOURCE, root.getRootType());
  JavaResourceRootProperties properties = root.getProperties(JavaResourceRootType.RESOURCE);
  assertNotNull(properties);
  assertEquals(forGenerated, properties.isForGeneratedSources());
  assertEquals(relativeOutput, properties.getRelativeOutputPath());
}
// Round-trip test: serializes modules and libraries of the sample project back
// to XML and compares the output with the original files on disk.
public void testSaveProject() {
  loadProject(SAMPLE_PROJECT_PATH);
  List<JpsModule> modules = myProject.getModules();
  doTestSaveModule(modules.get(0), SAMPLE_PROJECT_PATH + "/main.iml");
  doTestSaveModule(modules.get(1), SAMPLE_PROJECT_PATH + "/util/util.iml");
  //todo[nik] remember that test output root wasn't specified and doesn't save it to avoid unnecessary modifications of iml files
  //doTestSaveModule(modules.get(2), "xxx/xxx.iml");
  // Every library XML under .idea/libraries must correspond to a loaded library
  // that serializes back to the same content.
  File[] libs = getFileInSampleProject(".idea/libraries").listFiles();
  assertNotNull(libs);
  for (File libFile : libs) {
    String libName = FileUtil.getNameWithoutExtension(libFile);
    JpsLibrary library = myProject.getLibraryCollection().findLibrary(libName);
    assertNotNull(libName, library);
    doTestSaveLibrary(libFile, libName, library);
  }
}
// Serializes the given library and compares the result with the <library>
// element stored in libFile, expanding path macros relative to the sample
// project root.
private void doTestSaveLibrary(File libFile, String libName, JpsLibrary library) {
  try {
    Element actual = new Element("library");
    JpsLibraryTableSerializer.saveLibrary(library, actual, libName);
    JpsMacroExpander
      macroExpander = JpsProjectLoader.createProjectMacroExpander(Collections.<String, String>emptyMap(), getFileInSampleProject(""));
    Element rootElement = JpsLoaderBase.loadRootElement(libFile, macroExpander);
    Element expected = rootElement.getChild("library");
    PlatformTestUtil.assertElementsEqual(expected, actual);
  }
  catch (IOException e) {
    // Rethrow unchecked so the test method signature stays free of IOException.
    throw new RuntimeException(e);
  }
}
// Serializes the module's root model into a NewModuleRootManager component and
// compares it with the corresponding component of the .iml file on disk.
private void doTestSaveModule(JpsModule module, final String moduleFilePath) {
  try {
    Element actual = JDomSerializationUtil.createComponentElement("NewModuleRootManager");
    JpsModuleRootModelSerializer.saveRootModel(module, actual);
    File imlFile = new File(getTestDataFileAbsolutePath(moduleFilePath));
    Element rootElement = loadModuleRootTag(imlFile);
    Element expected = JDomSerializationUtil.findComponent(rootElement, "NewModuleRootManager");
    PlatformTestUtil.assertElementsEqual(expected, actual);
  }
  catch (Exception e) {
    // Rethrow unchecked so the helper can be called without declared exceptions.
    throw new RuntimeException(e);
  }
}
// Resolves the given path relative to the root of the sample project test data
// and returns it as a File.
public File getFileInSampleProject(String relativePath) {
  final String pathInTestData = SAMPLE_PROJECT_PATH + "/" + relativePath;
  return new File(getTestDataFileAbsolutePath(pathInTestData));
}
// Smoke test: loads the whole IntelliJ IDEA project from the installation home
// and prints how many modules, libraries and artifacts were read, with timing.
public void testLoadIdeaProject() {
  long start = System.currentTimeMillis();
  loadProjectByAbsolutePath(PathManager.getHomePath());
  // Fetch the module list once instead of calling getModules() repeatedly.
  List<JpsModule> modules = myProject.getModules();
  // assertFalse(isEmpty()) expresses the intent directly (was: size() > 0).
  assertFalse(modules.isEmpty());
  System.out.println("JpsProjectSerializationTest: " + modules.size() + " modules, " + myProject.getLibraryCollection().getLibraries().size() + " libraries and " +
                     JpsArtifactService.getInstance().getArtifacts(myProject).size() + " artifacts loaded in " + (System.currentTimeMillis() - start) + "ms");
}
}
| akosyakov/intellij-community | jps/model-serialization/testSrc/org/jetbrains/jps/model/serialization/JpsProjectSerializationTest.java | Java | apache-2.0 | 11,503 |
//
// basic_socket.hpp
// ~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_BASIC_SOCKET_HPP
#define ASIO_BASIC_SOCKET_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/async_result.hpp"
#include "asio/basic_io_object.hpp"
#include "asio/detail/handler_type_requirements.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/detail/type_traits.hpp"
#include "asio/error.hpp"
#include "asio/socket_base.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
/// Provides socket functionality.
/**
* The basic_socket class template provides functionality that is common to both
* stream-oriented and datagram-oriented sockets.
*
* @par Thread Safety
* @e Distinct @e objects: Safe.@n
* @e Shared @e objects: Unsafe.
*/
template <typename Protocol, typename SocketService>
class basic_socket
: public basic_io_object<SocketService>,
public socket_base
{
public:
  /// (Deprecated: Use native_handle_type.) The native representation of a
  /// socket.
  // Kept as an alias of native_handle_type for backward compatibility.
  typedef typename SocketService::native_handle_type native_type;
  /// The native representation of a socket.
  typedef typename SocketService::native_handle_type native_handle_type;
  /// The protocol type.
  typedef Protocol protocol_type;
  /// The endpoint type.
  typedef typename Protocol::endpoint endpoint_type;
  /// A basic_socket is always the lowest layer.
  typedef basic_socket<Protocol, SocketService> lowest_layer_type;
/// Construct a basic_socket without opening it.
/**
 * This constructor creates a socket without opening it.
 *
 * @param io_service The io_service object that the socket will use to
 * dispatch handlers for any asynchronous operations performed on the socket.
 */
explicit basic_socket(asio::io_service& io_service)
  : basic_io_object<SocketService>(io_service)
{
  // No native socket is created here; open() or assign() must be called
  // before the socket can be used.
}
/// Construct and open a basic_socket.
/**
 * This constructor creates and opens a socket.
 *
 * @param io_service The io_service object that the socket will use to
 * dispatch handlers for any asynchronous operations performed on the socket.
 *
 * @param protocol An object specifying protocol parameters to be used.
 *
 * @throws asio::system_error Thrown on failure.
 */
basic_socket(asio::io_service& io_service,
    const protocol_type& protocol)
  : basic_io_object<SocketService>(io_service)
{
  asio::error_code ec;
  this->get_service().open(this->get_implementation(), protocol, ec);
  // Convert any error from the non-throwing service call into an exception.
  asio::detail::throw_error(ec, "open");
}
/// Construct a basic_socket, opening it and binding it to the given local
/// endpoint.
/**
 * This constructor creates a socket and automatically opens it bound to the
 * specified endpoint on the local machine. The protocol used is the protocol
 * associated with the given endpoint.
 *
 * @param io_service The io_service object that the socket will use to
 * dispatch handlers for any asynchronous operations performed on the socket.
 *
 * @param endpoint An endpoint on the local machine to which the socket will
 * be bound.
 *
 * @throws asio::system_error Thrown on failure.
 */
basic_socket(asio::io_service& io_service,
    const endpoint_type& endpoint)
  : basic_io_object<SocketService>(io_service)
{
  asio::error_code ec;
  // Open using the protocol implied by the endpoint, then bind to it.
  const protocol_type protocol = endpoint.protocol();
  this->get_service().open(this->get_implementation(), protocol, ec);
  asio::detail::throw_error(ec, "open");
  this->get_service().bind(this->get_implementation(), endpoint, ec);
  asio::detail::throw_error(ec, "bind");
}
/// Construct a basic_socket on an existing native socket.
/**
 * This constructor creates a socket object to hold an existing native socket.
 *
 * @param io_service The io_service object that the socket will use to
 * dispatch handlers for any asynchronous operations performed on the socket.
 *
 * @param protocol An object specifying protocol parameters to be used.
 *
 * @param native_socket A native socket.
 *
 * @throws asio::system_error Thrown on failure.
 */
basic_socket(asio::io_service& io_service,
    const protocol_type& protocol, const native_handle_type& native_socket)
  : basic_io_object<SocketService>(io_service)
{
  asio::error_code ec;
  // Adopt the already-created native socket rather than opening a new one.
  this->get_service().assign(this->get_implementation(),
      protocol, native_socket, ec);
  asio::detail::throw_error(ec, "assign");
}
#if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)
/// Move-construct a basic_socket from another.
/**
 * This constructor moves a socket from one object to another.
 *
 * @param other The other basic_socket object from which the move will
 * occur.
 *
 * @note Following the move, the moved-from object is in the same state as if
 * constructed using the @c basic_socket(io_service&) constructor.
 */
basic_socket(basic_socket&& other)
  : basic_io_object<SocketService>(
      // The base class move transfers the underlying implementation.
      ASIO_MOVE_CAST(basic_socket)(other))
{
}
/// Move-assign a basic_socket from another.
/**
 * This assignment operator moves a socket from one object to another.
 *
 * @param other The other basic_socket object from which the move will
 * occur.
 *
 * @note Following the move, the moved-from object is in the same state as if
 * constructed using the @c basic_socket(io_service&) constructor.
 */
basic_socket& operator=(basic_socket&& other)
{
  // Delegate to the base class, which moves the underlying implementation.
  basic_io_object<SocketService>::operator=(
      ASIO_MOVE_CAST(basic_socket)(other));
  return *this;
}
// All sockets have access to each other's implementations.
// (Required so the converting move operations below can reach
// other.get_implementation() across protocol/service types.)
template <typename Protocol1, typename SocketService1>
friend class basic_socket;
/// Move-construct a basic_socket from a socket of another protocol type.
/**
 * This constructor moves a socket from one object to another.
 *
 * @param other The other basic_socket object from which the move will
 * occur.
 *
 * @note Following the move, the moved-from object is in the same state as if
 * constructed using the @c basic_socket(io_service&) constructor.
 */
template <typename Protocol1, typename SocketService1>
basic_socket(basic_socket<Protocol1, SocketService1>&& other,
    // Participates in overload resolution only when Protocol1 is
    // convertible to Protocol (e.g. tcp -> generic stream protocol).
    typename enable_if<is_convertible<Protocol1, Protocol>::value>::type* = 0)
  : basic_io_object<SocketService>(other.get_io_service())
{
  this->get_service().template converting_move_construct<Protocol1>(
      this->get_implementation(), other.get_implementation());
}
/// Move-assign a basic_socket from a socket of another protocol type.
/**
 * This assignment operator moves a socket from one object to another.
 *
 * @param other The other basic_socket object from which the move will
 * occur.
 *
 * @note Following the move, the moved-from object is in the same state as if
 * constructed using the @c basic_socket(io_service&) constructor.
 */
template <typename Protocol1, typename SocketService1>
typename enable_if<is_convertible<Protocol1, Protocol>::value,
    basic_socket>::type& operator=(
      basic_socket<Protocol1, SocketService1>&& other)
{
  // Convert via a temporary of this socket type, then move-assign from it.
  basic_socket tmp(ASIO_MOVE_CAST2(basic_socket<
          Protocol1, SocketService1>)(other));
  basic_io_object<SocketService>::operator=(
      ASIO_MOVE_CAST(basic_socket)(tmp));
  return *this;
}
#endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION)
/// Get a reference to the lowest layer.
/**
 * This function returns a reference to the lowest layer in a stack of
 * layers. Since a basic_socket cannot contain any further layers, it simply
 * returns a reference to itself.
 *
 * @return A reference to the lowest layer in the stack of layers. Ownership
 * is not transferred to the caller.
 */
lowest_layer_type& lowest_layer()
{
  // basic_socket is itself the lowest layer (see lowest_layer_type typedef).
  return *this;
}
/// Get a const reference to the lowest layer.
/**
 * This function returns a const reference to the lowest layer in a stack of
 * layers. Since a basic_socket cannot contain any further layers, it simply
 * returns a reference to itself.
 *
 * @return A const reference to the lowest layer in the stack of layers.
 * Ownership is not transferred to the caller.
 */
const lowest_layer_type& lowest_layer() const
{
  // basic_socket is itself the lowest layer (see lowest_layer_type typedef).
  return *this;
}
/// Open the socket using the specified protocol.
/**
 * This function opens the socket so that it will use the specified protocol.
 *
 * @param protocol An object specifying protocol parameters to be used.
 *
 * @throws asio::system_error Thrown on failure.
 *
 * @par Example
 * @code
 * asio::ip::tcp::socket socket(io_service);
 * socket.open(asio::ip::tcp::v4());
 * @endcode
 */
void open(const protocol_type& protocol = protocol_type())
{
  asio::error_code ec;
  this->get_service().open(this->get_implementation(), protocol, ec);
  // Convert any error from the non-throwing overload into an exception.
  asio::detail::throw_error(ec, "open");
}
/// Open the socket using the specified protocol.
/**
 * This function opens the socket so that it will use the specified protocol.
 *
 * @param protocol An object specifying which protocol is to be used.
 *
 * @param ec Set to indicate what error occurred, if any.
 *
 * @par Example
 * @code
 * asio::ip::tcp::socket socket(io_service);
 * asio::error_code ec;
 * socket.open(asio::ip::tcp::v4(), ec);
 * if (ec)
 * {
 *   // An error occurred.
 * }
 * @endcode
 */
asio::error_code open(const protocol_type& protocol,
    asio::error_code& ec)
{
  // Non-throwing overload: forward directly to the service.
  return this->get_service().open(this->get_implementation(), protocol, ec);
}
/// Assign an existing native socket to the socket.
/**
 * This function opens the socket to hold an existing native socket.
 *
 * @param protocol An object specifying which protocol is to be used.
 *
 * @param native_socket A native socket.
 *
 * @throws asio::system_error Thrown on failure.
 */
void assign(const protocol_type& protocol,
    const native_handle_type& native_socket)
{
  asio::error_code ec;
  this->get_service().assign(this->get_implementation(),
      protocol, native_socket, ec);
  asio::detail::throw_error(ec, "assign");
}
/// Assign an existing native socket to the socket.
/**
 * This function opens the socket to hold an existing native socket.
 *
 * @param protocol An object specifying which protocol is to be used.
 *
 * @param native_socket A native socket.
 *
 * @param ec Set to indicate what error occurred, if any.
 */
asio::error_code assign(const protocol_type& protocol,
    const native_handle_type& native_socket, asio::error_code& ec)
{
  // Non-throwing overload: forward directly to the service.
  return this->get_service().assign(this->get_implementation(),
      protocol, native_socket, ec);
}
/// Determine whether the socket is open.
/**
 * @return true if the socket is open.
 */
bool is_open() const
{
  return this->get_service().is_open(this->get_implementation());
}
/// Close the socket.
/**
 * This function is used to close the socket. Any asynchronous send, receive
 * or connect operations will be cancelled immediately, and will complete
 * with the asio::error::operation_aborted error.
 *
 * @throws asio::system_error Thrown on failure. Note that, even if
 * the function indicates an error, the underlying descriptor is closed.
 *
 * @note For portable behaviour with respect to graceful closure of a
 * connected socket, call shutdown() before closing the socket.
 */
void close()
{
  asio::error_code ec;
  this->get_service().close(this->get_implementation(), ec);
  // Convert any error from the non-throwing overload into an exception.
  asio::detail::throw_error(ec, "close");
}
/// Close the socket.
/**
 * This function is used to close the socket. Any asynchronous send, receive
 * or connect operations will be cancelled immediately, and will complete
 * with the asio::error::operation_aborted error.
 *
 * @param ec Set to indicate what error occurred, if any. Note that, even if
 * the function indicates an error, the underlying descriptor is closed.
 *
 * @par Example
 * @code
 * asio::ip::tcp::socket socket(io_service);
 * ...
 * asio::error_code ec;
 * socket.close(ec);
 * if (ec)
 * {
 *   // An error occurred.
 * }
 * @endcode
 *
 * @note For portable behaviour with respect to graceful closure of a
 * connected socket, call shutdown() before closing the socket.
 */
asio::error_code close(asio::error_code& ec)
{
  // Non-throwing overload: forward directly to the service.
  return this->get_service().close(this->get_implementation(), ec);
}
/// (Deprecated: Use native_handle().) Get the native socket representation.
/**
 * This function may be used to obtain the underlying representation of the
 * socket. This is intended to allow access to native socket functionality
 * that is not otherwise provided.
 */
native_type native()
{
  // Same behaviour as native_handle(); retained for backward compatibility.
  return this->get_service().native_handle(this->get_implementation());
}
/// Get the native socket representation.
/**
 * This function may be used to obtain the underlying representation of the
 * socket. This is intended to allow access to native socket functionality
 * that is not otherwise provided.
 */
native_handle_type native_handle()
{
  return this->get_service().native_handle(this->get_implementation());
}
/// Cancel all asynchronous operations associated with the socket.
/**
 * This function causes all outstanding asynchronous connect, send and receive
 * operations to finish immediately, and the handlers for cancelled operations
 * will be passed the asio::error::operation_aborted error.
 *
 * @throws asio::system_error Thrown on failure.
 *
 * @note Calls to cancel() will always fail with
 * asio::error::operation_not_supported when run on Windows XP, Windows
 * Server 2003, and earlier versions of Windows, unless
 * ASIO_ENABLE_CANCELIO is defined. However, the CancelIo function has
 * two issues that should be considered before enabling its use:
 *
 * @li It will only cancel asynchronous operations that were initiated in the
 * current thread.
 *
 * @li It can appear to complete without error, but the request to cancel the
 * unfinished operations may be silently ignored by the operating system.
 * Whether it works or not seems to depend on the drivers that are installed.
 *
 * For portable cancellation, consider using one of the following
 * alternatives:
 *
 * @li Disable asio's I/O completion port backend by defining
 * ASIO_DISABLE_IOCP.
 *
 * @li Use the close() function to simultaneously cancel the outstanding
 * operations and close the socket.
 *
 * When running on Windows Vista, Windows Server 2008, and later, the
 * CancelIoEx function is always used. This function does not have the
 * problems described above.
 */
// Mark the function deprecated on MSVC builds targeting pre-Vista Windows
// without ASIO_ENABLE_CANCELIO, mirroring the runtime failure described above.
#if defined(ASIO_MSVC) && (ASIO_MSVC >= 1400) \
  && (!defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0600) \
  && !defined(ASIO_ENABLE_CANCELIO)
__declspec(deprecated("By default, this function always fails with "
      "operation_not_supported when used on Windows XP, Windows Server 2003, "
      "or earlier. Consult documentation for details."))
#endif
void cancel()
{
  asio::error_code ec;
  this->get_service().cancel(this->get_implementation(), ec);
  asio::detail::throw_error(ec, "cancel");
}
/// Cancel all asynchronous operations associated with the socket.
/**
 * This function causes all outstanding asynchronous connect, send and receive
 * operations to finish immediately, and the handlers for cancelled operations
 * will be passed the asio::error::operation_aborted error.
 *
 * @param ec Set to indicate what error occurred, if any.
 *
 * @note Calls to cancel() will always fail with
 * asio::error::operation_not_supported when run on Windows XP, Windows
 * Server 2003, and earlier versions of Windows, unless
 * ASIO_ENABLE_CANCELIO is defined. However, the CancelIo function has
 * two issues that should be considered before enabling its use:
 *
 * @li It will only cancel asynchronous operations that were initiated in the
 * current thread.
 *
 * @li It can appear to complete without error, but the request to cancel the
 * unfinished operations may be silently ignored by the operating system.
 * Whether it works or not seems to depend on the drivers that are installed.
 *
 * For portable cancellation, consider using one of the following
 * alternatives:
 *
 * @li Disable asio's I/O completion port backend by defining
 * ASIO_DISABLE_IOCP.
 *
 * @li Use the close() function to simultaneously cancel the outstanding
 * operations and close the socket.
 *
 * When running on Windows Vista, Windows Server 2008, and later, the
 * CancelIoEx function is always used. This function does not have the
 * problems described above.
 */
// Same deprecation condition as the throwing overload above.
#if defined(ASIO_MSVC) && (ASIO_MSVC >= 1400) \
  && (!defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0600) \
  && !defined(ASIO_ENABLE_CANCELIO)
__declspec(deprecated("By default, this function always fails with "
      "operation_not_supported when used on Windows XP, Windows Server 2003, "
      "or earlier. Consult documentation for details."))
#endif
asio::error_code cancel(asio::error_code& ec)
{
  return this->get_service().cancel(this->get_implementation(), ec);
}
/// Determine whether the socket is at the out-of-band data mark.
/**
 * This function is used to check whether the socket input is currently
 * positioned at the out-of-band data mark.
 *
 * @return A bool indicating whether the socket is at the out-of-band data
 * mark.
 *
 * @throws asio::system_error Thrown on failure.
 */
bool at_mark() const
{
  asio::error_code ec;
  bool b = this->get_service().at_mark(this->get_implementation(), ec);
  // Throw before returning so callers never see a result paired with an error.
  asio::detail::throw_error(ec, "at_mark");
  return b;
}
/// Determine whether the socket is at the out-of-band data mark.
/**
 * This function is used to check whether the socket input is currently
 * positioned at the out-of-band data mark.
 *
 * @param ec Set to indicate what error occurred, if any.
 *
 * @return A bool indicating whether the socket is at the out-of-band data
 * mark.
 */
bool at_mark(asio::error_code& ec) const
{
  // Non-throwing overload: forward directly to the service.
  return this->get_service().at_mark(this->get_implementation(), ec);
}
/// Determine the number of bytes available for reading.
/**
 * This function is used to determine the number of bytes that may be read
 * without blocking.
 *
 * @return The number of bytes that may be read without blocking, or 0 if an
 * error occurs.
 *
 * @throws asio::system_error Thrown on failure.
 */
std::size_t available() const
{
  asio::error_code ec;
  std::size_t s = this->get_service().available(
      this->get_implementation(), ec);
  // Throw before returning so callers never see a result paired with an error.
  asio::detail::throw_error(ec, "available");
  return s;
}
/// Determine the number of bytes available for reading.
/**
 * This function is used to determine the number of bytes that may be read
 * without blocking.
 *
 * @param ec Set to indicate what error occurred, if any.
 *
 * @return The number of bytes that may be read without blocking, or 0 if an
 * error occurs.
 */
std::size_t available(asio::error_code& ec) const
{
  // Non-throwing overload: forward directly to the service.
  return this->get_service().available(this->get_implementation(), ec);
}
/// Bind the socket to the given local endpoint.
/**
 * This function binds the socket to the specified endpoint on the local
 * machine.
 *
 * @param endpoint An endpoint on the local machine to which the socket will
 * be bound.
 *
 * @throws asio::system_error Thrown on failure.
 *
 * @par Example
 * @code
 * asio::ip::tcp::socket socket(io_service);
 * socket.open(asio::ip::tcp::v4());
 * socket.bind(asio::ip::tcp::endpoint(
 *       asio::ip::tcp::v4(), 12345));
 * @endcode
 */
void bind(const endpoint_type& endpoint)
{
  asio::error_code ec;
  this->get_service().bind(this->get_implementation(), endpoint, ec);
  // Convert any error from the non-throwing overload into an exception.
  asio::detail::throw_error(ec, "bind");
}
/// Bind the socket to the given local endpoint.
/**
 * This function binds the socket to the specified endpoint on the local
 * machine.
 *
 * @param endpoint An endpoint on the local machine to which the socket will
 * be bound.
 *
 * @param ec Set to indicate what error occurred, if any.
 *
 * @par Example
 * @code
 * asio::ip::tcp::socket socket(io_service);
 * socket.open(asio::ip::tcp::v4());
 * asio::error_code ec;
 * socket.bind(asio::ip::tcp::endpoint(
 *       asio::ip::tcp::v4(), 12345), ec);
 * if (ec)
 * {
 *   // An error occurred.
 * }
 * @endcode
 */
asio::error_code bind(const endpoint_type& endpoint,
    asio::error_code& ec)
{
  // Non-throwing overload: forward directly to the service.
  return this->get_service().bind(this->get_implementation(), endpoint, ec);
}
/// Connect the socket to the specified endpoint.
/**
 * This function is used to connect a socket to the specified remote endpoint.
 * The function call will block until the connection is successfully made or
 * an error occurs.
 *
 * The socket is automatically opened if it is not already open. If the
 * connect fails, and the socket was automatically opened, the socket is
 * not returned to the closed state.
 *
 * @param peer_endpoint The remote endpoint to which the socket will be
 * connected.
 *
 * @throws asio::system_error Thrown on failure.
 *
 * @par Example
 * @code
 * asio::ip::tcp::socket socket(io_service);
 * asio::ip::tcp::endpoint endpoint(
 *     asio::ip::address::from_string("1.2.3.4"), 12345);
 * socket.connect(endpoint);
 * @endcode
 */
void connect(const endpoint_type& peer_endpoint)
{
  asio::error_code ec;
  if (!is_open())
  {
    // Implicitly open using the protocol implied by the peer endpoint.
    this->get_service().open(this->get_implementation(),
        peer_endpoint.protocol(), ec);
    // Note: an open failure is reported under the "connect" context string,
    // since from the caller's point of view the connect is what failed.
    asio::detail::throw_error(ec, "connect");
  }
  this->get_service().connect(this->get_implementation(), peer_endpoint, ec);
  asio::detail::throw_error(ec, "connect");
}
/// Connect the socket to the specified endpoint.
/**
 * This function is used to connect a socket to the specified remote endpoint.
 * The function call will block until the connection is successfully made or
 * an error occurs.
 *
 * The socket is automatically opened if it is not already open. If the
 * connect fails, and the socket was automatically opened, the socket is
 * not returned to the closed state.
 *
 * @param peer_endpoint The remote endpoint to which the socket will be
 * connected.
 *
 * @param ec Set to indicate what error occurred, if any.
 *
 * @par Example
 * @code
 * asio::ip::tcp::socket socket(io_service);
 * asio::ip::tcp::endpoint endpoint(
 *     asio::ip::address::from_string("1.2.3.4"), 12345);
 * asio::error_code ec;
 * socket.connect(endpoint, ec);
 * if (ec)
 * {
 *   // An error occurred.
 * }
 * @endcode
 */
asio::error_code connect(const endpoint_type& peer_endpoint,
    asio::error_code& ec)
{
  if (!is_open())
  {
    // Implicitly open using the protocol implied by the peer endpoint;
    // abort early if the open itself fails.
    if (this->get_service().open(this->get_implementation(),
          peer_endpoint.protocol(), ec))
    {
      return ec;
    }
  }
  return this->get_service().connect(
      this->get_implementation(), peer_endpoint, ec);
}
/// Start an asynchronous connect.
/**
 * This function is used to asynchronously connect a socket to the specified
 * remote endpoint. The function call always returns immediately.
 *
 * The socket is automatically opened if it is not already open. If the
 * connect fails, and the socket was automatically opened, the socket is
 * not returned to the closed state.
 *
 * @param peer_endpoint The remote endpoint to which the socket will be
 * connected. Copies will be made of the endpoint object as required.
 *
 * @param handler The handler to be called when the connection operation
 * completes. Copies will be made of the handler as required. The function
 * signature of the handler must be:
 * @code void handler(
 *   const asio::error_code& error // Result of operation
 * ); @endcode
 * Regardless of whether the asynchronous operation completes immediately or
 * not, the handler will not be invoked from within this function. Invocation
 * of the handler will be performed in a manner equivalent to using
 * asio::io_service::post().
 *
 * @par Example
 * @code
 * void connect_handler(const asio::error_code& error)
 * {
 *   if (!error)
 *   {
 *     // Connect succeeded.
 *   }
 * }
 *
 * ...
 *
 * asio::ip::tcp::socket socket(io_service);
 * asio::ip::tcp::endpoint endpoint(
 *     asio::ip::address::from_string("1.2.3.4"), 12345);
 * socket.async_connect(endpoint, connect_handler);
 * @endcode
 */
template <typename ConnectHandler>
ASIO_INITFN_RESULT_TYPE(ConnectHandler,
    void (asio::error_code))
async_connect(const endpoint_type& peer_endpoint,
    ASIO_MOVE_ARG(ConnectHandler) handler)
{
  // If you get an error on the following line it means that your handler does
  // not meet the documented type requirements for a ConnectHandler.
  ASIO_CONNECT_HANDLER_CHECK(ConnectHandler, handler) type_check;
  if (!is_open())
  {
    asio::error_code ec;
    const protocol_type protocol = peer_endpoint.protocol();
    if (this->get_service().open(this->get_implementation(), protocol, ec))
    {
      // The implicit open failed. To honour the guarantee that the handler
      // is never invoked from within this function, post the error through
      // the io_service instead of calling the handler directly.
      detail::async_result_init<
        ConnectHandler, void (asio::error_code)> init(
          ASIO_MOVE_CAST(ConnectHandler)(handler));
      this->get_io_service().post(
          asio::detail::bind_handler(
            ASIO_MOVE_CAST(ASIO_HANDLER_TYPE(
              ConnectHandler, void (asio::error_code)))(
                init.handler), ec));
      return init.result.get();
    }
  }
  return this->get_service().async_connect(this->get_implementation(),
      peer_endpoint, ASIO_MOVE_CAST(ConnectHandler)(handler));
}
/// Set an option on the socket.
/**
 * This function is used to set an option on the socket.
 *
 * @param option The new option value to be set on the socket.
 *
 * @throws asio::system_error Thrown on failure.
 *
 * @sa SettableSocketOption @n
 * asio::socket_base::broadcast @n
 * asio::socket_base::do_not_route @n
 * asio::socket_base::keep_alive @n
 * asio::socket_base::linger @n
 * asio::socket_base::receive_buffer_size @n
 * asio::socket_base::receive_low_watermark @n
 * asio::socket_base::reuse_address @n
 * asio::socket_base::send_buffer_size @n
 * asio::socket_base::send_low_watermark @n
 * asio::ip::multicast::join_group @n
 * asio::ip::multicast::leave_group @n
 * asio::ip::multicast::enable_loopback @n
 * asio::ip::multicast::outbound_interface @n
 * asio::ip::multicast::hops @n
 * asio::ip::tcp::no_delay
 *
 * @par Example
 * Setting the IPPROTO_TCP/TCP_NODELAY option:
 * @code
 * asio::ip::tcp::socket socket(io_service);
 * ...
 * asio::ip::tcp::no_delay option(true);
 * socket.set_option(option);
 * @endcode
 */
template <typename SettableSocketOption>
void set_option(const SettableSocketOption& option)
{
  asio::error_code ec;
  this->get_service().set_option(this->get_implementation(), option, ec);
  // Convert any error from the non-throwing overload into an exception.
  asio::detail::throw_error(ec, "set_option");
}
/// Set an option on the socket.
/**
 * This function is used to set an option on the socket.
 *
 * @param option The new option value to be set on the socket.
 *
 * @param ec Set to indicate what error occurred, if any.
 *
 * @sa SettableSocketOption @n
 * asio::socket_base::broadcast @n
 * asio::socket_base::do_not_route @n
 * asio::socket_base::keep_alive @n
 * asio::socket_base::linger @n
 * asio::socket_base::receive_buffer_size @n
 * asio::socket_base::receive_low_watermark @n
 * asio::socket_base::reuse_address @n
 * asio::socket_base::send_buffer_size @n
 * asio::socket_base::send_low_watermark @n
 * asio::ip::multicast::join_group @n
 * asio::ip::multicast::leave_group @n
 * asio::ip::multicast::enable_loopback @n
 * asio::ip::multicast::outbound_interface @n
 * asio::ip::multicast::hops @n
 * asio::ip::tcp::no_delay
 *
 * @par Example
 * Setting the IPPROTO_TCP/TCP_NODELAY option:
 * @code
 * asio::ip::tcp::socket socket(io_service);
 * ...
 * asio::ip::tcp::no_delay option(true);
 * asio::error_code ec;
 * socket.set_option(option, ec);
 * if (ec)
 * {
 *   // An error occurred.
 * }
 * @endcode
 */
template <typename SettableSocketOption>
asio::error_code set_option(const SettableSocketOption& option,
    asio::error_code& ec)
{
  // Non-throwing overload: forward directly to the service.
  return this->get_service().set_option(
      this->get_implementation(), option, ec);
}
/// Get an option from the socket.
/**
 * This function is used to get the current value of an option on the socket.
 *
 * @param option The option value to be obtained from the socket.
 *
 * @throws asio::system_error Thrown on failure.
 *
 * @sa GettableSocketOption @n
 * asio::socket_base::broadcast @n
 * asio::socket_base::do_not_route @n
 * asio::socket_base::keep_alive @n
 * asio::socket_base::linger @n
 * asio::socket_base::receive_buffer_size @n
 * asio::socket_base::receive_low_watermark @n
 * asio::socket_base::reuse_address @n
 * asio::socket_base::send_buffer_size @n
 * asio::socket_base::send_low_watermark @n
 * asio::ip::multicast::join_group @n
 * asio::ip::multicast::leave_group @n
 * asio::ip::multicast::enable_loopback @n
 * asio::ip::multicast::outbound_interface @n
 * asio::ip::multicast::hops @n
 * asio::ip::tcp::no_delay
 *
 * @par Example
 * Getting the value of the SOL_SOCKET/SO_KEEPALIVE option:
 * @code
 * asio::ip::tcp::socket socket(io_service);
 * ...
 * asio::ip::tcp::socket::keep_alive option;
 * socket.get_option(option);
 * bool is_set = option.value();
 * @endcode
 */
template <typename GettableSocketOption>
void get_option(GettableSocketOption& option) const
{
  asio::error_code ec;
  // The option object is filled in by the service call.
  this->get_service().get_option(this->get_implementation(), option, ec);
  asio::detail::throw_error(ec, "get_option");
}
/// Get an option from the socket.
/**
* This function is used to get the current value of an option on the socket.
*
* @param option The option value to be obtained from the socket.
*
* @param ec Set to indicate what error occurred, if any.
*
* @sa GettableSocketOption @n
* asio::socket_base::broadcast @n
* asio::socket_base::do_not_route @n
* asio::socket_base::keep_alive @n
* asio::socket_base::linger @n
* asio::socket_base::receive_buffer_size @n
* asio::socket_base::receive_low_watermark @n
* asio::socket_base::reuse_address @n
* asio::socket_base::send_buffer_size @n
* asio::socket_base::send_low_watermark @n
* asio::ip::multicast::join_group @n
* asio::ip::multicast::leave_group @n
* asio::ip::multicast::enable_loopback @n
* asio::ip::multicast::outbound_interface @n
* asio::ip::multicast::hops @n
* asio::ip::tcp::no_delay
*
* @par Example
* Getting the value of the SOL_SOCKET/SO_KEEPALIVE option:
* @code
* asio::ip::tcp::socket socket(io_service);
* ...
* asio::ip::tcp::socket::keep_alive option;
* asio::error_code ec;
* socket.get_option(option, ec);
* if (ec)
* {
* // An error occurred.
* }
* bool is_set = option.value();
* @endcode
*/
  template <typename GettableSocketOption>
  asio::error_code get_option(GettableSocketOption& option,
      asio::error_code& ec) const
  {
    // Non-throwing overload: the outcome is reported through ec and also
    // returned for convenience.
    return this->get_service().get_option(
        this->get_implementation(), option, ec);
  }
/// Perform an IO control command on the socket.
/**
* This function is used to execute an IO control command on the socket.
*
* @param command The IO control command to be performed on the socket.
*
* @throws asio::system_error Thrown on failure.
*
* @sa IoControlCommand @n
* asio::socket_base::bytes_readable @n
* asio::socket_base::non_blocking_io
*
* @par Example
* Getting the number of bytes ready to read:
* @code
* asio::ip::tcp::socket socket(io_service);
* ...
* asio::ip::tcp::socket::bytes_readable command;
* socket.io_control(command);
* std::size_t bytes_readable = command.get();
* @endcode
*/
  template <typename IoControlCommand>
  void io_control(IoControlCommand& command)
  {
    // Throwing overload: a non-zero error code from the service becomes a
    // thrown asio::system_error tagged with the operation name.
    asio::error_code ec;
    this->get_service().io_control(this->get_implementation(), command, ec);
    asio::detail::throw_error(ec, "io_control");
  }
/// Perform an IO control command on the socket.
/**
* This function is used to execute an IO control command on the socket.
*
* @param command The IO control command to be performed on the socket.
*
* @param ec Set to indicate what error occurred, if any.
*
* @sa IoControlCommand @n
* asio::socket_base::bytes_readable @n
* asio::socket_base::non_blocking_io
*
* @par Example
* Getting the number of bytes ready to read:
* @code
* asio::ip::tcp::socket socket(io_service);
* ...
* asio::ip::tcp::socket::bytes_readable command;
* asio::error_code ec;
* socket.io_control(command, ec);
* if (ec)
* {
* // An error occurred.
* }
* std::size_t bytes_readable = command.get();
* @endcode
*/
  template <typename IoControlCommand>
  asio::error_code io_control(IoControlCommand& command,
      asio::error_code& ec)
  {
    // Non-throwing overload: errors are reported through ec, never thrown.
    return this->get_service().io_control(
        this->get_implementation(), command, ec);
  }
/// Gets the non-blocking mode of the socket.
/**
* @returns @c true if the socket's synchronous operations will fail with
* asio::error::would_block if they are unable to perform the requested
* operation immediately. If @c false, synchronous operations will block
* until complete.
*
* @note The non-blocking mode has no effect on the behaviour of asynchronous
* operations. Asynchronous operations will never fail with the error
* asio::error::would_block.
*/
  bool non_blocking() const
  {
    // Query the service for the non-blocking mode of this socket object
    // (affects synchronous operations only — see the documentation above).
    return this->get_service().non_blocking(this->get_implementation());
  }
/// Sets the non-blocking mode of the socket.
/**
* @param mode If @c true, the socket's synchronous operations will fail with
* asio::error::would_block if they are unable to perform the requested
* operation immediately. If @c false, synchronous operations will block
* until complete.
*
* @throws asio::system_error Thrown on failure.
*
* @note The non-blocking mode has no effect on the behaviour of asynchronous
* operations. Asynchronous operations will never fail with the error
* asio::error::would_block.
*/
  void non_blocking(bool mode)
  {
    // Throwing overload: delegate to the service, then convert any failure
    // into a thrown asio::system_error.
    asio::error_code ec;
    this->get_service().non_blocking(this->get_implementation(), mode, ec);
    asio::detail::throw_error(ec, "non_blocking");
  }
/// Sets the non-blocking mode of the socket.
/**
* @param mode If @c true, the socket's synchronous operations will fail with
* asio::error::would_block if they are unable to perform the requested
* operation immediately. If @c false, synchronous operations will block
* until complete.
*
* @param ec Set to indicate what error occurred, if any.
*
* @note The non-blocking mode has no effect on the behaviour of asynchronous
* operations. Asynchronous operations will never fail with the error
* asio::error::would_block.
*/
  asio::error_code non_blocking(
      bool mode, asio::error_code& ec)
  {
    // Non-throwing overload: the outcome is reported through ec and also
    // returned for convenience.
    return this->get_service().non_blocking(
        this->get_implementation(), mode, ec);
  }
/// Gets the non-blocking mode of the native socket implementation.
/**
* This function is used to retrieve the non-blocking mode of the underlying
* native socket. This mode has no effect on the behaviour of the socket
* object's synchronous operations.
*
* @returns @c true if the underlying socket is in non-blocking mode and
* direct system calls may fail with asio::error::would_block (or the
* equivalent system error).
*
* @note The current non-blocking mode is cached by the socket object.
* Consequently, the return value may be incorrect if the non-blocking mode
* was set directly on the native socket.
*
* @par Example
* This function is intended to allow the encapsulation of arbitrary
* non-blocking system calls as asynchronous operations, in a way that is
* transparent to the user of the socket object. The following example
* illustrates how Linux's @c sendfile system call might be encapsulated:
* @code template <typename Handler>
* struct sendfile_op
* {
* tcp::socket& sock_;
* int fd_;
* Handler handler_;
* off_t offset_;
* std::size_t total_bytes_transferred_;
*
* // Function call operator meeting WriteHandler requirements.
* // Used as the handler for the async_write_some operation.
* void operator()(asio::error_code ec, std::size_t)
* {
* // Put the underlying socket into non-blocking mode.
* if (!ec)
* if (!sock_.native_non_blocking())
* sock_.native_non_blocking(true, ec);
*
* if (!ec)
* {
* for (;;)
* {
* // Try the system call.
* errno = 0;
* int n = ::sendfile(sock_.native_handle(), fd_, &offset_, 65536);
* ec = asio::error_code(n < 0 ? errno : 0,
* asio::error::get_system_category());
* total_bytes_transferred_ += ec ? 0 : n;
*
* // Retry operation immediately if interrupted by signal.
* if (ec == asio::error::interrupted)
* continue;
*
* // Check if we need to run the operation again.
* if (ec == asio::error::would_block
* || ec == asio::error::try_again)
* {
* // We have to wait for the socket to become ready again.
* sock_.async_write_some(asio::null_buffers(), *this);
* return;
* }
*
* if (ec || n == 0)
* {
* // An error occurred, or we have reached the end of the file.
* // Either way we must exit the loop so we can call the handler.
* break;
* }
*
* // Loop around to try calling sendfile again.
* }
* }
*
* // Pass result back to user's handler.
* handler_(ec, total_bytes_transferred_);
* }
* };
*
* template <typename Handler>
* void async_sendfile(tcp::socket& sock, int fd, Handler h)
* {
* sendfile_op<Handler> op = { sock, fd, h, 0, 0 };
* sock.async_write_some(asio::null_buffers(), op);
* } @endcode
*/
  bool native_non_blocking() const
  {
    // Returns the non-blocking mode of the underlying native socket as
    // tracked by the service. Per the documentation above this value is
    // cached, so it may be stale if the mode was changed directly on the
    // native handle.
    return this->get_service().native_non_blocking(this->get_implementation());
  }
/// Sets the non-blocking mode of the native socket implementation.
/**
* This function is used to modify the non-blocking mode of the underlying
* native socket. It has no effect on the behaviour of the socket object's
* synchronous operations.
*
* @param mode If @c true, the underlying socket is put into non-blocking
* mode and direct system calls may fail with asio::error::would_block
* (or the equivalent system error).
*
* @throws asio::system_error Thrown on failure. If the @c mode is
* @c false, but the current value of @c non_blocking() is @c true, this
* function fails with asio::error::invalid_argument, as the
* combination does not make sense.
*
* @par Example
* This function is intended to allow the encapsulation of arbitrary
* non-blocking system calls as asynchronous operations, in a way that is
* transparent to the user of the socket object. The following example
* illustrates how Linux's @c sendfile system call might be encapsulated:
* @code template <typename Handler>
* struct sendfile_op
* {
* tcp::socket& sock_;
* int fd_;
* Handler handler_;
* off_t offset_;
* std::size_t total_bytes_transferred_;
*
* // Function call operator meeting WriteHandler requirements.
* // Used as the handler for the async_write_some operation.
* void operator()(asio::error_code ec, std::size_t)
* {
* // Put the underlying socket into non-blocking mode.
* if (!ec)
* if (!sock_.native_non_blocking())
* sock_.native_non_blocking(true, ec);
*
* if (!ec)
* {
* for (;;)
* {
* // Try the system call.
* errno = 0;
* int n = ::sendfile(sock_.native_handle(), fd_, &offset_, 65536);
* ec = asio::error_code(n < 0 ? errno : 0,
* asio::error::get_system_category());
* total_bytes_transferred_ += ec ? 0 : n;
*
* // Retry operation immediately if interrupted by signal.
* if (ec == asio::error::interrupted)
* continue;
*
* // Check if we need to run the operation again.
* if (ec == asio::error::would_block
* || ec == asio::error::try_again)
* {
* // We have to wait for the socket to become ready again.
* sock_.async_write_some(asio::null_buffers(), *this);
* return;
* }
*
* if (ec || n == 0)
* {
* // An error occurred, or we have reached the end of the file.
* // Either way we must exit the loop so we can call the handler.
* break;
* }
*
* // Loop around to try calling sendfile again.
* }
* }
*
* // Pass result back to user's handler.
* handler_(ec, total_bytes_transferred_);
* }
* };
*
* template <typename Handler>
* void async_sendfile(tcp::socket& sock, int fd, Handler h)
* {
* sendfile_op<Handler> op = { sock, fd, h, 0, 0 };
* sock.async_write_some(asio::null_buffers(), op);
* } @endcode
*/
  void native_non_blocking(bool mode)
  {
    // Throwing overload: per the documentation above, clearing the native
    // mode while non_blocking() is true fails with invalid_argument; any
    // failure is converted into a thrown asio::system_error here.
    asio::error_code ec;
    this->get_service().native_non_blocking(
        this->get_implementation(), mode, ec);
    asio::detail::throw_error(ec, "native_non_blocking");
  }
/// Sets the non-blocking mode of the native socket implementation.
/**
* This function is used to modify the non-blocking mode of the underlying
* native socket. It has no effect on the behaviour of the socket object's
* synchronous operations.
*
* @param mode If @c true, the underlying socket is put into non-blocking
* mode and direct system calls may fail with asio::error::would_block
* (or the equivalent system error).
*
* @param ec Set to indicate what error occurred, if any. If the @c mode is
* @c false, but the current value of @c non_blocking() is @c true, this
* function fails with asio::error::invalid_argument, as the
* combination does not make sense.
*
* @par Example
* This function is intended to allow the encapsulation of arbitrary
* non-blocking system calls as asynchronous operations, in a way that is
* transparent to the user of the socket object. The following example
* illustrates how Linux's @c sendfile system call might be encapsulated:
* @code template <typename Handler>
* struct sendfile_op
* {
* tcp::socket& sock_;
* int fd_;
* Handler handler_;
* off_t offset_;
* std::size_t total_bytes_transferred_;
*
* // Function call operator meeting WriteHandler requirements.
* // Used as the handler for the async_write_some operation.
* void operator()(asio::error_code ec, std::size_t)
* {
* // Put the underlying socket into non-blocking mode.
* if (!ec)
* if (!sock_.native_non_blocking())
* sock_.native_non_blocking(true, ec);
*
* if (!ec)
* {
* for (;;)
* {
* // Try the system call.
* errno = 0;
* int n = ::sendfile(sock_.native_handle(), fd_, &offset_, 65536);
* ec = asio::error_code(n < 0 ? errno : 0,
* asio::error::get_system_category());
* total_bytes_transferred_ += ec ? 0 : n;
*
* // Retry operation immediately if interrupted by signal.
* if (ec == asio::error::interrupted)
* continue;
*
* // Check if we need to run the operation again.
* if (ec == asio::error::would_block
* || ec == asio::error::try_again)
* {
* // We have to wait for the socket to become ready again.
* sock_.async_write_some(asio::null_buffers(), *this);
* return;
* }
*
* if (ec || n == 0)
* {
* // An error occurred, or we have reached the end of the file.
* // Either way we must exit the loop so we can call the handler.
* break;
* }
*
* // Loop around to try calling sendfile again.
* }
* }
*
* // Pass result back to user's handler.
* handler_(ec, total_bytes_transferred_);
* }
* };
*
* template <typename Handler>
* void async_sendfile(tcp::socket& sock, int fd, Handler h)
* {
* sendfile_op<Handler> op = { sock, fd, h, 0, 0 };
* sock.async_write_some(asio::null_buffers(), op);
* } @endcode
*/
  asio::error_code native_non_blocking(
      bool mode, asio::error_code& ec)
  {
    // Non-throwing overload: the outcome is reported through ec and also
    // returned for convenience.
    return this->get_service().native_non_blocking(
        this->get_implementation(), mode, ec);
  }
/// Get the local endpoint of the socket.
/**
* This function is used to obtain the locally bound endpoint of the socket.
*
* @returns An object that represents the local endpoint of the socket.
*
* @throws asio::system_error Thrown on failure.
*
* @par Example
* @code
* asio::ip::tcp::socket socket(io_service);
* ...
* asio::ip::tcp::endpoint endpoint = socket.local_endpoint();
* @endcode
*/
  endpoint_type local_endpoint() const
  {
    // Ask the service for the locally bound endpoint; throw on failure,
    // otherwise return the endpoint by value.
    asio::error_code ec;
    endpoint_type ep = this->get_service().local_endpoint(
        this->get_implementation(), ec);
    asio::detail::throw_error(ec, "local_endpoint");
    return ep;
  }
/// Get the local endpoint of the socket.
/**
* This function is used to obtain the locally bound endpoint of the socket.
*
* @param ec Set to indicate what error occurred, if any.
*
* @returns An object that represents the local endpoint of the socket.
* Returns a default-constructed endpoint object if an error occurred.
*
* @par Example
* @code
* asio::ip::tcp::socket socket(io_service);
* ...
* asio::error_code ec;
* asio::ip::tcp::endpoint endpoint = socket.local_endpoint(ec);
* if (ec)
* {
* // An error occurred.
* }
* @endcode
*/
  endpoint_type local_endpoint(asio::error_code& ec) const
  {
    // Non-throwing overload: on error, ec is set and (per the documentation
    // above) a default-constructed endpoint is returned.
    return this->get_service().local_endpoint(this->get_implementation(), ec);
  }
/// Get the remote endpoint of the socket.
/**
* This function is used to obtain the remote endpoint of the socket.
*
* @returns An object that represents the remote endpoint of the socket.
*
* @throws asio::system_error Thrown on failure.
*
* @par Example
* @code
* asio::ip::tcp::socket socket(io_service);
* ...
* asio::ip::tcp::endpoint endpoint = socket.remote_endpoint();
* @endcode
*/
  endpoint_type remote_endpoint() const
  {
    // Ask the service for the peer's endpoint; throw on failure, otherwise
    // return the endpoint by value.
    asio::error_code ec;
    endpoint_type ep = this->get_service().remote_endpoint(
        this->get_implementation(), ec);
    asio::detail::throw_error(ec, "remote_endpoint");
    return ep;
  }
/// Get the remote endpoint of the socket.
/**
* This function is used to obtain the remote endpoint of the socket.
*
* @param ec Set to indicate what error occurred, if any.
*
* @returns An object that represents the remote endpoint of the socket.
* Returns a default-constructed endpoint object if an error occurred.
*
* @par Example
* @code
* asio::ip::tcp::socket socket(io_service);
* ...
* asio::error_code ec;
* asio::ip::tcp::endpoint endpoint = socket.remote_endpoint(ec);
* if (ec)
* {
* // An error occurred.
* }
* @endcode
*/
  endpoint_type remote_endpoint(asio::error_code& ec) const
  {
    // Non-throwing overload: on error, ec is set and (per the documentation
    // above) a default-constructed endpoint is returned.
    return this->get_service().remote_endpoint(this->get_implementation(), ec);
  }
/// Disable sends or receives on the socket.
/**
* This function is used to disable send operations, receive operations, or
* both.
*
* @param what Determines what types of operation will no longer be allowed.
*
* @throws asio::system_error Thrown on failure.
*
* @par Example
* Shutting down the send side of the socket:
* @code
* asio::ip::tcp::socket socket(io_service);
* ...
* socket.shutdown(asio::ip::tcp::socket::shutdown_send);
* @endcode
*/
  void shutdown(shutdown_type what)
  {
    // Throwing overload: disable sends and/or receives as selected by
    // 'what'; any failure becomes a thrown asio::system_error.
    asio::error_code ec;
    this->get_service().shutdown(this->get_implementation(), what, ec);
    asio::detail::throw_error(ec, "shutdown");
  }
/// Disable sends or receives on the socket.
/**
* This function is used to disable send operations, receive operations, or
* both.
*
* @param what Determines what types of operation will no longer be allowed.
*
* @param ec Set to indicate what error occurred, if any.
*
* @par Example
* Shutting down the send side of the socket:
* @code
* asio::ip::tcp::socket socket(io_service);
* ...
* asio::error_code ec;
* socket.shutdown(asio::ip::tcp::socket::shutdown_send, ec);
* if (ec)
* {
* // An error occurred.
* }
* @endcode
*/
  asio::error_code shutdown(shutdown_type what,
      asio::error_code& ec)
  {
    // Non-throwing overload: the outcome is reported through ec and also
    // returned for convenience.
    return this->get_service().shutdown(this->get_implementation(), what, ec);
  }
protected:
  /// Protected destructor to prevent deletion through this type.
  // Deliberately protected and non-virtual: basic_socket is a base class
  // whose instances are destroyed via the derived type, never through a
  // pointer to basic_socket.
  ~basic_socket()
  {
  }
};
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_BASIC_SOCKET_HPP
| julien3/vertxbuspp | vertxbuspp/asio/include/asio/basic_socket.hpp | C++ | apache-2.0 | 50,601 |
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !cgo
package so_test
// Nothing to test.
| akutz/go | misc/cgo/testsovar/noso_test.go | GO | bsd-3-clause | 213 |
{{+partials.standard_apps_api api:apis.apps.document_scan intro:intros.documentScan chrome_os_only:true/}}
| Chilledheart/chromium | chrome/common/extensions/docs/templates/public/apps/documentScan.html | HTML | bsd-3-clause | 107 |
<?php
/**
* Zend Framework (http://framework.zend.com/)
*
* @link http://github.com/zendframework/zf2 for the canonical source repository
* @copyright Copyright (c) 2005-2013 Zend Technologies USA Inc. (http://www.zend.com)
* @license http://framework.zend.com/license/new-bsd New BSD License
* @package Zend_Validator
*/
namespace ZendTest\Validator\File;
use Zend\Validator\File\ExcludeMimeType;
/**
* ExcludeMimeType testbed
*
* @category Zend
* @package Zend_Validator_File
* @subpackage UnitTests
* @group Zend_Validator
*/
class ExcludeMimeTypeTest extends \PHPUnit_Framework_TestCase
{
    /**
     * Provides option sets, isValid() parameters and expected results for
     * the basic behaviour tests.
     *
     * @return array
     */
    public function basicBehaviorDataProvider()
    {
        $testFile = __DIR__ . '/_files/picture.jpg';
        $fileUpload = array(
            'tmp_name' => $testFile, 'name' => basename($testFile),
            'size' => 200, 'error' => 0, 'type' => 'image/jpeg'
        );
        return array(
            // Options, isValid Param, Expected value
            array('image/gif', $fileUpload, true),
            array('image', $fileUpload, false),
            array('test/notype', $fileUpload, true),
            array('image/gif, image/jpeg', $fileUpload, false),
            array(array('image/vasa', 'image/gif'), $fileUpload, true),
            array(array('image/gif', 'jpeg'), $fileUpload, false),
            array(array('image/gif', 'gif'), $fileUpload, true),
        );
    }
    /**
     * Ensures that the validator follows expected behavior when given a
     * $_FILES-style array.
     *
     * @dataProvider basicBehaviorDataProvider
     * @return void
     */
    public function testBasic($options, $isValidParam, $expected)
    {
        $validator = new ExcludeMimeType($options);
        $validator->enableHeaderCheck();
        $this->assertEquals($expected, $validator->isValid($isValidParam));
    }
    /**
     * Ensures that the validator follows expected behavior for the legacy
     * Zend\Transfer API (filename passed separately from the file info).
     *
     * @dataProvider basicBehaviorDataProvider
     * @return void
     */
    public function testLegacy($options, $isValidParam, $expected)
    {
        if (is_array($isValidParam)) {
            $validator = new ExcludeMimeType($options);
            $validator->enableHeaderCheck();
            $this->assertEquals($expected, $validator->isValid($isValidParam['tmp_name'], $isValidParam));
        }
    }
    /**
     * Ensures that getMimeType() returns the configured types both as a
     * comma-joined string (default) and as an array (when passed true).
     *
     * @return void
     */
    public function testGetMimeType()
    {
        $validator = new ExcludeMimeType('image/gif');
        $this->assertEquals('image/gif', $validator->getMimeType());
        // Reuse a single instance for both representation checks; the
        // original test redundantly constructed the same validator twice.
        $validator = new ExcludeMimeType(array('image/gif', 'video', 'text/test'));
        $this->assertEquals('image/gif,video,text/test', $validator->getMimeType());
        $this->assertEquals(array('image/gif', 'video', 'text/test'), $validator->getMimeType(true));
    }
    /**
     * Ensures that setMimeType() replaces the configured types, accepting a
     * single string, a comma-separated string, or an array.
     *
     * @return void
     */
    public function testSetMimeType()
    {
        $validator = new ExcludeMimeType('image/gif');
        $validator->setMimeType('image/jpeg');
        $this->assertEquals('image/jpeg', $validator->getMimeType());
        $this->assertEquals(array('image/jpeg'), $validator->getMimeType(true));
        $validator->setMimeType('image/gif, text/test');
        $this->assertEquals('image/gif,text/test', $validator->getMimeType());
        $this->assertEquals(array('image/gif', 'text/test'), $validator->getMimeType(true));
        $validator->setMimeType(array('video/mpeg', 'gif'));
        $this->assertEquals('video/mpeg,gif', $validator->getMimeType());
        $this->assertEquals(array('video/mpeg', 'gif'), $validator->getMimeType(true));
    }
    /**
     * Ensures that addMimeType() appends types (string, comma-separated
     * string, or array) and that an empty string adds nothing.
     *
     * @return void
     */
    public function testAddMimeType()
    {
        $validator = new ExcludeMimeType('image/gif');
        $validator->addMimeType('text');
        $this->assertEquals('image/gif,text', $validator->getMimeType());
        $this->assertEquals(array('image/gif', 'text'), $validator->getMimeType(true));
        $validator->addMimeType('jpg, to');
        $this->assertEquals('image/gif,text,jpg,to', $validator->getMimeType());
        $this->assertEquals(array('image/gif', 'text', 'jpg', 'to'), $validator->getMimeType(true));
        $validator->addMimeType(array('zip', 'ti'));
        $this->assertEquals('image/gif,text,jpg,to,zip,ti', $validator->getMimeType());
        $this->assertEquals(array('image/gif', 'text', 'jpg', 'to', 'zip', 'ti'), $validator->getMimeType(true));
        // An empty string must leave the configured types untouched.
        $validator->addMimeType('');
        $this->assertEquals('image/gif,text,jpg,to,zip,ti', $validator->getMimeType());
        $this->assertEquals(array('image/gif', 'text', 'jpg', 'to', 'zip', 'ti'), $validator->getMimeType(true));
    }
}
| asheehan/zendTodo | vendor/zendframework/zendframework/tests/ZendTest/Validator/File/ExcludeMimeTypeTest.php | PHP | bsd-3-clause | 5,086 |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<link rel="shortcut icon" type="image/ico" href="http://www.datatables.net/media/images/favicon.ico" />
<title>DataTables example</title>
<style type="text/css" title="currentStyle">
@import "../../media/css/demo_page.css";
@import "../../media/css/demo_table.css";
</style>
<script type="text/javascript" language="javascript" src="../../media/js/jquery.js"></script>
<script type="text/javascript" language="javascript" src="../../media/js/jquery.dataTables.js"></script>
<script type="text/javascript" charset="utf-8">
$(document).ready(function() {
$('#example').dataTable( {
"fnFooterCallback": function ( nRow, aaData, iStart, iEnd, aiDisplay ) {
/*
* Calculate the total market share for all browsers in this table (ie inc. outside
* the pagination)
*/
var iTotalMarket = 0;
for ( var i=0 ; i<aaData.length ; i++ )
{
iTotalMarket += aaData[i][4]*1;
}
/* Calculate the market share for browsers on this page */
var iPageMarket = 0;
for ( var i=iStart ; i<iEnd ; i++ )
{
iPageMarket += aaData[ aiDisplay[i] ][4]*1;
}
/* Modify the footer row to match what we want */
var nCells = nRow.getElementsByTagName('th');
nCells[1].innerHTML = parseInt(iPageMarket * 100)/100 +
'% ('+ parseInt(iTotalMarket * 100)/100 +'% total)';
}
} );
} );
</script>
</head>
<body id="dt_example">
<div id="container">
<div class="full_width big">
DataTables footer callback example
</div>
<h1>Preamble</h1>
<p>Using DataTables' header and footer callback functions (fnHeaderCallback() and fnFooterCallback()), you can perform some powerful and useful data manipulation. The example below shows how a callback function can be used to total up visible (and hidden) data, taking into account all of DataTables' features (pagination, filtering etc.).</p>
<h1>Live example</h1>
<div id="demo">
<table cellpadding="0" cellspacing="0" border="0" class="display" id="example">
<thead>
<tr>
<th>Rendering engine</th>
<th>Browser</th>
<th>Engine version</th>
<th>CSS grade</th>
<th style="width: 150px">Market share (%)</th>
</tr>
</thead>
<tbody>
<tr class="gradeX">
<td>Trident</td>
<td>
Internet
Explorer
4.0
</td>
<td class="center">4</td>
<td class="center">X</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeC">
<td>Trident</td>
<td>Internet
Explorer 5.0</td>
<td class="center">5</td>
<td class="center">C</td>
<td class="center">0.1</td>
</tr>
<tr class="gradeA">
<td>Trident</td>
<td>Internet
Explorer 5.5</td>
<td class="center">5.5</td>
<td class="center">A</td>
<td class="center">0.5</td>
</tr>
<tr class="gradeA">
<td>Trident</td>
<td>Internet
Explorer 6</td>
<td class="center">6</td>
<td class="center">A</td>
<td class="center">36</td>
</tr>
<tr class="gradeA">
<td>Trident</td>
<td>Internet Explorer 7</td>
<td class="center">7</td>
<td class="center">A</td>
<td class="center">41</td>
</tr>
<tr class="gradeA">
<td>Trident</td>
<td>AOL browser (AOL desktop)</td>
<td class="center">6</td>
<td class="center">A</td>
<td class="center">1</td>
</tr>
<tr class="gradeA">
<td>Gecko</td>
<td>Firefox 1.0</td>
<td class="center">1.7</td>
<td class="center">A</td>
<td class="center">0.1</td>
</tr>
<tr class="gradeA">
<td>Gecko</td>
<td>Firefox 1.5</td>
<td class="center">1.8</td>
<td class="center">A</td>
<td class="center">0.5</td>
</tr>
<tr class="gradeA">
<td>Gecko</td>
<td>Firefox 2.0</td>
<td class="center">1.8</td>
<td class="center">A</td>
<td class="center">7</td>
</tr>
<tr class="gradeA">
<td>Gecko</td>
<td>Firefox 3.0</td>
<td class="center">1.9</td>
<td class="center">A</td>
<td class="center">9</td>
</tr>
<tr class="gradeA">
<td>Gecko</td>
<td>Camino 1.0</td>
<td class="center">1.8</td>
<td class="center">A</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeA">
<td>Gecko</td>
<td>Camino 1.5</td>
<td class="center">1.8</td>
<td class="center">A</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeA">
<td>Gecko</td>
<td>Netscape 7.2</td>
<td class="center">1.7</td>
<td class="center">A</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeA">
<td>Gecko</td>
<td>Netscape Browser 8</td>
<td class="center">1.7</td>
<td class="center">A</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeA">
<td>Gecko</td>
<td>Netscape Navigator 9</td>
<td class="center">1.8</td>
<td class="center">A</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeA">
<td>Gecko</td>
<td>Mozilla 1.0</td>
<td class="center">1</td>
<td class="center">A</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeA">
<td>Gecko</td>
<td>Mozilla 1.1</td>
<td class="center">1.1</td>
<td class="center">A</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeA">
<td>Gecko</td>
<td>Mozilla 1.2</td>
<td class="center">1.2</td>
<td class="center">A</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeA">
<td>Gecko</td>
<td>Mozilla 1.3</td>
<td class="center">1.3</td>
<td class="center">A</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeA">
<td>Gecko</td>
<td>Mozilla 1.4</td>
<td class="center">1.4</td>
<td class="center">A</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeA">
<td>Gecko</td>
<td>Mozilla 1.5</td>
<td class="center">1.5</td>
<td class="center">A</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeA">
<td>Gecko</td>
<td>Mozilla 1.6</td>
<td class="center">1.6</td>
<td class="center">A</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeA">
<td>Gecko</td>
<td>Mozilla 1.7</td>
<td class="center">1.7</td>
<td class="center">A</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeA">
<td>Gecko</td>
<td>Mozilla 1.8</td>
<td class="center">1.8</td>
<td class="center">A</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeA">
<td>Gecko</td>
<td>Seamonkey 1.1</td>
<td class="center">1.8</td>
<td class="center">A</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeA">
<td>Gecko</td>
<td>Epiphany 2.20</td>
<td class="center">1.8</td>
<td class="center">A</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeA">
<td>Webkit</td>
<td>Safari 1.2</td>
<td class="center">125.5</td>
<td class="center">A</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeA">
<td>Webkit</td>
<td>Safari 1.3</td>
<td class="center">312.8</td>
<td class="center">A</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeA">
<td>Webkit</td>
<td>Safari 2.0</td>
<td class="center">419.3</td>
<td class="center">A</td>
<td class="center">1</td>
</tr>
<tr class="gradeA">
<td>Webkit</td>
<td>Safari 3.0</td>
<td class="center">522.1</td>
<td class="center">A</td>
<td class="center">2.2</td>
</tr>
<tr class="gradeA">
<td>Webkit</td>
<td>OmniWeb 5.5</td>
<td class="center">420</td>
<td class="center">A</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeA">
<td>Webkit</td>
<td>iPod Touch / iPhone</td>
<td class="center">420.1</td>
<td class="center">A</td>
<td class="center">0.05</td>
</tr>
<tr class="gradeA">
<td>Webkit</td>
<td>S60</td>
<td class="center">413</td>
<td class="center">A</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeA">
<td>Presto</td>
<td>Opera 7.0</td>
<td class="center">-</td>
<td class="center">A</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeA">
<td>Presto</td>
<td>Opera 7.5</td>
<td class="center">-</td>
<td class="center">A</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeA">
<td>Presto</td>
<td>Opera 8.0</td>
<td class="center">-</td>
<td class="center">A</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeA">
<td>Presto</td>
<td>Opera 8.5</td>
<td class="center">-</td>
<td class="center">A</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeA">
<td>Presto</td>
<td>Opera 9.0</td>
<td class="center">-</td>
<td class="center">A</td>
<td class="center">0.1</td>
</tr>
<tr class="gradeA">
<td>Presto</td>
<td>Opera 9.2</td>
<td class="center">-</td>
<td class="center">A</td>
<td class="center">0.2</td>
</tr>
<tr class="gradeA">
<td>Presto</td>
<td>Opera 9.5</td>
<td class="center">-</td>
<td class="center">A</td>
<td class="center">0.8</td>
</tr>
<tr class="gradeA">
<td>Presto</td>
<td>Opera for Wii</td>
<td class="center">-</td>
<td class="center">A</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeA">
<td>Presto</td>
<td>Nokia N800</td>
<td class="center">-</td>
<td class="center">A</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeA">
<td>Presto</td>
<td>Nintendo DS browser</td>
<td class="center">8.5</td>
<td class="center">C/A<sup>1</sup></td>
<td class="center">0.01</td>
</tr>
<tr class="gradeC">
<td>KHTML</td>
<td>Konqureror 3.1</td>
<td class="center">3.1</td>
<td class="center">C</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeA">
<td>KHTML</td>
<td>Konqureror 3.3</td>
<td class="center">3.3</td>
<td class="center">A</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeA">
<td>KHTML</td>
<td>Konqureror 3.5</td>
<td class="center">3.5</td>
<td class="center">A</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeX">
<td>Tasman</td>
<td>Internet Explorer 4.5</td>
<td class="center">-</td>
<td class="center">X</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeC">
<td>Tasman</td>
<td>Internet Explorer 5.1</td>
<td class="center">1</td>
<td class="center">C</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeC">
<td>Tasman</td>
<td>Internet Explorer 5.2</td>
<td class="center">1</td>
<td class="center">C</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeA">
<td>Misc</td>
<td>NetFront 3.1</td>
<td class="center">-</td>
<td class="center">C</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeA">
<td>Misc</td>
<td>NetFront 3.4</td>
<td class="center">-</td>
<td class="center">A</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeX">
<td>Misc</td>
<td>Dillo 0.8</td>
<td class="center">-</td>
<td class="center">X</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeX">
<td>Misc</td>
<td>Links</td>
<td class="center">-</td>
<td class="center">X</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeX">
<td>Misc</td>
<td>Lynx</td>
<td class="center">-</td>
<td class="center">X</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeC">
<td>Misc</td>
<td>IE Mobile</td>
<td class="center">-</td>
<td class="center">C</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeC">
<td>Misc</td>
<td>PSP browser</td>
<td class="center">-</td>
<td class="center">C</td>
<td class="center">0.01</td>
</tr>
<tr class="gradeU">
<td>Other browsers</td>
<td>All others</td>
<td class="center">-</td>
<td class="center">U</td>
<td class="center">0.04</td>
</tr>
</tbody>
<tfoot>
<tr>
<th style="text-align:right" colspan="4">Total:</th>
<th></th>
</tr>
</tfoot>
</table>
</div>
<div class="spacer"></div>
<p class="space">Warning! The market share information given in this table is <b>fabricated</b> using a combination of (mild) judgement, the <a href="http://www.mail-archive.com/backstage@lists.bbc.co.uk/msg03899.html">BBC Browser Statistics</a> information and statistics from <a href="http://www.thecounter.com/stats/2008/August/browser.php">TheCounter.com</a>. The lowest usage given to any one browser is 0.01 for the purposes of this example.</p>
<h1>Initialisation code</h1>
<pre class="brush: js;">$(document).ready(function() {
$('#example').dataTable( {
"fnFooterCallback": function ( nRow, aaData, iStart, iEnd, aiDisplay ) {
/*
* Calculate the total market share for all browsers in this table (ie inc. outside
* the pagination)
*/
var iTotalMarket = 0;
for ( var i=0 ; i<aaData.length ; i++ )
{
iTotalMarket += aaData[i][4]*1;
}
/* Calculate the market share for browsers on this page */
var iPageMarket = 0;
for ( var i=iStart ; i<iEnd ; i++ )
{
iPageMarket += aaData[ aiDisplay[i] ][4]*1;
}
/* Modify the footer row to match what we want */
var nCells = nRow.getElementsByTagName('th');
nCells[1].innerHTML = parseInt(iPageMarket * 100)/100 +
'% ('+ parseInt(iTotalMarket * 100)/100 +'% total)';
}
} );
} );</pre>
<style type="text/css">
@import "../examples_support/syntax/css/shCore.css";
</style>
<script type="text/javascript" language="javascript" src="../examples_support/syntax/js/shCore.js"></script>
<h1>Other examples</h1>
<div class="demo_links">
<h2>Basic initialisation</h2>
<ul>
<li><a href="../basic_init/zero_config.html">Zero configuration</a></li>
<li><a href="../basic_init/filter_only.html">Feature enablement</a></li>
<li><a href="../basic_init/table_sorting.html">Sorting data</a></li>
<li><a href="../basic_init/multi_col_sort.html">Multi-column sorting</a></li>
<li><a href="../basic_init/multiple_tables.html">Multiple tables</a></li>
<li><a href="../basic_init/hidden_columns.html">Hidden columns</a></li>
<li><a href="../basic_init/complex_header.html">Complex headers - grouping with colspan</a></li>
<li><a href="../basic_init/dom.html">DOM positioning</a></li>
<li><a href="../basic_init/flexible_width.html">Flexible table width</a></li>
<li><a href="../basic_init/state_save.html">State saving</a></li>
<li><a href="../basic_init/alt_pagination.html">Alternative pagination styles</a></li>
<li>Scrolling: <br>
<a href="../basic_init/scroll_x.html">Horizontal</a> /
<a href="../basic_init/scroll_y.html">Vertical</a> /
<a href="../basic_init/scroll_xy.html">Both</a> /
<a href="../basic_init/scroll_y_theme.html">Themed</a> /
<a href="../basic_init/scroll_y_infinite.html">Infinite</a>
</li>
<li><a href="../basic_init/language.html">Change language information (internationalisation)</a></li>
<li><a href="../basic_init/themes.html">ThemeRoller themes (Smoothness)</a></li>
</ul>
<h2>Advanced initialisation</h2>
<ul>
<li>Events: <br>
<a href="../advanced_init/events_live.html">Live events</a> /
<a href="../advanced_init/events_pre_init.html">Pre-init</a> /
<a href="../advanced_init/events_post_init.html">Post-init</a>
</li>
<li><a href="../advanced_init/column_render.html">Column rendering</a></li>
<li><a href="../advanced_init/html_sort.html">Sorting without HTML tags</a></li>
<li><a href="../advanced_init/dom_multiple_elements.html">Multiple table controls (sDom)</a></li>
<li><a href="../advanced_init/length_menu.html">Defining length menu options</a></li>
<li><a href="../advanced_init/complex_header.html">Complex headers and hidden columns</a></li>
<li><a href="../advanced_init/dom_toolbar.html">Custom toolbar (element) around table</a></li>
<li><a href="../advanced_init/highlight.html">Row highlighting with CSS</a></li>
<li><a href="../advanced_init/row_grouping.html">Row grouping</a></li>
<li><a href="../advanced_init/row_callback.html">Row callback</a></li>
<li><a href="../advanced_init/footer_callback.html">Footer callback</a></li>
<li><a href="../advanced_init/sorting_control.html">Control sorting direction of columns</a></li>
<li><a href="../advanced_init/language_file.html">Change language information from a file (internationalisation)</a></li>
<li><a href="../advanced_init/defaults.html">Setting defaults</a></li>
<li><a href="../advanced_init/localstorage.html">State saving with localStorage</a></li>
<li><a href="../advanced_init/dt_events.html">Custom events</a></li>
</ul>
<h2>API</h2>
<ul>
<li><a href="../api/add_row.html">Dynamically add a new row</a></li>
<li><a href="../api/multi_filter.html">Individual column filtering (using "input" elements)</a></li>
<li><a href="../api/multi_filter_select.html">Individual column filtering (using "select" elements)</a></li>
<li><a href="../api/highlight.html">Highlight rows and columns</a></li>
<li><a href="../api/row_details.html">Show and hide details about a particular record</a></li>
<li><a href="../api/select_row.html">User selectable rows (multiple rows)</a></li>
<li><a href="../api/select_single_row.html">User selectable rows (single row) and delete rows</a></li>
<li><a href="../api/editable.html">Editable rows (with jEditable)</a></li>
<li><a href="../api/form.html">Submit form with elements in table</a></li>
<li><a href="../api/counter_column.html">Index column (static number column)</a></li>
<li><a href="../api/show_hide.html">Show and hide columns dynamically</a></li>
<li><a href="../api/api_in_init.html">API function use in initialisation object (callback)</a></li>
<li><a href="../api/tabs_and_scrolling.html">DataTables scrolling and tabs</a></li>
<li><a href="../api/regex.html">Regular expression filtering</a></li>
</ul>
</div>
<div class="demo_links">
<h2>Data sources</h2>
<ul>
<li><a href="../data_sources/dom.html">DOM</a></li>
<li><a href="../data_sources/js_array.html">Javascript array</a></li>
<li><a href="../data_sources/ajax.html">Ajax source</a></li>
<li><a href="../data_sources/server_side.html">Server side processing</a></li>
</ul>
<h2>Server-side processing</h2>
<ul>
<li><a href="../server_side/server_side.html">Obtain server-side data</a></li>
<li><a href="../server_side/custom_vars.html">Add extra HTTP variables</a></li>
<li><a href="../server_side/post.html">Use HTTP POST</a></li>
<li><a href="../server_side/ids.html">Automatic addition of IDs and classes to rows</a></li>
<li><a href="../server_side/object_data.html">Reading table data from objects</a></li>
<li><a href="../server_side/row_details.html">Show and hide details about a particular record</a></li>
<li><a href="../server_side/select_rows.html">User selectable rows (multiple rows)</a></li>
<li><a href="../server_side/jsonp.html">JSONP for a cross domain data source</a></li>
<li><a href="../server_side/editable.html">jEditable integration with DataTables</a></li>
<li><a href="../server_side/defer_loading.html">Deferred loading of Ajax data</a></li>
<li><a href="../server_side/pipeline.html">Pipelining data (reduce Ajax calls for paging)</a></li>
</ul>
<h2>Ajax data source</h2>
<ul>
<li><a href="../ajax/ajax.html">Ajax sourced data (array of arrays)</a></li>
<li><a href="../ajax/objects.html">Ajax sourced data (array of objects)</a></li>
<li><a href="../ajax/defer_render.html">Deferred DOM creation for extra speed</a></li>
<li><a href="../ajax/null_data_source.html">Empty data source columns</a></li>
<li><a href="../ajax/custom_data_property.html">Use a data source other than aaData (the default)</a></li>
<li><a href="../ajax/objects_subarrays.html">Read column data from sub-arrays</a></li>
<li><a href="../ajax/deep.html">Read column data from deeply nested properties</a></li>
</ul>
<h2>Plug-ins</h2>
<ul>
<li><a href="../plug-ins/plugin_api.html">Add custom API functions</a></li>
<li><a href="../plug-ins/sorting_plugin.html">Sorting and automatic type detection</a></li>
<li><a href="../plug-ins/sorting_sType.html">Sorting without automatic type detection</a></li>
<li><a href="../plug-ins/paging_plugin.html">Custom pagination controls</a></li>
<li><a href="../plug-ins/range_filtering.html">Range filtering / custom filtering</a></li>
<li><a href="../plug-ins/dom_sort.html">Live DOM sorting</a></li>
<li><a href="../plug-ins/html_sort.html">Automatic HTML type detection</a></li>
</ul>
</div>
<div id="footer" class="clear" style="text-align:center;">
<p>
Please refer to the <a href="http://www.datatables.net/usage">DataTables documentation</a> for full information about its API properties and methods.<br>
Additionally, there are a wide range of <a href="http://www.datatables.net/extras">extras</a> and <a href="http://www.datatables.net/plug-ins">plug-ins</a> which extend the capabilities of DataTables.
</p>
<span style="font-size:10px;">
DataTables designed and created by <a href="http://www.sprymedia.co.uk">Allan Jardine</a> © 2007-2011<br>
DataTables is dual licensed under the <a href="http://www.datatables.net/license_gpl2">GPL v2 license</a> or a <a href="http://www.datatables.net/license_bsd">BSD (3-point) license</a>.
</span>
</div>
</div>
</body>
</html> | stevemoore113/ch_web_- | 資源/Facemash/DataTables-1.9.4/examples/advanced_init/footer_callback.html | HTML | mit | 21,772 |
require "spec_helper"
describe Mongoid::Changeable do
describe "#attribute_change" do
context "when the attribute has changed from the persisted value" do
context "when using the setter" do
let(:person) do
Person.new(title: "Grand Poobah").tap(&:move_changes)
end
before do
person.title = "Captain Obvious"
end
it "returns an array of the old value and new value" do
expect(person.send(:attribute_change, "title")).to eq(
[ "Grand Poobah", "Captain Obvious" ]
)
end
it "allows access via (attribute)_change" do
expect(person.title_change).to eq(
[ "Grand Poobah", "Captain Obvious" ]
)
end
context "when the field is aliased" do
let(:person) do
Person.new(test: "Aliased 1").tap(&:move_changes)
end
before do
person.test = "Aliased 2"
end
it "returns an array of the old value and new value" do
expect(person.send(:attribute_change, "test")).to eq(
[ "Aliased 1", "Aliased 2" ]
)
end
it "allows access via (attribute)_change" do
expect(person.test_change).to eq(
[ "Aliased 1", "Aliased 2" ]
)
end
end
end
context "when using [] methods" do
let(:person) do
Person.new(title: "Grand Poobah").tap(&:move_changes)
end
before do
person[:title] = "Captain Obvious"
end
it "returns an array of the old value and new value" do
expect(person.send(:attribute_change, "title")).to eq(
[ "Grand Poobah", "Captain Obvious" ]
)
end
it "allows access via (attribute)_change" do
expect(person.title_change).to eq(
[ "Grand Poobah", "Captain Obvious" ]
)
end
end
end
context "when the attribute has changed from the default value" do
context "when using the setter" do
let(:person) do
Person.new(pets: true)
end
it "returns an array of nil and new value" do
expect(person.send(:attribute_change, "pets")).to eq([ nil, true ])
end
it "allows access via (attribute)_change" do
expect(person.pets_change).to eq([ nil, true ])
end
end
context "when using [] methods" do
context "when the field is defined" do
let(:person) do
Person.new
end
before do
person[:pets] = true
end
it "returns an array of nil and new value" do
expect(person.send(:attribute_change, "pets")).to eq([ nil, true ])
end
it "allows access via (attribute)_change" do
expect(person.pets_change).to eq([ nil, true ])
end
end
context "when the field is not defined" do
let(:person) do
Person.new
end
before do
person[:t] = "test"
end
it "returns an array of nil and new value" do
expect(person.send(:attribute_change, "t")).to eq([ nil, "test" ])
end
end
end
end
context "when the attribute changes multiple times" do
let(:person) do
Person.new(title: "Grand Poobah").tap(&:move_changes)
end
before do
person.title = "Captain Obvious"
person.title = "Dark Helmet"
end
it "returns an array of the original value and new value" do
expect(person.send(:attribute_change, "title")).to eq(
[ "Grand Poobah", "Dark Helmet" ]
)
end
it "allows access via (attribute)_change" do
expect(person.title_change).to eq(
[ "Grand Poobah", "Dark Helmet" ]
)
end
end
context "when the attribute is modified in place" do
context "when the attribute is an array" do
let(:person) do
Person.new(aliases: [ "Grand Poobah" ]).tap(&:move_changes)
end
before do
person.aliases[0] = "Dark Helmet"
end
it "returns an array of the original value and new value" do
expect(person.send(:attribute_change, "aliases")).to eq(
[[ "Grand Poobah" ], [ "Dark Helmet" ]]
)
end
it "allows access via (attribute)_change" do
expect(person.aliases_change).to eq(
[[ "Grand Poobah" ], [ "Dark Helmet" ]]
)
end
context "when the attribute changes multiple times" do
before do
person.aliases << "Colonel Sanders"
end
it "returns an array of the original value and new value" do
expect(person.send(:attribute_change, "aliases")).to eq(
[[ "Grand Poobah" ], [ "Dark Helmet", "Colonel Sanders" ]]
)
end
end
end
context "when the attribute is a hash" do
let(:person) do
Person.new(map: { location: "Home" }).tap(&:move_changes)
end
before do
person.map[:location] = "Work"
end
it "returns an array of the original value and new value" do
expect(person.send(:attribute_change, "map")).to eq(
[{ location: "Home" }, { location: "Work" }]
)
end
it "allows access via (attribute)_change" do
expect(person.map_change).to eq(
[{ location: "Home" }, { location: "Work" }]
)
end
context "when the attribute changes multiple times" do
before do
person.map[:lat] = 20.0
end
it "returns an array of the original value and new value" do
expect(person.send(:attribute_change, "map")).to eq(
[{ location: "Home" }, { location: "Work", lat: 20.0 }]
)
end
end
context "when the values are arrays" do
let(:map) do
{
"stack1" => [ 1, 2, 3, 4 ],
"stack2" => [ 1, 2, 3, 4 ],
"stack3" => [ 1, 2, 3, 4 ]
}
end
before do
person.map = map
person.move_changes
end
context "when reordering the arrays inline" do
before do
person.map["stack1"].reverse!
end
it "flags the attribute as changed" do
expect(person.send(:attribute_change, "map")).to eq(
[
{
"stack1" => [ 1, 2, 3, 4 ],
"stack2" => [ 1, 2, 3, 4 ],
"stack3" => [ 1, 2, 3, 4 ]
},
{
"stack1" => [ 4, 3, 2, 1 ],
"stack2" => [ 1, 2, 3, 4 ],
"stack3" => [ 1, 2, 3, 4 ]
},
]
)
end
end
end
end
end
context "when the attribute has not changed from the persisted value" do
let(:person) do
Person.new(title: nil)
end
it "returns nil" do
expect(person.send(:attribute_change, "title")).to be_nil
end
end
context "when the attribute has not changed from the default value" do
context "when the attribute differs from the persisted value" do
let(:person) do
Person.new
end
it "returns the change" do
expect(person.send(:attribute_change, "pets")).to eq([ nil, false ])
end
end
context "when the attribute does not differ from the persisted value" do
let(:person) do
Person.instantiate("pets" => false)
end
it "returns nil" do
expect(person.send(:attribute_change, "pets")).to be_nil
end
end
end
context "when the attribute has been set with the same value" do
let(:person) do
Person.new(title: "Grand Poobah").tap(&:move_changes)
end
before do
person.title = "Grand Poobah"
end
it "returns an empty array" do
expect(person.send(:attribute_change, "title")).to be_nil
end
end
context "when the attribute is removed" do
let(:person) do
Person.new(title: "Grand Poobah").tap(&:move_changes)
end
before do
person.remove_attribute(:title)
end
it "returns an empty array" do
expect(person.send(:attribute_change, "title")).to eq(
[ "Grand Poobah", nil ]
)
end
end
end
describe "#attribute_changed?" do
context "when the attribute has changed from the persisted value" do
let(:person) do
Person.new(title: "Grand Poobah")
end
before do
person.title = "Captain Obvious"
end
it "returns true" do
expect(person.send(:attribute_changed?, "title")).to be true
end
it "allows access via (attribute)_changed?" do
expect(person.title_changed?).to be true
end
context "when the field is aliased" do
let(:person) do
Person.new(test: "Aliased 1")
end
before do
person.test = "Aliased 2"
end
it "returns true" do
expect(person.send(:attribute_changed?, "test")).to be true
end
it "allows access via (attribute)_changed?" do
expect(person.test_changed?).to be true
end
end
end
context "when the attribute has changed from the default value" do
let(:person) do
Person.new
end
before do
person.pets = true
end
it "returns true" do
expect(person.send(:attribute_changed?, "pets")).to be true
end
it "allows access via (attribute)_changed?" do
expect(person.pets_changed?).to be true
end
end
context "when the attribute has not changed the persisted value" do
let!(:person) do
Person.new(title: "Grand Poobah").tap(&:move_changes)
end
it "returns false" do
expect(person.send(:attribute_changed?, "title")).to be false
end
end
context "when the attribute has not changed from the default value" do
context "when the attribute is not enumerable" do
context "when the attribute differs from the persisted value" do
let!(:person) do
Person.new
end
it "returns true" do
expect(person.send(:attribute_changed?, "pets")).to be true
end
end
context "when the attribute does not differ from the persisted value" do
let!(:person) do
Person.instantiate("pets" => false)
end
it "returns false" do
expect(person.send(:attribute_changed?, "pets")).to be false
end
end
end
context "when the attribute is an array" do
let!(:person) do
Person.new(aliases: [ "Bond" ])
end
context "when the array is only accessed" do
before do
person.move_changes
person.aliases
end
it "returns false" do
expect(person).to_not be_aliases_changed
end
end
end
context "when the attribute is a hash" do
let!(:person) do
Person.new(map: { key: "value" })
end
context "when the hash is only accessed" do
before do
person.move_changes
person.map
end
it "returns false" do
expect(person).to_not be_map_changed
end
end
end
end
end
describe "#attribute_changed_from_default?" do
context "when the attribute differs from the default value" do
let(:person) do
Person.new(age: 33)
end
it "returns true" do
expect(person).to be_age_changed_from_default
end
end
context "when the attribute is the same as the default" do
let(:person) do
Person.new
end
it "returns false" do
expect(person).to_not be_age_changed_from_default
end
end
end
describe "#attribute_was" do
context "when the attribute has changed from the persisted value" do
let(:person) do
Person.new(title: "Grand Poobah").tap(&:move_changes)
end
before do
person.title = "Captain Obvious"
end
it "returns the old value" do
expect(person.send(:attribute_was, "title")).to eq("Grand Poobah")
end
it "allows access via (attribute)_was" do
expect(person.title_was).to eq("Grand Poobah")
end
context "when the field is aliased" do
let(:person) do
Person.new(test: "Aliased 1").tap(&:move_changes)
end
before do
person.test = "Aliased 2"
end
it "returns the old value" do
expect(person.send(:attribute_was, "test")).to eq("Aliased 1")
end
it "allows access via (attribute)_was" do
expect(person.test_was).to eq("Aliased 1")
end
end
end
context "when the attribute has not changed from the persisted value" do
let!(:person) do
Person.new(title: "Grand Poobah").tap(&:move_changes)
end
it "returns the original value" do
expect(person.send(:attribute_was, "title")).to eq("Grand Poobah")
end
end
end
describe "#attribute_will_change!" do
let(:aliases) do
[ "007" ]
end
let(:person) do
Person.new(aliases: aliases, test: "Aliased 1")
end
before do
person.changed_attributes.clear
end
context "when the value has not changed" do
before do
person.aliases_will_change!
end
let(:changes) do
person.changes
end
it "does not return the value in the changes" do
expect(changes).to be_empty
end
it "is not flagged as changed" do
expect(person).to_not be_changed
end
end
context "when the value has changed" do
before do
person.aliases_will_change!
person.aliases << "008"
end
let(:changes) do
person.changes
end
it "returns the value in the changes" do
expect(changes).to eq({ "aliases" => [[ "007" ], [ "007", "008" ]] })
end
end
context "when the value is duplicable" do
context "when the attribute has not been cloned" do
before do
person.aliases_will_change!
end
let(:changed) do
person.changed_attributes
end
it "clones the value" do
expect(changed["aliases"]).to_not equal(aliases)
end
it "puts the old value in the changes" do
expect(changed["aliases"]).to eq(aliases)
end
end
context "when the attribute has been flagged" do
before do
person.changed_attributes["aliases"] = aliases
expect(aliases).to receive(:clone).never
person.aliases_will_change!
end
let(:changed) do
person.changed_attributes
end
it "does not clone the value" do
expect(changed["aliases"]).to equal(aliases)
end
it "retains the first value in the changes" do
expect(changed["aliases"]).to eq(aliases)
end
end
end
end
describe "#changed" do
context "when the document has changed" do
let(:person) do
Person.instantiate(title: "Grand Poobah")
end
before do
person.title = "Captain Obvious"
end
it "returns an array of changed field names" do
expect(person.changed).to include("title")
end
end
context "when the document has not changed" do
let(:person) do
Person.instantiate({})
end
it "does not include non changed fields" do
expect(person.changed).to_not include("title")
end
end
context "when the document is embedded" do
let(:person) do
Person.create
end
let!(:name) do
person.create_name(first_name: "Layne", last_name: "Staley")
end
context "when changing attributes via []" do
before do
person.name["a"] = "testing"
end
it "returns true" do
expect(person.name).to be_changed
end
end
end
end
describe "#changed?" do
context "when the document has changed" do
let(:person) do
Person.new(title: "Grand Poobah")
end
before do
person.title = "Captain Obvious"
end
it "returns true" do
expect(person).to be_changed
end
end
context "when a hash field has been accessed" do
context "when the field has not changed" do
let(:person) do
Person.create(map: { name: "value" })
end
before do
person.map
end
it "returns false" do
expect(person).to_not be_changed
end
end
context "when the field is changed" do
let(:person) do
Person.create(map: { name: "value" })
end
before do
person.map = { name: "another" }
end
it "returns true" do
expect(person).to be_changed
end
end
context "when a dynamic field is changed in place" do
let(:person) do
Person.create(other_name: { full: {first: 'first', last: 'last'} })
end
before do
person.other_name[:full][:first] = 'Name'
end
it "returns true" do
expect(person.changes).to_not be_empty
expect(person).to be_changed
end
end
end
context "when the document has not changed" do
let(:acolyte) do
Acolyte.instantiate("_id" => BSON::ObjectId.new)
end
it "returns false" do
expect(acolyte).to_not be_changed
end
end
context "when a child has changed" do
let(:person) do
Person.create
end
let!(:address) do
person.addresses.create(street: "hobrecht")
end
before do
address.number = 10
end
it "returns true" do
expect(person).to be_changed
end
end
context "when a deeply embedded child has changed" do
let(:person) do
Person.create
end
let(:address) do
person.addresses.create(street: "hobrecht")
end
let!(:location) do
address.locations.create(name: "home")
end
before do
location.name = "work"
end
it "returns true" do
expect(person).to be_changed
end
end
context "when a child is new" do
let(:person) do
Person.create
end
let!(:address) do
person.addresses.build(street: "hobrecht")
end
it "returns true" do
expect(person).to be_changed
end
end
context "when a deeply embedded child is new" do
let(:person) do
Person.create
end
let(:address) do
person.addresses.create(street: "hobrecht")
end
let!(:location) do
address.locations.build(name: "home")
end
it "returns true" do
expect(person).to be_changed
end
end
end
describe "#changes" do
context "when the document has changed" do
let(:person) do
Person.instantiate(title: "Grand Poobah")
end
before do
person.title = "Captain Obvious"
end
it "returns a hash of changes" do
expect(person.changes["title"]).to eq(
[ nil, "Captain Obvious" ]
)
end
it "returns a hash with indifferent access" do
expect(person.changes["title"]).to eq(
[ nil, "Captain Obvious" ]
)
end
end
context "when the document has not changed" do
let(:acolyte) do
Acolyte.instantiate("_id" => BSON::ObjectId.new)
end
it "returns an empty hash" do
expect(acolyte.changes).to be_empty
end
end
end
describe "#setters" do
context "when the document has changed" do
let(:person) do
Person.new(aliases: [ "007" ]).tap do |p|
p.new_record = false
p.move_changes
end
end
context "when an array field has changed" do
context "when the array has values removed" do
before do
person.aliases.delete_one("007")
end
let!(:setters) do
person.setters
end
it "contains array changes in the setters" do
expect(setters).to eq({ "aliases" => [] })
end
end
context "when the array has values added" do
before do
person.aliases << "008"
end
let!(:setters) do
person.setters
end
it "contains array changes in the setters" do
expect(setters).to eq({ "aliases" => [ "007", "008" ] })
end
end
context "when the array has changed completely" do
before do
person.aliases << "008"
person.aliases.delete_one("007")
end
let!(:setters) do
person.setters
end
it "does not contain array changes in the setters" do
expect(setters).to eq({ "aliases" => [ "008" ]})
end
end
end
context "when the document is a root document" do
let(:person) do
Person.instantiate(title: "Grand Poobah")
end
before do
person.title = "Captain Obvious"
end
it "returns a hash of field names and new values" do
expect(person.setters["title"]).to eq("Captain Obvious")
end
end
context "when the document is embedded" do
let(:person) do
Person.instantiate(title: "Grand Poobah")
end
let(:address) do
Address.instantiate(street: "Oxford St")
end
before do
person.addresses << address
person.instance_variable_set(:@new_record, false)
address.instance_variable_set(:@new_record, false)
address.street = "Bond St"
end
it "returns a hash of field names and new values" do
expect(address.setters).to eq(
{ "addresses.0.street" => "Bond St" }
)
end
context "when the document is embedded multiple levels" do
let(:location) do
Location.new(name: "Home")
end
before do
location.instance_variable_set(:@new_record, false)
address.locations << location
location.name = "Work"
end
it "returns the proper hash with locations" do
expect(location.setters).to eq(
{ "addresses.0.locations.0.name" => "Work" }
)
end
end
end
end
context "when the document has not changed" do
let(:acolyte) do
Acolyte.instantiate("_id" => BSON::ObjectId.new)
end
it "returns an empty hash" do
expect(acolyte.setters).to be_empty
end
end
end
describe "#previous_changes" do
let(:person) do
Person.new(title: "Grand Poobah")
end
before do
person.title = "Captain Obvious"
end
context "when the document has been saved" do
before do
person.save!
end
it "returns the changes before the save" do
expect(person.previous_changes["title"]).to eq(
[ nil, "Captain Obvious" ]
)
end
end
context "when the document has not been saved" do
it "returns an empty hash" do
expect(person.previous_changes).to be_empty
end
end
end
describe "#move_changes" do
let(:person) do
Person.new(title: "Sir")
end
before do
person.atomic_pulls["addresses"] = Address.new
person.atomic_unsets << Address.new
person.delayed_atomic_sets["addresses"] = Address.new
person.move_changes
end
it "clears the atomic pulls" do
expect(person.atomic_pulls).to be_empty
end
it "clears the atomic unsets" do
expect(person.atomic_unsets).to be_empty
end
it "clears the delayed atomic sets" do
expect(person.delayed_atomic_sets).to be_empty
end
it "clears the changed attributes" do
expect(person.changed_attributes).to be_empty
end
end
describe "#reset_attribute!" do
context "when the attribute has changed" do
let(:person) do
Person.instantiate(title: "Grand Poobah")
end
before do
person.title = "Captain Obvious"
person.send(:reset_attribute!, "title")
end
it "resets the value to the original" do
expect(person.title).to be_nil
end
it "allows access via reset_(attribute)!" do
expect(person.title).to be_nil
end
it "removes the field from the changes" do
expect(person.changed).to_not include("title")
end
context "when the field is aliased" do
let(:person) do
Person.instantiate(test: "Aliased 1")
end
before do
person.test = "Aliased 2"
person.send(:reset_attribute!, "test")
end
it "resets the value to the original" do
expect(person.test).to be_nil
end
it "removes the field from the changes" do
expect(person.changed).to_not include("test")
end
end
end
context "when the attribute has not changed" do
let(:person) do
Person.instantiate(title: "Grand Poobah")
end
before do
person.send(:reset_attribute!, "title")
end
it "does nothing" do
expect(person.title).to be_nil
end
end
end
context "when fields have been defined pre-dirty inclusion" do
let(:document) do
Dokument.new
end
it "defines a _change method" do
expect(document.updated_at_change).to be_nil
end
it "defines a _changed? method" do
expect(document.updated_at_changed?).to be false
end
it "defines a _changes method" do
expect(document.updated_at_was).to be_nil
end
end
context "when only embedded documents change" do
let!(:person) do
Person.create
end
context "when the child is an embeds one" do
context "when the child is new" do
let!(:name) do
person.build_name(first_name: "Gordon", last_name: "Ramsay")
end
it "flags the parent as changed" do
expect(person).to be_changed
end
end
context "when the child is modified" do
let!(:name) do
person.create_name(first_name: "Gordon", last_name: "Ramsay")
end
before do
name.first_name = "G"
end
it "flags the parent as changed" do
expect(person).to be_changed
end
end
context "when the child is not modified" do
let!(:name) do
person.create_name(first_name: "Gordon", last_name: "Ramsay")
end
it "does not flag the parent as changed" do
expect(person).to_not be_changed
end
end
end
context "when the child is an embeds many" do
context "when a child is new" do
let!(:address) do
person.addresses.build(street: "jakobstr.")
end
it "flags the parent as changed" do
expect(person).to be_changed
end
end
context "when a child is modified" do
let!(:address) do
person.addresses.create(street: "jakobstr.")
end
before do
address.city = "Berlin"
end
it "flags the parent as changed" do
expect(person).to be_changed
end
end
context "when no child is modified" do
let!(:address) do
person.addresses.create(street: "skalitzerstr.")
end
it "does not flag the parent as changed" do
expect(person).to_not be_changed
end
end
end
end
context "when changing a hash of hashes" do
let!(:person) do
Person.create(map: { "test" => {}})
end
before do
person.map["test"]["value"] = 10
end
it "records the changes" do
expect(person.changes).to eq(
{ "map" => [{ "test" => {}}, { "test" => { "value" => 10 }}]}
)
end
end
context "when modifying a many to many key" do
let!(:person) do
Person.create
end
let!(:preference) do
Preference.create(name: "dirty")
end
before do
person.update_attributes(preference_ids: [ preference.id ])
end
it "records the foreign key dirty changes" do
expect(person.previous_changes["preference_ids"]).to eq(
[nil, [ preference.id ]]
)
end
end
context "when accessing an array field" do
let!(:person) do
Person.create
end
let(:from_db) do
Person.find(person.id)
end
context "when the field is not changed" do
before do
from_db.preference_ids
end
it "flags the change" do
expect(from_db.changes["preference_ids"]).to eq([ nil, []])
end
it "does not include the changes in the setters" do
expect(from_db.setters).to be_empty
end
end
end
context "when reloading an unchanged document" do
let!(:person) do
Person.create
end
let(:from_db) do
Person.find(person.id)
end
before do
from_db.reload
end
it "clears the changed attributes" do
expect(from_db.changed_attributes).to be_empty
end
end
context "when fields are getting changed" do
let(:person) do
Person.create(
title: "MC",
some_dynamic_field: 'blah'
)
end
before do
person.title = "DJ"
person.write_attribute(:ssn, "222-22-2222")
person.some_dynamic_field = 'bloop'
end
it "marks the document as changed" do
expect(person).to be_changed
end
it "marks field changes" do
expect(person.changes).to eq({
"title" => [ "MC", "DJ" ],
"ssn" => [ nil, "222-22-2222" ],
"some_dynamic_field" => [ "blah", "bloop" ]
})
end
it "marks changed fields" do
expect(person.changed).to eq([ "title", "ssn", "some_dynamic_field" ])
end
it "marks the field as changed" do
expect(person.title_changed?).to be true
end
it "stores previous field values" do
expect(person.title_was).to eq("MC")
end
it "marks field changes" do
expect(person.title_change).to eq([ "MC", "DJ" ])
end
it "allows reset of field changes" do
person.reset_title!
expect(person.title).to eq("MC")
expect(person.changed).to eq([ "ssn", "some_dynamic_field" ])
end
context "after a save" do
before do
person.save!
end
it "clears changes" do
expect(person).to_not be_changed
end
it "stores previous changes" do
expect(person.previous_changes["title"]).to eq([ "MC", "DJ" ])
expect(person.previous_changes["ssn"]).to eq([ nil, "222-22-2222" ])
end
end
context "when the previous value is nil" do
before do
person.score = 100
person.reset_score!
end
it "removes the attribute from the document" do
expect(person.score).to be_nil
end
end
end
context "when accessing dirty attributes in callbacks" do
context "when the document is persisted" do
let!(:acolyte) do
Acolyte.create(name: "callback-test")
end
before do
Acolyte.set_callback(:save, :after, if: :callback_test?) do |doc|
doc[:changed_in_callback] = doc.changes.dup
end
end
after do
Acolyte._save_callbacks.select do |callback|
callback.kind == :after
end.each do |callback|
Acolyte._save_callbacks.delete(callback)
end
end
it "retains the changes until after all callbacks" do
acolyte.update_attribute(:status, "testing")
expect(acolyte.changed_in_callback).to eq({ "status" => [ nil, "testing" ] })
end
end
context "when the document is new" do
let!(:acolyte) do
Acolyte.new(name: "callback-test")
end
before do
Acolyte.set_callback(:save, :after, if: :callback_test?) do |doc|
doc[:changed_in_callback] = doc.changes.dup
end
end
after do
Acolyte._save_callbacks.select do |callback|
callback.kind == :after
end.each do |callback|
Acolyte._save_callbacks.delete(callback)
end
end
it "retains the changes until after all callbacks" do
acolyte.save
expect(acolyte.changed_in_callback["name"]).to eq([ nil, "callback-test" ])
end
end
end
context "when associations are getting changed" do
let(:person) do
Person.create(addresses: [ Address.new ])
end
before do
person.addresses = [ Address.new ]
end
it "does not set the association to nil when hitting the database" do
expect(person.setters).to_not eq({ "addresses" => nil })
end
end
end
| brixen/mongoid | spec/mongoid/changeable_spec.rb | Ruby | mit | 33,779 |
// SPDX-License-Identifier: GPL-2.0
#include <linux/perf_event.h>
#include <linux/nospec.h>
#include <asm/intel-family.h>
/*
 * Event indices for the MSR PMU.  Each id maps to one entry of the
 * msr[] table below (the MSR to read plus its sysfs attribute), and is
 * what userspace selects via the event config value.
 */
enum perf_msr_id {
	PERF_MSR_TSC = 0,
	PERF_MSR_APERF = 1,
	PERF_MSR_MPERF = 2,
	PERF_MSR_PPERF = 3,
	PERF_MSR_SMI = 4,
	PERF_MSR_PTSC = 5,
	PERF_MSR_IRPERF = 6,
	PERF_MSR_THERM = 7,
	PERF_MSR_THERM_SNAP = 8,
	PERF_MSR_THERM_UNIT = 9,
	PERF_MSR_EVENT_MAX,	/* number of ids; also array bound */
};
/*
 * Availability tests run once per MSR at init time (see msr_init()).
 * Each simply checks the corresponding CPUID feature bit; the idx
 * argument is unused here but part of the common test callback shape.
 */
static bool test_aperfmperf(int idx)
{
	return boot_cpu_has(X86_FEATURE_APERFMPERF);
}

static bool test_ptsc(int idx)
{
	return boot_cpu_has(X86_FEATURE_PTSC);
}

static bool test_irperf(int idx)
{
	return boot_cpu_has(X86_FEATURE_IRPERF);
}

static bool test_therm_status(int idx)
{
	return boot_cpu_has(X86_FEATURE_DTHERM);
}
/*
 * Model-specific availability test for the Intel-only MSRs.  SMI_COUNT
 * exists on all the listed family-6 models; PPERF only on the
 * Skylake/Kabylake group.  Anything else (or non-Intel) reports false.
 */
static bool test_intel(int idx)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
	    boot_cpu_data.x86 != 6)
		return false;

	switch (boot_cpu_data.x86_model) {
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_NEHALEM_G:
	case INTEL_FAM6_NEHALEM_EP:
	case INTEL_FAM6_NEHALEM_EX:

	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_WESTMERE_EP:
	case INTEL_FAM6_WESTMERE_EX:

	case INTEL_FAM6_SANDYBRIDGE:
	case INTEL_FAM6_SANDYBRIDGE_X:

	case INTEL_FAM6_IVYBRIDGE:
	case INTEL_FAM6_IVYBRIDGE_X:

	case INTEL_FAM6_HASWELL_CORE:
	case INTEL_FAM6_HASWELL_X:
	case INTEL_FAM6_HASWELL_ULT:
	case INTEL_FAM6_HASWELL_GT3E:

	case INTEL_FAM6_BROADWELL_CORE:
	case INTEL_FAM6_BROADWELL_XEON_D:
	case INTEL_FAM6_BROADWELL_GT3E:
	case INTEL_FAM6_BROADWELL_X:

	case INTEL_FAM6_ATOM_SILVERMONT:
	case INTEL_FAM6_ATOM_SILVERMONT_X:
	case INTEL_FAM6_ATOM_AIRMONT:

	case INTEL_FAM6_ATOM_GOLDMONT:
	case INTEL_FAM6_ATOM_GOLDMONT_X:

	case INTEL_FAM6_ATOM_GOLDMONT_PLUS:

	case INTEL_FAM6_XEON_PHI_KNL:
	case INTEL_FAM6_XEON_PHI_KNM:
		/* These models have SMI_COUNT but not PPERF. */
		if (idx == PERF_MSR_SMI)
			return true;
		break;

	case INTEL_FAM6_SKYLAKE_MOBILE:
	case INTEL_FAM6_SKYLAKE_DESKTOP:
	case INTEL_FAM6_SKYLAKE_X:
	case INTEL_FAM6_KABYLAKE_MOBILE:
	case INTEL_FAM6_KABYLAKE_DESKTOP:
		/* Skylake and later support both SMI_COUNT and PPERF. */
		if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
			return true;
		break;
	}

	return false;
}
/*
 * Description of one probed MSR: its address, the sysfs event string
 * exported for it, and an optional availability test run at init.
 */
struct perf_msr {
	u64 msr;
	struct perf_pmu_events_attr *attr;
	bool (*test)(int idx);
};

/* sysfs "events" directory strings; event=0xNN selects the perf_msr_id. */
PMU_EVENT_ATTR_STRING(tsc, evattr_tsc, "event=0x00" );
PMU_EVENT_ATTR_STRING(aperf, evattr_aperf, "event=0x01" );
PMU_EVENT_ATTR_STRING(mperf, evattr_mperf, "event=0x02" );
PMU_EVENT_ATTR_STRING(pperf, evattr_pperf, "event=0x03" );
PMU_EVENT_ATTR_STRING(smi, evattr_smi, "event=0x04" );
PMU_EVENT_ATTR_STRING(ptsc, evattr_ptsc, "event=0x05" );
PMU_EVENT_ATTR_STRING(irperf, evattr_irperf, "event=0x06" );
PMU_EVENT_ATTR_STRING(cpu_thermal_margin, evattr_therm, "event=0x07" );
PMU_EVENT_ATTR_STRING(cpu_thermal_margin.snapshot, evattr_therm_snap, "1" );
PMU_EVENT_ATTR_STRING(cpu_thermal_margin.unit, evattr_therm_unit, "C" );

/*
 * Indexed by perf_msr_id.  TSC has msr == 0, which msr_read_counter()
 * treats as "use rdtsc instead of rdmsr"; a NULL test means "always
 * available".  Entries whose test or probe read fails get attr = NULL
 * in msr_init() and are then rejected by msr_event_init().
 */
static struct perf_msr msr[] = {
	[PERF_MSR_TSC] = { 0, &evattr_tsc, NULL, },
	[PERF_MSR_APERF] = { MSR_IA32_APERF, &evattr_aperf, test_aperfmperf, },
	[PERF_MSR_MPERF] = { MSR_IA32_MPERF, &evattr_mperf, test_aperfmperf, },
	[PERF_MSR_PPERF] = { MSR_PPERF, &evattr_pperf, test_intel, },
	[PERF_MSR_SMI] = { MSR_SMI_COUNT, &evattr_smi, test_intel, },
	[PERF_MSR_PTSC] = { MSR_F15H_PTSC, &evattr_ptsc, test_ptsc, },
	[PERF_MSR_IRPERF] = { MSR_F17H_IRPERF, &evattr_irperf, test_irperf, },
	[PERF_MSR_THERM] = { MSR_IA32_THERM_STATUS, &evattr_therm, test_therm_status, },
	[PERF_MSR_THERM_SNAP] = { MSR_IA32_THERM_STATUS, &evattr_therm_snap, test_therm_status, },
	[PERF_MSR_THERM_UNIT] = { MSR_IA32_THERM_STATUS, &evattr_therm_unit, test_therm_status, },
};
/*
 * Filled in by msr_init() with the attributes of the MSRs that probed
 * successfully; NULL-terminated (hence the +1 slot).
 */
static struct attribute *events_attrs[PERF_MSR_EVENT_MAX + 1] = {
	NULL,
};

static struct attribute_group events_attr_group = {
	.name = "events",
	.attrs = events_attrs,
};

/* The whole 64-bit config is the event selector. */
PMU_FORMAT_ATTR(event, "config:0-63");

static struct attribute *format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group format_attr_group = {
	.name = "format",
	.attrs = format_attrs,
};

/* sysfs groups exposed under /sys/bus/event_source/devices/msr/. */
static const struct attribute_group *attr_groups[] = {
	&events_attr_group,
	&format_attr_group,
	NULL,
};
/*
 * Validate and set up a counting-only event.  Sampling and all
 * exclude_* filters are rejected (MSR counts cannot be attributed to a
 * privilege level), and the config must select a successfully probed
 * MSR index.
 */
static int msr_event_init(struct perf_event *event)
{
	u64 cfg = event->attr.config;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.exclude_user ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv ||
	    event->attr.exclude_idle ||
	    event->attr.exclude_host ||
	    event->attr.exclude_guest ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (cfg >= PERF_MSR_EVENT_MAX)
		return -EINVAL;

	/* Clamp the (userspace-controlled) index against speculation. */
	cfg = array_index_nospec((unsigned long)cfg, PERF_MSR_EVENT_MAX);

	/* attr == NULL means the MSR failed its probe in msr_init(). */
	if (!msr[cfg].attr)
		return -EINVAL;

	event->hw.idx = -1;
	event->hw.event_base = msr[cfg].msr;
	event->hw.config = cfg;

	return 0;
}
/*
 * Read the raw counter value: the event's MSR, or the TSC for the
 * PERF_MSR_TSC entry (which has event_base == 0 in the msr[] table).
 */
static inline u64 msr_read_counter(struct perf_event *event)
{
	u64 now;

	if (event->hw.event_base)
		rdmsrl(event->hw.event_base, now);
	else
		now = rdtsc_ordered();

	return now;
}
/*
 * Fold the delta since the previous read into event->count.  The
 * cmpxchg retry loop makes the prev_count update safe against a
 * concurrent update from NMI context.
 */
static void msr_event_update(struct perf_event *event)
{
	u64 prev, now;
	s64 delta;

	/* Careful, an NMI might modify the previous event value: */
again:
	prev = local64_read(&event->hw.prev_count);
	now = msr_read_counter(event);

	if (local64_cmpxchg(&event->hw.prev_count, prev, now) != prev)
		goto again;

	delta = now - prev;
	if (unlikely(event->hw.event_base == MSR_SMI_COUNT)) {
		/* The SMI counter is 32 bits wide; sign-extend the delta. */
		delta = sign_extend64(delta, 31);
		local64_add(delta, &event->count);
	} else if (unlikely(event->hw.event_base == MSR_IA32_THERM_STATUS)) {
		/* If valid, extract digital readout, otherwise set to -1: */
		/* Bit 31 = reading valid; bits 16-21 = digital readout. */
		now = now & (1ULL << 31) ? (now >> 16) & 0x3f : -1;
		local64_set(&event->count, now);
	} else {
		local64_add(delta, &event->count);
	}
}
/* Snapshot the counter so subsequent updates compute a correct delta. */
static void msr_event_start(struct perf_event *event, int flags)
{
	u64 now = msr_read_counter(event);

	local64_set(&event->hw.prev_count, now);
}

/* Stopping just means accumulating the final delta. */
static void msr_event_stop(struct perf_event *event, int flags)
{
	msr_event_update(event);
}

static void msr_event_del(struct perf_event *event, int flags)
{
	msr_event_stop(event, PERF_EF_UPDATE);
}

/*
 * Nothing to program on the hardware side; just start counting if
 * requested.  Always succeeds.
 */
static int msr_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		msr_event_start(event, flags);

	return 0;
}
/*
 * The MSR PMU is a software-context PMU: free-running counters that
 * cannot interrupt, hence PERF_PMU_CAP_NO_INTERRUPT (no sampling).
 */
static struct pmu pmu_msr = {
	.task_ctx_nr = perf_sw_context,
	.attr_groups = attr_groups,
	.event_init = msr_event_init,
	.add = msr_event_add,
	.del = msr_event_del,
	.start = msr_event_start,
	.stop = msr_event_stop,
	.read = msr_event_update,
	.capabilities = PERF_PMU_CAP_NO_INTERRUPT,
};
/*
 * Probe each MSR (feature test plus a safe trial read), drop the ones
 * that are absent by clearing their attr pointer, publish the
 * survivors in the sysfs "events" group, and register the PMU.
 */
static int __init msr_init(void)
{
	int i, j = 0;

	if (!boot_cpu_has(X86_FEATURE_TSC)) {
		pr_cont("no MSR PMU driver.\n");
		return 0;
	}

	/* Probe the MSRs. */
	for (i = PERF_MSR_TSC + 1; i < PERF_MSR_EVENT_MAX; i++) {
		u64 val;

		/* Virt sucks; you cannot tell if a R/O MSR is present :/ */
		if (!msr[i].test(i) || rdmsrl_safe(msr[i].msr, &val))
			msr[i].attr = NULL;
	}

	/* List remaining MSRs in the sysfs attrs. */
	for (i = 0; i < PERF_MSR_EVENT_MAX; i++) {
		if (msr[i].attr)
			events_attrs[j++] = &msr[i].attr->attr.attr;
	}
	events_attrs[j] = NULL;

	perf_pmu_register(&pmu_msr, "msr", -1);

	return 0;
}
device_initcall(msr_init);
| codeaurora-unoffical/linux-msm | arch/x86/events/msr.c | C | gpl-2.0 | 7,191 |
/*
* Copyright (C) 2008 Google, Inc.
* Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <mach/irqs.h>
#include <mach/msm_iomap.h>
#include <mach/dma.h>
#include <mach/board.h>
#include "devices.h"
#include <asm/mach/flash.h>
#include <mach/mmc.h>
/* UART3 interrupt line and register window. */
static struct resource resources_uart3[] = {
	{
		.start = INT_UART3,
		.end = INT_UART3,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = MSM_UART3_PHYS,
		.end = MSM_UART3_PHYS + MSM_UART3_SIZE - 1,
		.flags = IORESOURCE_MEM,
	},
};

/* Platform device for UART3; id 2 presumably makes it the third
 * msm_serial instance (0-based) — confirm against the board files. */
struct platform_device msm_device_uart3 = {
	.name = "msm_serial",
	.id = 2,
	.num_resources = ARRAY_SIZE(resources_uart3),
	.resource = resources_uart3,
};
/*
 * Clock table for QSD8x50, routed through the proc-comm (PCOM)
 * interface.  Flags: OFF = gate when unused, CLK_MIN/CLK_MINMAX =
 * rate-constraint behavior handled by the clock driver.  Only the
 * UART3 clock is bound to a specific device here.
 */
struct clk msm_clocks_8x50[] = {
	CLK_PCOM("adm_clk", ADM_CLK, NULL, 0),
	CLK_PCOM("ebi1_clk", EBI1_CLK, NULL, CLK_MIN),
	CLK_PCOM("ebi2_clk", EBI2_CLK, NULL, 0),
	CLK_PCOM("ecodec_clk", ECODEC_CLK, NULL, 0),
	CLK_PCOM("emdh_clk", EMDH_CLK, NULL, OFF | CLK_MINMAX),
	CLK_PCOM("gp_clk", GP_CLK, NULL, 0),
	CLK_PCOM("grp_clk", GRP_3D_CLK, NULL, 0),
	CLK_PCOM("icodec_rx_clk", ICODEC_RX_CLK, NULL, 0),
	CLK_PCOM("icodec_tx_clk", ICODEC_TX_CLK, NULL, 0),
	CLK_PCOM("imem_clk", IMEM_CLK, NULL, OFF),
	CLK_PCOM("mdc_clk", MDC_CLK, NULL, 0),
	CLK_PCOM("mddi_clk", PMDH_CLK, NULL, OFF | CLK_MINMAX),
	CLK_PCOM("mdp_clk", MDP_CLK, NULL, OFF),
	CLK_PCOM("mdp_lcdc_pclk_clk", MDP_LCDC_PCLK_CLK, NULL, 0),
	CLK_PCOM("mdp_lcdc_pad_pclk_clk", MDP_LCDC_PAD_PCLK_CLK, NULL, 0),
	CLK_PCOM("mdp_vsync_clk", MDP_VSYNC_CLK, NULL, 0),
	CLK_PCOM("pbus_clk", PBUS_CLK, NULL, CLK_MIN),
	CLK_PCOM("pcm_clk", PCM_CLK, NULL, 0),
	CLK_PCOM("sdac_clk", SDAC_CLK, NULL, OFF),
	CLK_PCOM("spi_clk", SPI_CLK, NULL, 0),
	CLK_PCOM("tsif_clk", TSIF_CLK, NULL, 0),
	CLK_PCOM("tsif_ref_clk", TSIF_REF_CLK, NULL, 0),
	CLK_PCOM("tv_dac_clk", TV_DAC_CLK, NULL, 0),
	CLK_PCOM("tv_enc_clk", TV_ENC_CLK, NULL, 0),
	CLK_PCOM("uart_clk", UART3_CLK, &msm_device_uart3.dev, OFF),
	CLK_PCOM("usb_hs_clk", USB_HS_CLK, NULL, OFF),
	CLK_PCOM("usb_hs_pclk", USB_HS_P_CLK, NULL, OFF),
	CLK_PCOM("usb_otg_clk", USB_OTG_CLK, NULL, 0),
	CLK_PCOM("vdc_clk", VDC_CLK, NULL, OFF | CLK_MIN),
	CLK_PCOM("vfe_clk", VFE_CLK, NULL, OFF),
	CLK_PCOM("vfe_mdc_clk", VFE_MDC_CLK, NULL, OFF),
	CLK_PCOM("vfe_axi_clk", VFE_AXI_CLK, NULL, OFF),
	CLK_PCOM("usb_hs2_clk", USB_HS2_CLK, NULL, OFF),
	CLK_PCOM("usb_hs2_pclk", USB_HS2_P_CLK, NULL, OFF),
	CLK_PCOM("usb_hs3_clk", USB_HS3_CLK, NULL, OFF),
	CLK_PCOM("usb_hs3_pclk", USB_HS3_P_CLK, NULL, OFF),
	CLK_PCOM("usb_phy_clk", USB_PHY_CLK, NULL, 0),
};

/* Number of entries in msm_clocks_8x50, for board registration code. */
unsigned msm_num_clocks_8x50 = ARRAY_SIZE(msm_clocks_8x50);
| sigma-random/asuswrt-merlin | release/src-rt-7.x.main/src/linux/linux-2.6.36/arch/arm/mach-msm/devices-qsd8x50.c | C | gpl-2.0 | 3,119 |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* arch/sh64/kernel/sh_ksyms.c
*
* Copyright (C) 2000, 2001 Paolo Alberelli
*
*/
#include <linux/rwsem.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/user.h>
#include <linux/elfcore.h>
#include <linux/sched.h>
#include <linux/in6.h>
#include <linux/interrupt.h>
#include <linux/screen_info.h>
#include <asm/semaphore.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/checksum.h>
#include <asm/io.h>
#include <asm/delay.h>
#include <asm/irq.h>
/*
 * Symbols exported to loadable modules for the sh64 architecture.
 * Each EXPORT_SYMBOL makes the named kernel symbol resolvable at
 * module load time.
 */
extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);

/* platform dependent support */
EXPORT_SYMBOL(dump_fpu);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(enable_irq);
EXPORT_SYMBOL(disable_irq);
EXPORT_SYMBOL(kernel_thread);

/* Networking helper routines. */
EXPORT_SYMBOL(csum_partial_copy_nocheck);

EXPORT_SYMBOL(strstr);

#ifdef CONFIG_VT
EXPORT_SYMBOL(screen_info);
#endif

/* Semaphore primitives used by inline down()/up() fast paths. */
EXPORT_SYMBOL(__down);
EXPORT_SYMBOL(__down_trylock);
EXPORT_SYMBOL(__up);
/* uaccess helpers emitted by the compiler-visible get/put_user macros. */
EXPORT_SYMBOL(__put_user_asm_l);
EXPORT_SYMBOL(__get_user_asm_l);
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memscan);
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strlen);

EXPORT_SYMBOL(flush_dcache_page);

/* For ext3 */
EXPORT_SYMBOL(sh64_page_clear);

/* Ugh. These come in from libgcc.a at link time. */
extern void __sdivsi3(void);
extern void __muldi3(void);
extern void __udivsi3(void);
extern char __div_table;
EXPORT_SYMBOL(__sdivsi3);
EXPORT_SYMBOL(__muldi3);
EXPORT_SYMBOL(__udivsi3);
EXPORT_SYMBOL(__div_table);
| ghmajx/asuswrt-merlin | release/src-rt/linux/linux-2.6/arch/sh64/kernel/sh_ksyms.c | C | gpl-2.0 | 1,687 |
/* Address ranges.
Copyright (C) 1998-2014 Free Software Foundation, Inc.
Contributed by Cygnus Solutions.
This file is part of the GNU Simulators.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
/* Tell sim-arange.h it's us. */
#define SIM_ARANGE_C
#include "libiberty.h"
#include "sim-basics.h"
#include "sim-assert.h"
#ifdef HAVE_STDLIB_H
#include <stdlib.h>
#endif
#ifdef HAVE_STRING_H
#include <string.h>
#endif
#define DEFINE_INLINE_P (! defined (SIM_ARANGE_C_INCLUDED))
#define DEFINE_NON_INLINE_P defined (SIM_ARANGE_C_INCLUDED)
#if DEFINE_NON_INLINE_P
/* Insert a range: splice ASR into the list in front of *POS.  */

static void
insert_range (ADDR_SUBRANGE **pos, ADDR_SUBRANGE *asr)
{
  ADDR_SUBRANGE *old_head = *pos;

  asr->next = old_head;
  *pos = asr;
}
/* Delete a range: unlink the node addressed by *THISASRP and free it.  */

static void
delete_range (ADDR_SUBRANGE **thisasrp)
{
  ADDR_SUBRANGE *doomed = *thisasrp;

  /* Route the list around the node, then release its storage.  */
  *thisasrp = doomed->next;
  free (doomed);
}
/* Add or delete an address range.
   This code was borrowed from linux's locks.c:posix_lock_file().
   ??? Todo: Given our simpler needs this could be simplified
   (split into two fns). */

/* Merge [START,END] into (DELETE_P == 0), or carve it out of
   (DELETE_P != 0), AR's sorted list of disjoint subranges.  On insert,
   adjacent or overlapping subranges are coalesced; on delete, a hit in
   the middle of an existing subrange splits it in two.  */

static void
frob_range (ADDR_RANGE *ar, address_word start, address_word end, int delete_p)
{
  ADDR_SUBRANGE *asr;
  ADDR_SUBRANGE *new_asr, *new_asr2;  /* preallocated spares, freed if unused */
  ADDR_SUBRANGE *left = NULL;         /* surviving piece below a deletion */
  ADDR_SUBRANGE *right = NULL;        /* surviving piece above a deletion */
  ADDR_SUBRANGE **before;             /* link currently being examined */
  ADDR_SUBRANGE init_caller;
  ADDR_SUBRANGE *caller = &init_caller;  /* the requested range */
  int added_p = 0;

  memset (caller, 0, sizeof (ADDR_SUBRANGE));
  new_asr = ZALLOC (ADDR_SUBRANGE);
  new_asr2 = ZALLOC (ADDR_SUBRANGE);

  caller->start = start;
  caller->end = end;
  before = &ar->ranges;

  while ((asr = *before) != NULL)
    {
      if (! delete_p)
	{
	  /* Try next range if current range preceeds new one and not
	     adjacent or overlapping.  */
	  /* NOTE(review): caller->start - 1 wraps if START is 0
	     (address_word is unsigned) — confirm callers never pass 0,
	     or that wraparound is benign here.  */
	  if (asr->end < caller->start - 1)
	    goto next_range;

	  /* Break out if new range preceeds current one and not
	     adjacent or overlapping.  */
	  if (asr->start > caller->end + 1)
	    break;

	  /* If we come here, the new and current ranges are adjacent or
	     overlapping. Make one range yielding from the lower start address
	     of both ranges to the higher end address.  */
	  if (asr->start > caller->start)
	    asr->start = caller->start;
	  else
	    caller->start = asr->start;
	  if (asr->end < caller->end)
	    asr->end = caller->end;
	  else
	    caller->end = asr->end;

	  /* Already merged into an earlier node: this one is now
	     redundant, drop it and keep scanning.  */
	  if (added_p)
	    {
	      delete_range (before);
	      continue;
	    }
	  caller = asr;
	  added_p = 1;
	}
      else /* deleting a range */
	{
	  /* Try next range if current range preceeds new one.  */
	  if (asr->end < caller->start)
	    goto next_range;

	  /* Break out if new range preceeds current one.  */
	  if (asr->start > caller->end)
	    break;

	  /* Some part of this node overlaps the range being deleted.  */
	  added_p = 1;

	  if (asr->start < caller->start)
	    left = asr;

	  /* If the next range in the list has a higher end
	     address than the new one, insert the new one here.  */
	  if (asr->end > caller->end)
	    {
	      right = asr;
	      break;
	    }
	  if (asr->start >= caller->start)
	    {
	      /* The new range completely replaces an old
	         one (This may happen several times).  */
	      if (added_p)
		{
		  delete_range (before);
		  continue;
		}

	      /* Replace the old range with the new one.  */
	      asr->start = caller->start;
	      asr->end = caller->end;
	      caller = asr;
	      added_p = 1;
	    }
	}

      /* Go on to next range.  */
    next_range:
      before = &asr->next;
    }

  if (!added_p)
    {
      if (delete_p)
	goto out;
      /* Nothing merged: the new range stands alone; link in a fresh
         node at the insertion point found above.  */
      new_asr->start = caller->start;
      new_asr->end = caller->end;
      insert_range (before, new_asr);
      new_asr = NULL;
    }
  if (right)
    {
      if (left == right)
	{
	  /* The new range breaks the old one in two pieces,
	     so we have to use the second new range.  */
	  new_asr2->start = right->start;
	  new_asr2->end = right->end;
	  left = new_asr2;
	  insert_range (before, left);
	  new_asr2 = NULL;
	}
      right->start = caller->end + 1;
    }
  if (left)
    {
      left->end = caller->start - 1;
    }

 out:
  /* Release whichever preallocated nodes went unused.  */
  if (new_asr)
    free (new_asr);
  if (new_asr2)
    free (new_asr2);
}
/* Free T and all subtrees.  */

static void
free_search_tree (ADDR_RANGE_TREE *t)
{
  if (t == NULL)
    return;

  /* Post-order: release both children before the node itself.  */
  free_search_tree (t->lower);
  free_search_tree (t->higher);
  free (t);
}
/* Subroutine of build_search_tree to recursively build a balanced tree
   from ASRTAB[0 .. N-1]: the midpoint entry becomes each subtree's root.
   ??? It's not an optimum tree though.  */

static ADDR_RANGE_TREE *
build_tree_1 (ADDR_SUBRANGE **asrtab, unsigned int n)
{
  unsigned int mid;
  ADDR_RANGE_TREE *node;

  if (n == 0)
    return NULL;

  mid = n / 2;
  node = (ADDR_RANGE_TREE *) xmalloc (sizeof (ADDR_RANGE_TREE));
  node->start = asrtab[mid]->start;
  node->end = asrtab[mid]->end;
  /* Entries below the midpoint form the lower subtree, the rest the
     higher subtree; either may be empty.  */
  node->lower = mid == 0 ? NULL : build_tree_1 (asrtab, mid);
  node->higher = n > mid + 1 ? build_tree_1 (asrtab + mid + 1, n - mid - 1) : NULL;

  return node;
}
/* Build a search tree for address range AR. */

/* Flattens AR's linked list of subranges (which frob_range keeps
   sorted) into a temporary array, then hands it to build_tree_1 to
   produce the balanced lookup tree used by sim_addr_range_hit_p.  */
static void
build_search_tree (ADDR_RANGE *ar)
{
  /* ??? Simple version for now. */

  ADDR_SUBRANGE *asr,**asrtab;
  unsigned int i, n;

  /* First pass: count the subranges.  */
  for (n = 0, asr = ar->ranges; asr != NULL; ++n, asr = asr->next)
    continue;

  asrtab = (ADDR_SUBRANGE **) xmalloc (n * sizeof (ADDR_SUBRANGE *));
  for (i = 0, asr = ar->ranges; i < n; ++i, asr = asr->next)
    asrtab[i] = asr;

  ar->range_tree = build_tree_1 (asrtab, n);
  free (asrtab);
}
/* Public entry point: add [START,END] to AR's tracked ranges and
   rebuild the lookup tree used by sim_addr_range_hit_p.  */
void
sim_addr_range_add (ADDR_RANGE *ar, address_word start, address_word end)
{
  frob_range (ar, start, end, 0);

  /* Rebuild the search tree. */
  /* ??? Instead of rebuilding it here it could be done in a module resume
     handler, say by first checking for a `changed' flag, assuming of course
     this would never be done while the simulation is running. */
  free_search_tree (ar->range_tree);
  build_search_tree (ar);
}

/* Public entry point: remove [START,END] from AR's tracked ranges and
   rebuild the lookup tree.  */
void
sim_addr_range_delete (ADDR_RANGE *ar, address_word start, address_word end)
{
  frob_range (ar, start, end, 1);

  /* Rebuild the search tree. */
  /* ??? Instead of rebuilding it here it could be done in a module resume
     handler, say by first checking for a `changed' flag, assuming of course
     this would never be done while the simulation is running. */
  free_search_tree (ar->range_tree);
  build_search_tree (ar);
}
#endif /* DEFINE_NON_INLINE_P */
#if DEFINE_INLINE_P
/* Return non-zero if ADDR falls inside one of AR's subranges.
   Walks the binary search tree built by build_search_tree.  */
SIM_ARANGE_INLINE int
sim_addr_range_hit_p (ADDR_RANGE *ar, address_word addr)
{
  ADDR_RANGE_TREE *node;

  for (node = ar->range_tree; node != NULL; )
    {
      if (addr < node->start)
	node = node->lower;
      else if (addr > node->end)
	node = node->higher;
      else
	return 1;	/* start <= addr <= end: a hit */
    }
  return 0;
}
#endif /* DEFINE_INLINE_P */
| tuliom/binutils-gdb | sim/common/sim-arange.c | C | gpl-2.0 | 7,244 |
/*
* Copyright (c) 2006 Jiri Benc <jbenc@suse.cz>
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/if.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <net/mac80211.h>
#include <net/cfg80211.h>
#include "ieee80211_i.h"
#include "rate.h"
#include "debugfs.h"
#include "debugfs_netdev.h"
/*
 * Common debugfs read helper: format one interface attribute into a
 * small stack buffer via FORMAT, then copy it to userspace.
 * dev_base_lock guards against reading from a netdev that is being
 * unregistered.
 */
static ssize_t ieee80211_if_read(
	struct ieee80211_sub_if_data *sdata,
	char __user *userbuf,
	size_t count, loff_t *ppos,
	ssize_t (*format)(const struct ieee80211_sub_if_data *, char *, int))
{
	char buf[70];
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (sdata->dev->reg_state == NETREG_REGISTERED)
		ret = (*format)(sdata, buf, sizeof(buf));
	read_unlock(&dev_base_lock);

	if (ret != -EINVAL)
		ret = simple_read_from_buffer(userbuf, count, ppos, buf, ret);

	return ret;
}
/*
 * Common debugfs write helper: copy the user buffer into kernel memory
 * and hand it to the attribute's parse callback under the RTNL, again
 * only while the netdev is still registered.
 *
 * NOTE(review): buf is not NUL-terminated — parse callbacks must honor
 * the count bound (the existing ones use strncmp with buflen); verify
 * this for any new parser.
 */
static ssize_t ieee80211_if_write(
	struct ieee80211_sub_if_data *sdata,
	const char __user *userbuf,
	size_t count, loff_t *ppos,
	ssize_t (*write)(struct ieee80211_sub_if_data *, const char *, int))
{
	u8 *buf;
	ssize_t ret;

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = -EFAULT;
	if (copy_from_user(buf, userbuf, count))
		goto freebuf;

	ret = -ENODEV;
	rtnl_lock();
	if (sdata->dev->reg_state == NETREG_REGISTERED)
		ret = (*write)(sdata, buf, count);
	rtnl_unlock();

freebuf:
	kfree(buf);
	return ret;
}
/*
 * Macro toolkit for declaring per-interface debugfs attributes.
 * IEEE80211_IF_FMT* generate a formatter reading sdata->field;
 * __IEEE80211_IF_FILE* wrap the formatter (and optional parser) in
 * file_operations; IEEE80211_IF_FILE combines both for read-only attrs.
 */
#define IEEE80211_IF_FMT(name, field, format_string)			\
static ssize_t ieee80211_if_fmt_##name(					\
	const struct ieee80211_sub_if_data *sdata, char *buf,		\
	int buflen)							\
{									\
	return scnprintf(buf, buflen, format_string, sdata->field);	\
}
#define IEEE80211_IF_FMT_DEC(name, field)				\
		IEEE80211_IF_FMT(name, field, "%d\n")
#define IEEE80211_IF_FMT_HEX(name, field)				\
		IEEE80211_IF_FMT(name, field, "%#x\n")
#define IEEE80211_IF_FMT_SIZE(name, field)				\
		IEEE80211_IF_FMT(name, field, "%zd\n")

#define IEEE80211_IF_FMT_ATOMIC(name, field)				\
static ssize_t ieee80211_if_fmt_##name(					\
	const struct ieee80211_sub_if_data *sdata,			\
	char *buf, int buflen)						\
{									\
	return scnprintf(buf, buflen, "%d\n", atomic_read(&sdata->field));\
}

#define IEEE80211_IF_FMT_MAC(name, field)				\
static ssize_t ieee80211_if_fmt_##name(					\
	const struct ieee80211_sub_if_data *sdata, char *buf,		\
	int buflen)							\
{									\
	return scnprintf(buf, buflen, "%pM\n", sdata->field);		\
}

/* Fixed-point field stored as value*16; print the integer part. */
#define IEEE80211_IF_FMT_DEC_DIV_16(name, field)			\
static ssize_t ieee80211_if_fmt_##name(					\
	const struct ieee80211_sub_if_data *sdata,			\
	char *buf, int buflen)						\
{									\
	return scnprintf(buf, buflen, "%d\n", sdata->field / 16);	\
}

#define __IEEE80211_IF_FILE(name, _write)				\
static ssize_t ieee80211_if_read_##name(struct file *file,		\
					char __user *userbuf,		\
					size_t count, loff_t *ppos)	\
{									\
	return ieee80211_if_read(file->private_data,			\
				 userbuf, count, ppos,			\
				 ieee80211_if_fmt_##name);		\
}									\
static const struct file_operations name##_ops = {			\
	.read = ieee80211_if_read_##name,				\
	.write = (_write),						\
	.open = mac80211_open_file_generic,				\
}

/* Writable variant: also generates the write handler wrapping the
 * ieee80211_if_parse_##name callback. */
#define __IEEE80211_IF_FILE_W(name)					\
static ssize_t ieee80211_if_write_##name(struct file *file,		\
					 const char __user *userbuf,	\
					 size_t count, loff_t *ppos)	\
{									\
	return ieee80211_if_write(file->private_data, userbuf, count,	\
				  ppos, ieee80211_if_parse_##name);	\
}									\
__IEEE80211_IF_FILE(name, ieee80211_if_write_##name)

#define IEEE80211_IF_FILE(name, field, format)				\
		IEEE80211_IF_FMT_##format(name, field)			\
		__IEEE80211_IF_FILE(name, NULL)
/* common attributes */
IEEE80211_IF_FILE(drop_unencrypted, drop_unencrypted, DEC);
IEEE80211_IF_FILE(rc_rateidx_mask_2ghz, rc_rateidx_mask[IEEE80211_BAND_2GHZ],
		  HEX);
IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[IEEE80211_BAND_5GHZ],
		  HEX);

/* STA attributes */
IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC);
IEEE80211_IF_FILE(aid, u.mgd.aid, DEC);
IEEE80211_IF_FILE(last_beacon, u.mgd.last_beacon_signal, DEC);
/* ave_beacon_signal is stored as signal*16, hence DEC_DIV_16. */
IEEE80211_IF_FILE(ave_beacon, u.mgd.ave_beacon_signal, DEC_DIV_16);
/*
 * Request a new spatial-multiplexing power save mode for a managed
 * interface.  Rejects modes the hardware does not advertise support
 * for, then forwards the request under the iflist mutex.
 */
static int ieee80211_set_smps(struct ieee80211_sub_if_data *sdata,
			      enum ieee80211_smps_mode smps_mode)
{
	struct ieee80211_local *local = sdata->local;
	int err;

	if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_STATIC_SMPS) &&
	    smps_mode == IEEE80211_SMPS_STATIC)
		return -EINVAL;

	/* auto should be dynamic if in PS mode */
	if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS) &&
	    (smps_mode == IEEE80211_SMPS_DYNAMIC ||
	     smps_mode == IEEE80211_SMPS_AUTOMATIC))
		return -EINVAL;

	/* supported only on managed interfaces for now */
	if (sdata->vif.type != NL80211_IFTYPE_STATION)
		return -EOPNOTSUPP;

	mutex_lock(&local->iflist_mtx);
	err = __ieee80211_request_smps(sdata, smps_mode);
	mutex_unlock(&local->iflist_mtx);

	return err;
}
/* Human-readable names used both for printing and parsing the modes. */
static const char *smps_modes[IEEE80211_SMPS_NUM_MODES] = {
	[IEEE80211_SMPS_AUTOMATIC] = "auto",
	[IEEE80211_SMPS_OFF] = "off",
	[IEEE80211_SMPS_STATIC] = "static",
	[IEEE80211_SMPS_DYNAMIC] = "dynamic",
};

/* Show both the requested and the mode negotiated with the AP. */
static ssize_t ieee80211_if_fmt_smps(const struct ieee80211_sub_if_data *sdata,
				     char *buf, int buflen)
{
	if (sdata->vif.type != NL80211_IFTYPE_STATION)
		return -EOPNOTSUPP;

	return snprintf(buf, buflen, "request: %s\nused: %s\n",
			smps_modes[sdata->u.mgd.req_smps],
			smps_modes[sdata->u.mgd.ap_smps]);
}

/*
 * Parse a mode name written to the file.  The strncmp is bounded by
 * buflen, so input must match a mode name exactly with no trailing
 * newline (e.g. `echo -n off`).
 */
static ssize_t ieee80211_if_parse_smps(struct ieee80211_sub_if_data *sdata,
				       const char *buf, int buflen)
{
	enum ieee80211_smps_mode mode;

	for (mode = 0; mode < IEEE80211_SMPS_NUM_MODES; mode++) {
		if (strncmp(buf, smps_modes[mode], buflen) == 0) {
			int err = ieee80211_set_smps(sdata, mode);
			if (!err)
				return buflen;
			return err;
		}
	}

	return -EINVAL;
}
/* smps is the only writable attribute; wire up its write handler. */
__IEEE80211_IF_FILE_W(smps);

/* AP attributes */
IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC);
IEEE80211_IF_FILE(dtim_count, u.ap.dtim_count, DEC);

/* Length of the powersave broadcast/multicast buffer queue. */
static ssize_t ieee80211_if_fmt_num_buffered_multicast(
	const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
{
	return scnprintf(buf, buflen, "%u\n",
			 skb_queue_len(&sdata->u.ap.ps_bc_buf));
}
__IEEE80211_IF_FILE(num_buffered_multicast, NULL);

/* WDS attributes */
IEEE80211_IF_FILE(peer, u.wds.remote_addr, MAC);

#ifdef CONFIG_MAC80211_MESH
/* Mesh stats attributes */
IEEE80211_IF_FILE(fwded_mcast, u.mesh.mshstats.fwded_mcast, DEC);
IEEE80211_IF_FILE(fwded_unicast, u.mesh.mshstats.fwded_unicast, DEC);
IEEE80211_IF_FILE(fwded_frames, u.mesh.mshstats.fwded_frames, DEC);
IEEE80211_IF_FILE(dropped_frames_ttl, u.mesh.mshstats.dropped_frames_ttl, DEC);
IEEE80211_IF_FILE(dropped_frames_no_route,
		  u.mesh.mshstats.dropped_frames_no_route, DEC);
IEEE80211_IF_FILE(estab_plinks, u.mesh.mshstats.estab_plinks, ATOMIC);

/* Mesh parameters */
IEEE80211_IF_FILE(dot11MeshMaxRetries,
		  u.mesh.mshcfg.dot11MeshMaxRetries, DEC);
IEEE80211_IF_FILE(dot11MeshRetryTimeout,
		  u.mesh.mshcfg.dot11MeshRetryTimeout, DEC);
IEEE80211_IF_FILE(dot11MeshConfirmTimeout,
		  u.mesh.mshcfg.dot11MeshConfirmTimeout, DEC);
IEEE80211_IF_FILE(dot11MeshHoldingTimeout,
		  u.mesh.mshcfg.dot11MeshHoldingTimeout, DEC);
IEEE80211_IF_FILE(dot11MeshTTL, u.mesh.mshcfg.dot11MeshTTL, DEC);
IEEE80211_IF_FILE(auto_open_plinks, u.mesh.mshcfg.auto_open_plinks, DEC);
IEEE80211_IF_FILE(dot11MeshMaxPeerLinks,
		  u.mesh.mshcfg.dot11MeshMaxPeerLinks, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPactivePathTimeout,
		  u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPpreqMinInterval,
		  u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPnetDiameterTraversalTime,
		  u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPmaxPREQretries,
		  u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries, DEC);
IEEE80211_IF_FILE(path_refresh_time,
		  u.mesh.mshcfg.path_refresh_time, DEC);
IEEE80211_IF_FILE(min_discovery_timeout,
		  u.mesh.mshcfg.min_discovery_timeout, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPRootMode,
		  u.mesh.mshcfg.dot11MeshHWMPRootMode, DEC);
#endif
/* Create one debugfs file under the interface's directory; 0400 =
 * read-only, caller can pass a mode for writable files. */
#define DEBUGFS_ADD(name) \
	debugfs_create_file(#name, 0400, sdata->debugfs.dir, \
			    sdata, &name##_ops);

#define DEBUGFS_ADD_MODE(name, mode) \
	debugfs_create_file(#name, mode, sdata->debugfs.dir, \
			    sdata, &name##_ops);

/* Per-interface-type file sets.  All types get the common attributes;
 * each adds its own mode-specific ones. */
static void add_sta_files(struct ieee80211_sub_if_data *sdata)
{
	DEBUGFS_ADD(drop_unencrypted);
	DEBUGFS_ADD(rc_rateidx_mask_2ghz);
	DEBUGFS_ADD(rc_rateidx_mask_5ghz);

	DEBUGFS_ADD(bssid);
	DEBUGFS_ADD(aid);
	DEBUGFS_ADD(last_beacon);
	DEBUGFS_ADD(ave_beacon);
	/* smps is user-writable, hence the explicit 0600 mode. */
	DEBUGFS_ADD_MODE(smps, 0600);
}

static void add_ap_files(struct ieee80211_sub_if_data *sdata)
{
	DEBUGFS_ADD(drop_unencrypted);
	DEBUGFS_ADD(rc_rateidx_mask_2ghz);
	DEBUGFS_ADD(rc_rateidx_mask_5ghz);

	DEBUGFS_ADD(num_sta_ps);
	DEBUGFS_ADD(dtim_count);
	DEBUGFS_ADD(num_buffered_multicast);
}

static void add_wds_files(struct ieee80211_sub_if_data *sdata)
{
	DEBUGFS_ADD(drop_unencrypted);
	DEBUGFS_ADD(rc_rateidx_mask_2ghz);
	DEBUGFS_ADD(rc_rateidx_mask_5ghz);

	DEBUGFS_ADD(peer);
}

static void add_vlan_files(struct ieee80211_sub_if_data *sdata)
{
	DEBUGFS_ADD(drop_unencrypted);
	DEBUGFS_ADD(rc_rateidx_mask_2ghz);
	DEBUGFS_ADD(rc_rateidx_mask_5ghz);
}

/* Monitor interfaces currently expose no extra attributes. */
static void add_monitor_files(struct ieee80211_sub_if_data *sdata)
{
}
#ifdef CONFIG_MAC80211_MESH
static void add_mesh_stats(struct ieee80211_sub_if_data *sdata)
{
struct dentry *dir = debugfs_create_dir("mesh_stats",
sdata->debugfs.dir);
#define MESHSTATS_ADD(name)\
debugfs_create_file(#name, 0400, dir, sdata, &name##_ops);
MESHSTATS_ADD(fwded_mcast);
MESHSTATS_ADD(fwded_unicast);
MESHSTATS_ADD(fwded_frames);
MESHSTATS_ADD(dropped_frames_ttl);
MESHSTATS_ADD(dropped_frames_no_route);
MESHSTATS_ADD(estab_plinks);
#undef MESHSTATS_ADD
}
static void add_mesh_config(struct ieee80211_sub_if_data *sdata)
{
struct dentry *dir = debugfs_create_dir("mesh_config",
sdata->debugfs.dir);
#define MESHPARAMS_ADD(name) \
debugfs_create_file(#name, 0600, dir, sdata, &name##_ops);
MESHPARAMS_ADD(dot11MeshMaxRetries);
MESHPARAMS_ADD(dot11MeshRetryTimeout);
MESHPARAMS_ADD(dot11MeshConfirmTimeout);
MESHPARAMS_ADD(dot11MeshHoldingTimeout);
MESHPARAMS_ADD(dot11MeshTTL);
MESHPARAMS_ADD(auto_open_plinks);
MESHPARAMS_ADD(dot11MeshMaxPeerLinks);
MESHPARAMS_ADD(dot11MeshHWMPactivePathTimeout);
MESHPARAMS_ADD(dot11MeshHWMPpreqMinInterval);
MESHPARAMS_ADD(dot11MeshHWMPnetDiameterTraversalTime);
MESHPARAMS_ADD(dot11MeshHWMPmaxPREQretries);
MESHPARAMS_ADD(path_refresh_time);
MESHPARAMS_ADD(min_discovery_timeout);
#undef MESHPARAMS_ADD
}
#endif
/* Dispatch to the file set matching the interface's current type.
 * A NULL debugfs dir means debugfs creation failed; silently skip. */
static void add_files(struct ieee80211_sub_if_data *sdata)
{
	if (!sdata->debugfs.dir)
		return;

	switch (sdata->vif.type) {
	case NL80211_IFTYPE_MESH_POINT:
#ifdef CONFIG_MAC80211_MESH
		add_mesh_stats(sdata);
		add_mesh_config(sdata);
#endif
		break;
	case NL80211_IFTYPE_STATION:
		add_sta_files(sdata);
		break;
	case NL80211_IFTYPE_ADHOC:
		/* XXX */
		break;
	case NL80211_IFTYPE_AP:
		add_ap_files(sdata);
		break;
	case NL80211_IFTYPE_WDS:
		add_wds_files(sdata);
		break;
	case NL80211_IFTYPE_MONITOR:
		add_monitor_files(sdata);
		break;
	case NL80211_IFTYPE_AP_VLAN:
		add_vlan_files(sdata);
		break;
	default:
		break;
	}
}
/*
 * Lifecycle hooks called when an interface is created, destroyed or
 * renamed.  Each interface gets a "netdev:<ifname>" directory under
 * the wiphy's debugfs root.
 */
void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata)
{
	char buf[10+IFNAMSIZ];

	sprintf(buf, "netdev:%s", sdata->name);
	sdata->debugfs.dir = debugfs_create_dir(buf,
		sdata->local->hw.wiphy->debugfsdir);
	add_files(sdata);
}

void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata)
{
	if (!sdata->debugfs.dir)
		return;

	debugfs_remove_recursive(sdata->debugfs.dir);
	sdata->debugfs.dir = NULL;
}

/* Keep the debugfs directory name in sync with the netdev name. */
void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
{
	struct dentry *dir;
	char buf[10 + IFNAMSIZ];

	dir = sdata->debugfs.dir;

	if (!dir)
		return;

	sprintf(buf, "netdev:%s", sdata->name);
	if (!debugfs_rename(dir->d_parent, dir, dir->d_parent, buf))
		printk(KERN_ERR "mac80211: debugfs: failed to rename debugfs "
		       "dir to %s\n", buf);
}
| wkritzinger/asuswrt-merlin | release/src-rt-7.x.main/src/linux/linux-2.6.36/net/mac80211/debugfs_netdev.c | C | gpl-2.0 | 12,386 |
/*=============================================================================
Copyright (c) 2010 Christopher Schmidt
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
==============================================================================*/
#ifndef BOOST_FUSION_INCLUDE_UNFUSED_HPP
#define BOOST_FUSION_INCLUDE_UNFUSED_HPP
#include <boost/fusion/functional/adapter/unfused.hpp>
#endif
| beiko-lab/gengis | win32/library3rd/boost_1_47/boost/fusion/include/unfused.hpp | C++ | gpl-3.0 | 520 |
import { CompileIdentifierMetadata, CompileTokenMetadata } from './compile_metadata';
/**
 * Metadata for a statically known symbol: its exported name, the
 * module URL it is imported from, and the runtime value it resolves to.
 */
export interface IdentifierSpec {
    name: string;
    moduleUrl: string;
    runtime: any;
}
/**
 * Registry of well-known Angular framework symbols the compiler emits
 * references to (core types, view-definition factory functions, and
 * helper utilities).  Each entry is an IdentifierSpec resolvable via
 * the createIdentifier/resolveIdentifier helpers below.
 */
export declare class Identifiers {
    static ANALYZE_FOR_ENTRY_COMPONENTS: IdentifierSpec;
    static ElementRef: IdentifierSpec;
    static NgModuleRef: IdentifierSpec;
    static ViewContainerRef: IdentifierSpec;
    static ChangeDetectorRef: IdentifierSpec;
    static QueryList: IdentifierSpec;
    static TemplateRef: IdentifierSpec;
    static CodegenComponentFactoryResolver: IdentifierSpec;
    static ComponentFactoryResolver: IdentifierSpec;
    static ComponentFactory: IdentifierSpec;
    static ComponentRef: IdentifierSpec;
    static NgModuleFactory: IdentifierSpec;
    static NgModuleInjector: IdentifierSpec;
    static RegisterModuleFactoryFn: IdentifierSpec;
    static Injector: IdentifierSpec;
    static ViewEncapsulation: IdentifierSpec;
    static ChangeDetectionStrategy: IdentifierSpec;
    static SecurityContext: IdentifierSpec;
    static LOCALE_ID: IdentifierSpec;
    static TRANSLATIONS_FORMAT: IdentifierSpec;
    static inlineInterpolate: IdentifierSpec;
    static interpolate: IdentifierSpec;
    static EMPTY_ARRAY: IdentifierSpec;
    static EMPTY_MAP: IdentifierSpec;
    static Renderer: IdentifierSpec;
    static viewDef: IdentifierSpec;
    static elementDef: IdentifierSpec;
    static anchorDef: IdentifierSpec;
    static textDef: IdentifierSpec;
    static directiveDef: IdentifierSpec;
    static providerDef: IdentifierSpec;
    static queryDef: IdentifierSpec;
    static pureArrayDef: IdentifierSpec;
    static pureObjectDef: IdentifierSpec;
    static purePipeDef: IdentifierSpec;
    static pipeDef: IdentifierSpec;
    static nodeValue: IdentifierSpec;
    static ngContentDef: IdentifierSpec;
    static unwrapValue: IdentifierSpec;
    static createRendererType2: IdentifierSpec;
    static RendererType2: IdentifierSpec;
    static ViewDefinition: IdentifierSpec;
    static createComponentFactory: IdentifierSpec;
}
/**
 * Builds the asset URL for a resource inside a package.
 * NOTE(review): declaration only — semantics inferred from the signature
 * and name; confirm against the implementation module.
 */
export declare function assetUrl(pkg: string, path?: string, type?: string): string;
/** Resolves a spec to its runtime value. NOTE(review): inferred — confirm in implementation. */
export declare function resolveIdentifier(identifier: IdentifierSpec): any;
/** Wraps a spec in compiler identifier metadata (see return type). */
export declare function createIdentifier(identifier: IdentifierSpec): CompileIdentifierMetadata;
/** Derives a DI token from existing identifier metadata (see return type). */
export declare function identifierToken(identifier: CompileIdentifierMetadata): CompileTokenMetadata;
/** Spec-to-token convenience. NOTE(review): presumably createIdentifier + identifierToken — confirm. */
export declare function createIdentifierToken(identifier: IdentifierSpec): CompileTokenMetadata;
/** Identifier metadata for one member of an enum-like spec. NOTE(review): inferred — confirm. */
export declare function createEnumIdentifier(enumType: IdentifierSpec, name: string): CompileIdentifierMetadata;
| DaanKrug/Rocker-Framework | samples/Rocker-Framework-Functional-Show-Case/node_modules/@angular/compiler/src/identifiers.d.ts | TypeScript | gpl-3.0 | 2,628 |
using System;
using System.Collections.Generic;
using UnityEngine;
namespace UnityTest
{
    /// <summary>
    /// Serializable record of one unit test's outcome. Implements
    /// <see cref="ITestResult"/> and can be persisted by the editor, then
    /// refreshed from a newer result via <see cref="Update"/>.
    /// </summary>
    [Serializable]
    public class UnitTestResult : ITestResult
    {
        /// <summary>True once the test has actually been executed.</summary>
        public bool Executed { get; set; }
        /// <summary>Method name of the test (delegates to <see cref="Test"/>).</summary>
        public string Name { get { return Test.MethodName; } }
        /// <summary>Fully qualified test name (delegates to <see cref="Test"/>).</summary>
        public string FullName { get { return Test.FullName; } }
        /// <summary>Outcome category (success, failure, error, inconclusive, ...).</summary>
        public TestResultState ResultState { get; set; }
        /// <summary>Metadata describing the test this result belongs to.</summary>
        public UnitTestInfo Test { get; set; }
        /// <summary>Stable identifier of the test (delegates to <see cref="Test"/>).</summary>
        public string Id { get { return Test.Id; } }
        /// <summary>Run duration. NOTE(review): unit (seconds?) not visible here — confirm against producer.</summary>
        public double Duration { get; set; }
        /// <summary>Status/failure message reported by the runner.</summary>
        public string Message { get; set; }
        /// <summary>Stack trace captured on failure, if any.</summary>
        public string StackTrace { get; set; }
        /// <summary>True when the test (or its result) is marked as ignored.</summary>
        public bool IsIgnored { get; set; }
        /// <summary>Log output associated with the run.</summary>
        public string Logs { get; set; }
        /// <summary>True when this stored result is stale relative to the current test code.</summary>
        public bool Outdated { get; set; }
        /// <summary>
        /// Copies all outcome fields from <paramref name="source"/> into this
        /// instance. <see cref="IsIgnored"/> is additionally forced true when
        /// the associated <see cref="Test"/> itself is marked ignored.
        /// </summary>
        /// <param name="source">Freshly produced result to copy from.</param>
        /// <param name="outdated">Whether this stored result should be flagged as stale.</param>
        public void Update(ITestResult source, bool outdated)
        {
            ResultState = source.ResultState;
            Duration = source.Duration;
            Message = source.Message;
            Logs = source.Logs;
            StackTrace = source.StackTrace;
            Executed = source.Executed;
            // Ignored if either the incoming result says so or the test metadata does.
            IsIgnored = source.IsIgnored || (Test != null && Test.IsIgnored);
            Outdated = outdated;
        }
        #region Helper methods
        /// <summary>Result is an assertion failure.</summary>
        public bool IsFailure
        {
            get { return ResultState == TestResultState.Failure; }
        }
        /// <summary>Result is an unexpected error (exception outside assertions).</summary>
        public bool IsError
        {
            get { return ResultState == TestResultState.Error; }
        }
        /// <summary>Result is a pass.</summary>
        public bool IsSuccess
        {
            get { return ResultState == TestResultState.Success; }
        }
        /// <summary>Result could not be determined.</summary>
        public bool IsInconclusive
        {
            get { return ResultState == TestResultState.Inconclusive; }
        }
        #endregion
    }
}
/**********************************************************************************
*
* $Id$
*
***********************************************************************************
*
* Copyright (c) 2007, 2008 The Sakai Foundation
*
* Licensed under the Educational Community License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.opensource.org/licenses/ECL-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************************/
package org.sakaiproject.service.gradebook.shared;
import java.util.Date;
import java.util.Collection;
import java.util.List;
import java.util.Map;
/**
* This service is designed for use by external assessment engines. These use
* the Gradebook as a passive mirror of their own assignments and scores,
* letting Gradebook users see those assignments alongside Gradebook-managed
* assignments, and combine them when calculating a course grade. The Gradebook
* application itself will not modify externally-managed assignments and scores.
*
* <b>WARNING</b>: Because the Gradebook project team is not responsible for
* defining the external clients' requirements, the Gradebook service does not
* attempt to guess at their authorization needs. Our administrative and
* external-assessment methods simply follow orders and assume that the caller
* has taken the responsibility of "doing the right thing." DO NOT wrap these
* methods in an open web service!
*/
public interface GradebookExternalAssessmentService {
/**
* @deprecated Replaced by
 * {@link #addExternalAssessment(String, String, String, String, Double, Date, String, Boolean)}
*/
public void addExternalAssessment(String gradebookUid, String externalId, String externalUrl,
String title, double points, Date dueDate, String externalServiceDescription)
throws GradebookNotFoundException, ConflictingAssignmentNameException,
ConflictingExternalIdException, AssignmentHasIllegalPointsException;
/**
* Add an externally-managed assessment to a gradebook to be treated as a
* read-only assignment. The gradebook application will not modify the
* assessment properties or create any scores for the assessment.
* Since each assignment in a given gradebook must have a unique name,
* conflicts are possible.
*
* @param gradebookUid
* @param externalId
* some unique identifier which Samigo uses for the assessment.
* The externalId is globally namespaced within the gradebook, so
* if other apps decide to put assessments into the gradebook,
* they should prefix their externalIds with a well known (and
* unique within sakai) string.
* @param externalUrl
* a link to go to if the instructor or student wants to look at the assessment
* in Samigo; if null, no direct link will be provided in the
* gradebook, and the user will have to navigate to the assessment
* within the other application
* @param title
* @param points
* this is the total amount of points available and must be greater than zero.
* it could be null if it's an ungraded item.
* @param dueDate
* @param externalServiceDescription
* @param ungraded
*
* @param externalServiceDescription
* what to display as the source of the assignment (e.g., "from Samigo")
*
*/
public void addExternalAssessment(String gradebookUid, String externalId, String externalUrl,
String title, Double points, Date dueDate, String externalServiceDescription, Boolean ungraded)
throws GradebookNotFoundException, ConflictingAssignmentNameException,
ConflictingExternalIdException, AssignmentHasIllegalPointsException;
/**
* This method is identical to {@link #addExternalAssessment(String, String, String, String, Double, Date, String, Boolean)} but
* allows you to also specify the associated Category for this assignment. If the gradebook is set up for categories and
* categoryId is null, assignment category will be unassigned
* @param gradebookUid
* @param externalId
* @param externalUrl
* @param title
* @param points
* @param dueDate
* @param externalServiceDescription
* @param ungraded
* @param categoryId
* @throws GradebookNotFoundException
* @throws ConflictingAssignmentNameException
* @throws ConflictingExternalIdException
* @throws AssignmentHasIllegalPointsException
* @throws InvalidCategoryException
*/
public void addExternalAssessment(String gradebookUid, String externalId, String externalUrl,
String title, Double points, Date dueDate, String externalServiceDescription, Boolean ungraded, Long categoryId)
throws GradebookNotFoundException, ConflictingAssignmentNameException,
ConflictingExternalIdException, AssignmentHasIllegalPointsException, InvalidCategoryException;
/**
* @deprecated Replaced by
 * {@link #updateExternalAssessment(String, String, String, String, Double, Date, Boolean)}
*/
public void updateExternalAssessment(String gradebookUid, String externalId, String externalUrl,
String title, double points, Date dueDate)
throws GradebookNotFoundException, AssessmentNotFoundException,
ConflictingAssignmentNameException, AssignmentHasIllegalPointsException;
/**
* Update an external assessment
* @param gradebookUid
* @param externalId
* @param externalUrl
* @param title
* @param points
* @param dueDate
* @param ungraded
* @throws GradebookNotFoundException
* @throws AssessmentNotFoundException
* @throws ConflictingAssignmentNameException
* @throws AssignmentHasIllegalPointsException
*/
public void updateExternalAssessment(String gradebookUid, String externalId, String externalUrl,
String title, Double points, Date dueDate, Boolean ungraded)
throws GradebookNotFoundException, AssessmentNotFoundException,
ConflictingAssignmentNameException, AssignmentHasIllegalPointsException;
/**
* Remove the assessment reference from the gradebook. Although Samigo
* doesn't currently delete assessments, an instructor can retract an
* assessment to keep it from students. Since such an assessment would
* presumably no longer be used to calculate final grades, Samigo should
* also remove that assessment from the gradebook.
*
* @param externalId
* the UID of the assessment
*/
public void removeExternalAssessment(String gradebookUid, String externalId)
throws GradebookNotFoundException, AssessmentNotFoundException;
/**
* Updates an external score for an external assignment in the gradebook.
*
* @param gradebookUid
* The Uid of the gradebook
* @param externalId
* The external ID of the assignment/assessment
* @param studentUid
* The unique id of the student
* @param points
* The number of points earned on this assessment, or null if a score
* should be removed
*/
public void updateExternalAssessmentScore(String gradebookUid, String externalId,
String studentUid, String points)
throws GradebookNotFoundException, AssessmentNotFoundException;
/**
*
* @param gradebookUid
* @param externalId
* @param studentUidsToScores
* @throws GradebookNotFoundException
* @throws AssessmentNotFoundException
*
* @deprecated Replaced by
 * {@link #updateExternalAssessmentScoresString(String, String, Map)}
*/
public void updateExternalAssessmentScores(String gradebookUid,
String externalId, Map<String, Double> studentUidsToScores)
throws GradebookNotFoundException, AssessmentNotFoundException;
/**
* Updates a set of external scores for an external assignment in the gradebook.
*
* @param gradebookUid
* The Uid of the gradebook
* @param externalId
* The external ID of the assignment/assessment
* @param studentUidsToScores
* A map whose String keys are the unique ID strings of the students and whose
* String values are points earned on this assessment or null if the score
* should be removed.
*/
public void updateExternalAssessmentScoresString(String gradebookUid,
String externalId, Map<String, String> studentUidsToScores)
throws GradebookNotFoundException, AssessmentNotFoundException;
/**
* Updates an external comment for an external assignment in the gradebook.
*
* @param gradebookUid
* The Uid of the gradebook
* @param externalId
* The external ID of the assignment/assessment
* @param studentUid
* The unique id of the student
* @param comment
* The comment to be added to this grade, or null if a comment
* should be removed
*/
public void updateExternalAssessmentComment(String gradebookUid,
String externalId, String studentUid, String comment )
throws GradebookNotFoundException, AssessmentNotFoundException;
/**
* Updates a set of external comments for an external assignment in the gradebook.
*
* @param gradebookUid
* The Uid of the gradebook
* @param externalId
* The external ID of the assignment/assessment
 * @param studentUidsToComments
* A map whose String keys are the unique ID strings of the students and whose
* String values are comments or null if the comments
* should be removed.
*/
public void updateExternalAssessmentComments(String gradebookUid,
String externalId, Map<String, String> studentUidsToComments)
throws GradebookNotFoundException, AssessmentNotFoundException;
/**
* Check to see if an assignment with the given name already exists
* in the given gradebook. This will give external assessment systems
* a chance to avoid the ConflictingAssignmentNameException.
*/
public boolean isAssignmentDefined(String gradebookUid, String assignmentTitle)
throws GradebookNotFoundException;
/**
* Check to see if an assignment with the given external id already exists
* in the given gradebook. This will give external assessment systems
* a chance to avoid the ConflictingExternalIdException.
*
* @param gradebookUid The gradebook's unique identifier
* @param externalId The external assessment's external identifier
*/
public boolean isExternalAssignmentDefined(String gradebookUid, String externalId)
throws GradebookNotFoundException;
/**
* Check with the appropriate external service if a specific assignment is
* available only to groups.
*
* @param gradebookUid The gradebook's unique identifier
* @param externalId The external assessment's external identifier
*/
public boolean isExternalAssignmentGrouped(String gradebookUid, String externalId)
throws GradebookNotFoundException;
/**
* Check with the appropriate external service if a specific assignment is
* available to a specific user (i.e., the user is in an appropriate group).
* Note that this method will return true if the assignment exists in the
* gradebook and is marked as externally maintained while no provider
* recognizes it; this is to maintain a safer default (no change from the
* 2.8 release) for tools that have not implemented a provider.
*
* @param gradebookUid The gradebook's unique identifier
* @param externalId The external assessment's external identifier
* @param userId The user ID to check
*/
public boolean isExternalAssignmentVisible(String gradebookUid, String externalId, String userId)
throws GradebookNotFoundException;
/**
* Retrieve all assignments for a gradebook that are marked as externally
* maintained and are visible to the current user. Assignments may be included
* with a null providerAppKey, indicating that the gradebook references the
* assignment, but no provider claims responsibility for it.
*
* @param gradebookUid The gradebook's unique identifier
* @return A map from the externalId of each activity to the providerAppKey
*/
public Map<String, String> getExternalAssignmentsForCurrentUser(String gradebookUid)
throws GradebookNotFoundException;
/**
* Retrieve a list of all visible, external assignments for a set of users.
*
* @param gradebookUid The gradebook's unique identifier
* @param studentIds The collection of student IDs for which to retrieve assignments
* @return A map from the student ID to all visible, external activity IDs
*/
public Map<String, List<String>> getVisibleExternalAssignments(String gradebookUid, Collection<String> studentIds)
throws GradebookNotFoundException;
/**
* Register a new ExternalAssignmentProvider for handling the integration of external
* assessment sources with the sakai gradebook
* Registering more than once will overwrite the current with the new one
*
* @param provider the provider implementation object
*/
public void registerExternalAssignmentProvider(ExternalAssignmentProvider provider);
/**
* Remove/unregister any ExternalAssignmentProvider which is currently registered,
 * does nothing if the provider does not exist
*
* @param providerAppKey the unique app key for a provider
*/
public void unregisterExternalAssignmentProvider(String providerAppKey);
/**
* Checks to see whether a gradebook with the given uid exists.
*
* @param gradebookUid
* The gradebook UID to check
* @return Whether the gradebook exists
*/
public boolean isGradebookDefined(String gradebookUid);
/**
* Break the connection between an external assessment engine and an assessment which
* it created, giving it up to the Gradebook application to control from now on.
*
* @param gradebookUid
* @param externalId
*/
public void setExternalAssessmentToGradebookAssignment(String gradebookUid, String externalId);
/**
* Get the category of a gradebook with the externalId given
*
* @param gradebookUId
* @param externalId
* @return
*/
public Long getExternalAssessmentCategoryId(String gradebookUId, String externalId);
}
| rodriguezdevera/sakai | edu-services/gradebook-service/api/src/java/org/sakaiproject/service/gradebook/shared/GradebookExternalAssessmentService.java | Java | apache-2.0 | 14,476 |
/******************************************************************************
*
* Name: acoutput.h -- debug output
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#ifndef __ACOUTPUT_H__
#define __ACOUTPUT_H__
/*
* Debug levels and component IDs. These are used to control the
* granularity of the output of the ACPI_DEBUG_PRINT macro -- on a
* per-component basis and a per-exception-type basis.
*/
/* Component IDs are used in the global "DebugLayer" */
#define ACPI_UTILITIES 0x00000001
#define ACPI_HARDWARE 0x00000002
#define ACPI_EVENTS 0x00000004
#define ACPI_TABLES 0x00000008
#define ACPI_NAMESPACE 0x00000010
#define ACPI_PARSER 0x00000020
#define ACPI_DISPATCHER 0x00000040
#define ACPI_EXECUTER 0x00000080
#define ACPI_RESOURCES 0x00000100
#define ACPI_CA_DEBUGGER 0x00000200
#define ACPI_OS_SERVICES 0x00000400
#define ACPI_CA_DISASSEMBLER 0x00000800
/* Component IDs for ACPI tools and utilities */
#define ACPI_COMPILER 0x00001000
#define ACPI_TOOLS 0x00002000
#define ACPI_EXAMPLE 0x00004000
#define ACPI_DRIVER 0x00008000
#define DT_COMPILER 0x00010000
#define ASL_PREPROCESSOR 0x00020000
#define ACPI_ALL_COMPONENTS 0x0001FFFF
#define ACPI_COMPONENT_DEFAULT (ACPI_ALL_COMPONENTS)
/* Component IDs reserved for ACPI drivers */
#define ACPI_ALL_DRIVERS 0xFFFF0000
/*
 * Raw debug output levels, do not use these in the ACPI_DEBUG_PRINT macros
 *
 * The composite ACPI_LV_VERBOSITYn masks below are parenthesized so that
 * each expands to a single operand. An unparenthesized "a | b" body would
 * rebind under a higher-precedence neighboring operator at the expansion
 * site (e.g. "ACPI_LV_VERBOSITY1 & mask" would parse as
 * "a | (b & mask)"), silently yielding the wrong mask.
 */
#define ACPI_LV_INIT                0x00000001
#define ACPI_LV_DEBUG_OBJECT        0x00000002
#define ACPI_LV_INFO                0x00000004
#define ACPI_LV_REPAIR              0x00000008
#define ACPI_LV_ALL_EXCEPTIONS      0x0000000F
/* Trace verbosity level 1 [Standard Trace Level] */
#define ACPI_LV_INIT_NAMES          0x00000020
#define ACPI_LV_PARSE               0x00000040
#define ACPI_LV_LOAD                0x00000080
#define ACPI_LV_DISPATCH            0x00000100
#define ACPI_LV_EXEC                0x00000200
#define ACPI_LV_NAMES               0x00000400
#define ACPI_LV_OPREGION            0x00000800
#define ACPI_LV_BFIELD              0x00001000
#define ACPI_LV_TABLES              0x00002000
#define ACPI_LV_VALUES              0x00004000
#define ACPI_LV_OBJECTS             0x00008000
#define ACPI_LV_RESOURCES           0x00010000
#define ACPI_LV_USER_REQUESTS       0x00020000
#define ACPI_LV_PACKAGE             0x00040000
#define ACPI_LV_VERBOSITY1          (0x0007FF40 | ACPI_LV_ALL_EXCEPTIONS)
/* Trace verbosity level 2 [Function tracing and memory allocation] */
#define ACPI_LV_ALLOCATIONS         0x00100000
#define ACPI_LV_FUNCTIONS           0x00200000
#define ACPI_LV_OPTIMIZATIONS       0x00400000
#define ACPI_LV_VERBOSITY2          (0x00700000 | ACPI_LV_VERBOSITY1)
#define ACPI_LV_ALL                 ACPI_LV_VERBOSITY2
/* Trace verbosity level 3 [Threading, I/O, and Interrupts] */
#define ACPI_LV_MUTEX               0x01000000
#define ACPI_LV_THREADS             0x02000000
#define ACPI_LV_IO                  0x04000000
#define ACPI_LV_INTERRUPTS          0x08000000
#define ACPI_LV_VERBOSITY3          (0x0F000000 | ACPI_LV_VERBOSITY2)
/* Exceptionally verbose output -- also used in the global "DebugLevel" */
#define ACPI_LV_AML_DISASSEMBLE 0x10000000
#define ACPI_LV_VERBOSE_INFO 0x20000000
#define ACPI_LV_FULL_TABLES 0x40000000
#define ACPI_LV_EVENTS 0x80000000
#define ACPI_LV_VERBOSE 0xF0000000
/*
* Debug level macros that are used in the DEBUG_PRINT macros
*/
#define ACPI_DEBUG_LEVEL(dl) (u32) dl,ACPI_DEBUG_PARAMETERS
/*
* Exception level -- used in the global "DebugLevel"
*
* Note: For errors, use the ACPI_ERROR or ACPI_EXCEPTION interfaces.
* For warnings, use ACPI_WARNING.
*/
#define ACPI_DB_INIT ACPI_DEBUG_LEVEL (ACPI_LV_INIT)
#define ACPI_DB_DEBUG_OBJECT ACPI_DEBUG_LEVEL (ACPI_LV_DEBUG_OBJECT)
#define ACPI_DB_INFO ACPI_DEBUG_LEVEL (ACPI_LV_INFO)
#define ACPI_DB_REPAIR ACPI_DEBUG_LEVEL (ACPI_LV_REPAIR)
#define ACPI_DB_ALL_EXCEPTIONS ACPI_DEBUG_LEVEL (ACPI_LV_ALL_EXCEPTIONS)
/* Trace level -- also used in the global "DebugLevel" */
#define ACPI_DB_INIT_NAMES ACPI_DEBUG_LEVEL (ACPI_LV_INIT_NAMES)
#define ACPI_DB_THREADS ACPI_DEBUG_LEVEL (ACPI_LV_THREADS)
#define ACPI_DB_PARSE ACPI_DEBUG_LEVEL (ACPI_LV_PARSE)
#define ACPI_DB_DISPATCH ACPI_DEBUG_LEVEL (ACPI_LV_DISPATCH)
#define ACPI_DB_LOAD ACPI_DEBUG_LEVEL (ACPI_LV_LOAD)
#define ACPI_DB_EXEC ACPI_DEBUG_LEVEL (ACPI_LV_EXEC)
#define ACPI_DB_NAMES ACPI_DEBUG_LEVEL (ACPI_LV_NAMES)
#define ACPI_DB_OPREGION ACPI_DEBUG_LEVEL (ACPI_LV_OPREGION)
#define ACPI_DB_BFIELD ACPI_DEBUG_LEVEL (ACPI_LV_BFIELD)
#define ACPI_DB_TABLES ACPI_DEBUG_LEVEL (ACPI_LV_TABLES)
#define ACPI_DB_FUNCTIONS ACPI_DEBUG_LEVEL (ACPI_LV_FUNCTIONS)
#define ACPI_DB_OPTIMIZATIONS ACPI_DEBUG_LEVEL (ACPI_LV_OPTIMIZATIONS)
#define ACPI_DB_VALUES ACPI_DEBUG_LEVEL (ACPI_LV_VALUES)
#define ACPI_DB_OBJECTS ACPI_DEBUG_LEVEL (ACPI_LV_OBJECTS)
#define ACPI_DB_ALLOCATIONS ACPI_DEBUG_LEVEL (ACPI_LV_ALLOCATIONS)
#define ACPI_DB_RESOURCES ACPI_DEBUG_LEVEL (ACPI_LV_RESOURCES)
#define ACPI_DB_IO ACPI_DEBUG_LEVEL (ACPI_LV_IO)
#define ACPI_DB_INTERRUPTS ACPI_DEBUG_LEVEL (ACPI_LV_INTERRUPTS)
#define ACPI_DB_USER_REQUESTS ACPI_DEBUG_LEVEL (ACPI_LV_USER_REQUESTS)
#define ACPI_DB_PACKAGE ACPI_DEBUG_LEVEL (ACPI_LV_PACKAGE)
#define ACPI_DB_MUTEX ACPI_DEBUG_LEVEL (ACPI_LV_MUTEX)
#define ACPI_DB_EVENTS ACPI_DEBUG_LEVEL (ACPI_LV_EVENTS)
#define ACPI_DB_ALL ACPI_DEBUG_LEVEL (ACPI_LV_ALL)
/* Defaults for debug_level, debug and normal */
#define ACPI_DEBUG_DEFAULT (ACPI_LV_INFO | ACPI_LV_REPAIR)
#define ACPI_NORMAL_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_REPAIR)
#define ACPI_DEBUG_ALL (ACPI_LV_AML_DISASSEMBLE | ACPI_LV_ALL_EXCEPTIONS | ACPI_LV_ALL)
#if defined (ACPI_DEBUG_OUTPUT) || !defined (ACPI_NO_ERROR_MESSAGES)
/*
* The module name is used primarily for error and debug messages.
* The __FILE__ macro is not very useful for this, because it
* usually includes the entire pathname to the module making the
* debug output difficult to read.
*/
#define ACPI_MODULE_NAME(name) static const char ACPI_UNUSED_VAR _acpi_module_name[] = name;
#else
/*
* For the no-debug and no-error-msg cases, we must at least define
* a null module name.
*/
#define ACPI_MODULE_NAME(name)
#define _acpi_module_name ""
#endif
/*
* Ascii error messages can be configured out
*/
#ifndef ACPI_NO_ERROR_MESSAGES
#define AE_INFO _acpi_module_name, __LINE__
/*
* Error reporting. Callers module and line number are inserted by AE_INFO,
* the plist contains a set of parens to allow variable-length lists.
* These macros are used for both the debug and non-debug versions of the code.
*/
#define ACPI_INFO(plist) acpi_info plist
#define ACPI_WARNING(plist) acpi_warning plist
#define ACPI_EXCEPTION(plist) acpi_exception plist
#define ACPI_ERROR(plist) acpi_error plist
#define ACPI_BIOS_WARNING(plist) acpi_bios_warning plist
#define ACPI_BIOS_ERROR(plist) acpi_bios_error plist
#define ACPI_DEBUG_OBJECT(obj,l,i) acpi_ex_do_debug_object(obj,l,i)
#else
/* No error messages */
#define ACPI_INFO(plist)
#define ACPI_WARNING(plist)
#define ACPI_EXCEPTION(plist)
#define ACPI_ERROR(plist)
#define ACPI_BIOS_WARNING(plist)
#define ACPI_BIOS_ERROR(plist)
#define ACPI_DEBUG_OBJECT(obj,l,i)
#endif /* ACPI_NO_ERROR_MESSAGES */
/*
* Debug macros that are conditionally compiled
*/
#ifdef ACPI_DEBUG_OUTPUT
/*
* If ACPI_GET_FUNCTION_NAME was not defined in the compiler-dependent header,
* define it now. This is the case where there the compiler does not support
* a __FUNCTION__ macro or equivalent.
*/
#ifndef ACPI_GET_FUNCTION_NAME
#define ACPI_GET_FUNCTION_NAME _acpi_function_name
/*
* The Name parameter should be the procedure name as a quoted string.
* The function name is also used by the function exit macros below.
* Note: (const char) is used to be compatible with the debug interfaces
* and macros such as __FUNCTION__.
*/
#define ACPI_FUNCTION_NAME(name) static const char _acpi_function_name[] = #name;
#else
/* Compiler supports __FUNCTION__ (or equivalent) -- Ignore this macro */
#define ACPI_FUNCTION_NAME(name)
#endif /* ACPI_GET_FUNCTION_NAME */
/*
* Common parameters used for debug output functions:
* line number, function name, module(file) name, component ID
*/
#define ACPI_DEBUG_PARAMETERS \
__LINE__, ACPI_GET_FUNCTION_NAME, _acpi_module_name, _COMPONENT
/* Check if debug output is currently dynamically enabled */
/*
 * Both macro parameters are parenthesized: callers may pass composite
 * expressions such as "ACPI_LV_A | ACPI_LV_B", which without the inner
 * parentheses would rebind under the higher-precedence "&" operator and
 * test the wrong mask.
 */
#define ACPI_IS_DEBUG_ENABLED(level, component) \
	(((level) & acpi_dbg_level) && ((component) & acpi_dbg_layer))
/*
* Master debug print macros
* Print message if and only if:
* 1) Debug print for the current component is enabled
* 2) Debug error level or trace level for the print statement is enabled
*
* November 2012: Moved the runtime check for whether to actually emit the
* debug message outside of the print function itself. This improves overall
* performance at a relatively small code cost. Implementation involves the
* use of variadic macros supported by C99.
*
* Note: the ACPI_DO_WHILE0 macro is used to prevent some compilers from
* complaining about these constructs. On other compilers the do...while
* adds some extra code, so this feature is optional.
*/
#ifdef ACPI_USE_DO_WHILE_0
#define ACPI_DO_WHILE0(a) do a while(0)
#else
#define ACPI_DO_WHILE0(a) a
#endif
/* DEBUG_PRINT functions */
#define ACPI_DEBUG_PRINT(plist) ACPI_ACTUAL_DEBUG plist
#define ACPI_DEBUG_PRINT_RAW(plist) ACPI_ACTUAL_DEBUG_RAW plist
/* Helper macros for DEBUG_PRINT */
#define ACPI_DO_DEBUG_PRINT(function, level, line, filename, modulename, component, ...) \
ACPI_DO_WHILE0 ({ \
if (ACPI_IS_DEBUG_ENABLED (level, component)) \
{ \
function (level, line, filename, modulename, component, __VA_ARGS__); \
} \
})
#define ACPI_ACTUAL_DEBUG(level, line, filename, modulename, component, ...) \
ACPI_DO_DEBUG_PRINT (acpi_debug_print, level, line, \
filename, modulename, component, __VA_ARGS__)
#define ACPI_ACTUAL_DEBUG_RAW(level, line, filename, modulename, component, ...) \
ACPI_DO_DEBUG_PRINT (acpi_debug_print_raw, level, line, \
filename, modulename, component, __VA_ARGS__)
/*
* Function entry tracing
*
* The name of the function is emitted as a local variable that is
* intended to be used by both the entry trace and the exit trace.
*/
/* Helper macro */
#define ACPI_TRACE_ENTRY(name, function, type, param) \
ACPI_FUNCTION_NAME (name) \
function (ACPI_DEBUG_PARAMETERS, (type) (param))
/* The actual entry trace macros */
#define ACPI_FUNCTION_TRACE(name) \
ACPI_FUNCTION_NAME(name) \
acpi_ut_trace (ACPI_DEBUG_PARAMETERS)
#define ACPI_FUNCTION_TRACE_PTR(name, pointer) \
ACPI_TRACE_ENTRY (name, acpi_ut_trace_ptr, void *, pointer)
#define ACPI_FUNCTION_TRACE_U32(name, value) \
ACPI_TRACE_ENTRY (name, acpi_ut_trace_u32, u32, value)
#define ACPI_FUNCTION_TRACE_STR(name, string) \
ACPI_TRACE_ENTRY (name, acpi_ut_trace_str, char *, string)
#define ACPI_FUNCTION_ENTRY() \
acpi_ut_track_stack_ptr()
/*
* Function exit tracing
*
* These macros include a return statement. This is usually considered
* bad form, but having a separate exit macro before the actual return
* is very ugly and difficult to maintain.
*
* One of the FUNCTION_TRACE macros above must be used in conjunction
* with these macros so that "_AcpiFunctionName" is defined.
*
* There are two versions of most of the return macros. The default version is
* safer, since it avoids side-effects by guaranteeing that the argument will
* not be evaluated twice.
*
* A less-safe version of the macros is provided for optional use if the
* compiler uses excessive CPU stack (for example, this may happen in the
 * debug case if code optimization is disabled.)
*/
/* Exit trace helper macro */
#ifndef ACPI_SIMPLE_RETURN_MACROS
#define ACPI_TRACE_EXIT(function, type, param) \
ACPI_DO_WHILE0 ({ \
register type _param = (type) (param); \
function (ACPI_DEBUG_PARAMETERS, _param); \
return (_param); \
})
#else /* Use original less-safe macros */
#define ACPI_TRACE_EXIT(function, type, param) \
ACPI_DO_WHILE0 ({ \
function (ACPI_DEBUG_PARAMETERS, (type) (param)); \
return (param); \
})
#endif /* ACPI_SIMPLE_RETURN_MACROS */
/* The actual exit macros */
#define return_VOID \
ACPI_DO_WHILE0 ({ \
acpi_ut_exit (ACPI_DEBUG_PARAMETERS); \
return; \
})
#define return_ACPI_STATUS(status) \
ACPI_TRACE_EXIT (acpi_ut_status_exit, acpi_status, status)
#define return_PTR(pointer) \
ACPI_TRACE_EXIT (acpi_ut_ptr_exit, void *, pointer)
#define return_VALUE(value) \
ACPI_TRACE_EXIT (acpi_ut_value_exit, u64, value)
#define return_UINT32(value) \
ACPI_TRACE_EXIT (acpi_ut_value_exit, u32, value)
#define return_UINT8(value) \
ACPI_TRACE_EXIT (acpi_ut_value_exit, u8, value)
/* Conditional execution */
#define ACPI_DEBUG_EXEC(a) a
#define ACPI_DEBUG_ONLY_MEMBERS(a) a;
#define _VERBOSE_STRUCTURES
/* Various object display routines for debug */
#define ACPI_DUMP_STACK_ENTRY(a) acpi_ex_dump_operand((a), 0)
#define ACPI_DUMP_OPERANDS(a, b ,c) acpi_ex_dump_operands(a, b, c)
#define ACPI_DUMP_ENTRY(a, b) acpi_ns_dump_entry (a, b)
#define ACPI_DUMP_PATHNAME(a, b, c, d) acpi_ns_dump_pathname(a, b, c, d)
#define ACPI_DUMP_BUFFER(a, b) acpi_ut_debug_dump_buffer((u8 *) a, b, DB_BYTE_DISPLAY, _COMPONENT)
#else /* ACPI_DEBUG_OUTPUT */
/*
* This is the non-debug case -- make everything go away,
* leaving no executable debug code!
*/
#define ACPI_FUNCTION_NAME(a)
#define ACPI_DEBUG_PRINT(pl)
#define ACPI_DEBUG_PRINT_RAW(pl)
#define ACPI_DEBUG_EXEC(a)
#define ACPI_DEBUG_ONLY_MEMBERS(a)
#define ACPI_FUNCTION_TRACE(a)
#define ACPI_FUNCTION_TRACE_PTR(a, b)
#define ACPI_FUNCTION_TRACE_U32(a, b)
#define ACPI_FUNCTION_TRACE_STR(a, b)
#define ACPI_FUNCTION_EXIT
#define ACPI_FUNCTION_STATUS_EXIT(s)
#define ACPI_FUNCTION_VALUE_EXIT(s)
#define ACPI_FUNCTION_ENTRY()
#define ACPI_DUMP_STACK_ENTRY(a)
#define ACPI_DUMP_OPERANDS(a, b, c)
#define ACPI_DUMP_ENTRY(a, b)
#define ACPI_DUMP_TABLES(a, b)
#define ACPI_DUMP_PATHNAME(a, b, c, d)
#define ACPI_DUMP_BUFFER(a, b)
#define ACPI_DEBUG_PRINT(pl)
#define ACPI_DEBUG_PRINT_RAW(pl)
#define ACPI_IS_DEBUG_ENABLED(level, component) 0
/* Return macros must have a return statement at the minimum */
#define return_VOID return
#define return_ACPI_STATUS(s) return(s)
#define return_PTR(s) return(s)
#define return_VALUE(s) return(s)
#define return_UINT8(s) return(s)
#define return_UINT32(s) return(s)
#endif /* ACPI_DEBUG_OUTPUT */
#endif /* __ACOUTPUT_H__ */
| prasidh09/cse506 | unionfs-3.10.y/include/acpi/acoutput.h | C | gpl-2.0 | 17,351 |
declare namespace jsrsasign.KJUR.asn1.csr {
/**
* ASN.1 CertificationRequestInfo structure class
* @param params associative array of parameters (ex. {})
* @description
* ```
* // -- DEFINITION OF ASN.1 SYNTAX --
* // CertificationRequestInfo ::= SEQUENCE {
* // version INTEGER { v1(0) } (v1,...),
* // subject Name,
* // subjectPKInfo SubjectPublicKeyInfo{{ PKInfoAlgorithms }},
* // attributes [0] Attributes{{ CRIAttributes }} }
* ```
*
* @example
* csri = new KJUR.asn1.csr.CertificationRequestInfo();
* csri.setSubjectByParam({'str': '/C=US/O=Test/CN=example.com'});
* csri.setSubjectPublicKeyByGetKey(pubKeyObj);
*/
    class CertificationRequestInfo extends ASN1Object {
        constructor();
        /** (internal) resets the encoder state; called by the constructor. */
        _initialize(): void;
        /**
         * set subject name field by parameter
         * @param x500NameParam X500Name parameter
         * @description
         * @example
         * csri.setSubjectByParam({'str': '/C=US/CN=b'});
         * @see KJUR.asn1.x509.X500Name
         */
        setSubjectByParam(x500NameParam: StringParam): void;
        /**
         * set subject public key info by RSA/ECDSA/DSA key parameter
         * @param keyParam public key parameter which passed to `KEYUTIL.getKey` argument
         * @example
         * csri.setSubjectPublicKeyByGetKey(certPEMString); // or
         * csri.setSubjectPublicKeyByGetKey(pkcs8PublicKeyPEMString); // or
         * csri.setSubjectPublicKeyByGetKey(kjurCryptoECDSAKeyObject); // et.al.
         * @see KJUR.asn1.x509.SubjectPublicKeyInfo
         * @see KEYUTIL.getKey
         */
        setSubjectPublicKeyByGetKey(
            keyParam: RSAKey | crypto.ECDSA | crypto.DSA | jws.JWS.JsonWebKey | { n: string; e: string } | string,
        ): void;
        /**
         * append X.509v3 extension to this object by name and parameters
         * @param name name of X.509v3 Extension object
         * @param extParams parameters as argument of Extension constructor.
         * @see KJUR.asn1.x509.Extension
         * @example
         * var o = new KJUR.asn1.csr.CertificationRequestInfo();
         * o.appendExtensionByName('BasicConstraints', {'cA':true, 'critical': true});
         * o.appendExtensionByName('KeyUsage', {'bin':'11'});
         * o.appendExtensionByName('CRLDistributionPoints', {uri: 'http://aaa.com/a.crl'});
         * o.appendExtensionByName('ExtKeyUsage', {array: [{name: 'clientAuth'}]});
         * o.appendExtensionByName('AuthorityKeyIdentifier', {kid: '1234ab..'});
         * o.appendExtensionByName('AuthorityInfoAccess', {array: [{accessMethod:{oid:...},accessLocation:{uri:...}}]});
         */
        appendExtensionByName(
            name: string,
            extParams:
                | { ca: boolean; critical: boolean }
                | BinParam
                | x509.UriParam
                | ArrayParam<{ name: string }>
                | { kid: string }
                | ArrayParam<{ accessMethod: { oid: string }; accessLocation: x509.UriParam }>,
        ): void;
        /** Returns the DER encoding of this structure as a hexadecimal string. */
        getEncodedHex(): string;
    }
}
| markogresak/DefinitelyTyped | types/jsrsasign/modules/KJUR/asn1/csr/CertificationRequestInfo.d.ts | TypeScript | mit | 3,178 |
<?php
/**
* Zend Framework
*
* LICENSE
*
* This source file is subject to the new BSD license that is bundled
* with this package in the file LICENSE.txt.
* It is also available through the world-wide-web at this URL:
* http://framework.zend.com/license/new-bsd
* If you did not receive a copy of the license and are unable to
* obtain it through the world-wide-web, please send an email
* to license@zend.com so we can send you a copy immediately.
*
* @category Zend
* @package Zend_Tool
* @subpackage Framework
* @copyright Copyright (c) 2005-2014 Zend Technologies USA Inc. (http://www.zend.com)
* @license http://framework.zend.com/license/new-bsd New BSD License
* @version $Id$
*/
/**
* @see Zend_Tool_Project_Context_Filesystem_Directory
*/
require_once 'Zend/Tool/Project/Context/Filesystem/Directory.php';
/**
* This class is the front most class for utilizing Zend_Tool_Project
*
* A profile is a hierarchical set of resources that keep track of
* items within a specific project.
*
* @category Zend
* @package Zend_Tool
* @copyright Copyright (c) 2005-2014 Zend Technologies USA Inc. (http://www.zend.com)
* @license http://framework.zend.com/license/new-bsd New BSD License
*/
class Zend_Tool_Project_Context_Zf_ControllersDirectory extends Zend_Tool_Project_Context_Filesystem_Directory
{
    /**
     * Name of the directory this context maps to on disk ("controllers").
     *
     * @var string
     */
    protected $_filesystemName = 'controllers';
    /**
     * getName()
     *
     * Returns the context name used by the project profile to identify
     * this resource type.
     *
     * @return string
     */
    public function getName()
    {
        return 'ControllersDirectory';
    }
}
| groundcall/jobeet | vendor/Zend/Tool/Project/Context/Zf/ControllersDirectory.php | PHP | mit | 1,595 |
#include "rr.h"

#include <vector>
namespace rr {
void Context::Init() {
ClassBuilder("Context").
defineSingletonMethod("New", &New).
defineSingletonMethod("GetCurrent", &GetCurrent).
defineSingletonMethod("GetEntered", &GetEntered).
defineSingletonMethod("GetCalling", &GetCalling).
defineSingletonMethod("InContext", &InContext).
defineMethod("Dispose", &Dispose).
defineMethod("Global", &Global).
defineMethod("DetachGlobal", &Global).
defineMethod("ReattachGlobal", &ReattachGlobal).
defineMethod("SetSecurityToken", &SetSecurityToken).
defineMethod("UseDefaultSecurityToken", &UseDefaultSecurityToken).
defineMethod("GetSecurityToken", &GetSecurityToken).
defineMethod("HasOutOfMemoryException", &HasOutOfMemoryException).
defineMethod("SetEmbedderData", &SetEmbedderData).
defineMethod("GetEmbedderData", &GetEmbedderData).
defineMethod("AllowCodeGenerationFromStrings", &AllowCodeGenerationFromStrings).
defineMethod("IsCodeGenerationFromStringsAllowed", &IsCodeGenerationFromStringsAllowed).
defineMethod("Enter", &Enter).
defineMethod("Exit", &Exit).
store(&Class);
ClassBuilder("ExtensionConfiguration").
defineSingletonMethod("new", &ExtensionConfiguration::initialize).
store(&ExtensionConfiguration::Class);
}
// Release the persistent handle backing this context (Ruby -> v8).
VALUE Context::Dispose(VALUE self) {
  Void(Context(self).dispose())
}
// Return the context's global object wrapped as a Ruby V8::C::Object.
VALUE Context::Global(VALUE self) {
  return Object(Context(self)->Global());
}
// Detach the global object from this context.
VALUE Context::DetachGlobal(VALUE self) {
  Void(Context(self)->DetachGlobal());
}
// Reattach a (previously detached) global object to this context.
VALUE Context::ReattachGlobal(VALUE self, VALUE global) {
  Void(Context(self)->ReattachGlobal(Object(global)));
}
// Class-level accessor: the last context entered via Enter().
VALUE Context::GetEntered(VALUE self) {
  return Context(v8::Context::GetEntered());
}
// Class-level accessor: the context currently executing.
VALUE Context::GetCurrent(VALUE self) {
  return Context(v8::Context::GetCurrent());
}
// Class-level accessor: the context of the calling JavaScript code.
VALUE Context::GetCalling(VALUE self) {
  return Context(v8::Context::GetCalling());
}
VALUE Context::SetSecurityToken(VALUE self, VALUE token) {
  Void(Context(self)->SetSecurityToken(Value(token)));
}
VALUE Context::UseDefaultSecurityToken(VALUE self) {
  Void(Context(self)->UseDefaultSecurityToken());
}
VALUE Context::GetSecurityToken(VALUE self) {
  return Value(Context(self)->GetSecurityToken());
}
VALUE Context::HasOutOfMemoryException(VALUE self) {
  return Bool(Context(self)->HasOutOfMemoryException());
}
// True when any context is currently entered on this isolate.
VALUE Context::InContext(VALUE self) {
  return Bool(v8::Context::InContext());
}
// Store `data` in the embedder data slot at `index`.
VALUE Context::SetEmbedderData(VALUE self, VALUE index, VALUE data) {
  Void(Context(self)->SetEmbedderData(NUM2INT(index), Value(data)));
}
// Return the embedder data slot at `index` as a Ruby value.
//
// BUG FIX: the original wrapped the call in Void(), which discarded the
// result and returned nil; GetEmbedderData is a getter, so hand the v8
// value back to Ruby exactly as GetSecurityToken does.
VALUE Context::GetEmbedderData(VALUE self, VALUE index) {
  return Value(Context(self)->GetEmbedderData(NUM2INT(index)));
}
// Enable/disable eval() and Function() constructor in this context.
VALUE Context::AllowCodeGenerationFromStrings(VALUE self, VALUE allow) {
  Void(Context(self)->AllowCodeGenerationFromStrings(RTEST(allow)));
}
// Query whether code generation from strings is currently allowed.
VALUE Context::IsCodeGenerationFromStringsAllowed(VALUE self) {
  return Bool(Context(self)->IsCodeGenerationFromStringsAllowed());
}
// Build a v8::ExtensionConfiguration from a Ruby array of extension-name
// strings. The string storage is borrowed from the Ruby objects in `names`,
// which must outlive the returned configuration.
//
// FIX: the original used a C99-style variable-length array
// (`const char* array[length]`), which is not standard C++ and is undefined
// for length == 0; use std::vector instead.
VALUE ExtensionConfiguration::initialize(VALUE self, VALUE names) {
  int length = RARRAY_LENINT(names);
  std::vector<const char*> array(length);
  for (int i = 0; i < length; i++) {
    array[i] = RSTRING_PTR(rb_ary_entry(names, i));
  }
  return ExtensionConfiguration(new v8::ExtensionConfiguration(length, array.data()));
}
// Create a new v8::Context. All three arguments are optional:
// an ExtensionConfiguration, a global ObjectTemplate and a global object.
VALUE Context::New(int argc, VALUE argv[], VALUE self) {
  VALUE extension_configuration; VALUE global_template; VALUE global_object;
  rb_scan_args(argc, argv, "03", &extension_configuration, &global_template, &global_object);
  v8::Persistent<v8::Context> context(v8::Context::New(
    ExtensionConfiguration(extension_configuration),
    *ObjectTemplate(global_template),
    *Object(global_object)
  ));
  Context reference(context);
  // `reference` holds its own handle; drop the temporary persistent handle
  // so the context is not leaked.
  context.Dispose();
  return reference;
}
// Push this context onto the entered-context stack.
VALUE Context::Enter(VALUE self) {
  Void(Context(self)->Enter());
}
// Pop this context from the entered-context stack.
VALUE Context::Exit(VALUE self) {
  Void(Context(self)->Exit());
}
// Pointer-template specialization: extract the wrapped
// v8::ExtensionConfiguration* from a Ruby object.
template <> void Pointer<v8::ExtensionConfiguration>::unwrap(VALUE value) {
  Data_Get_Struct(value, class v8::ExtensionConfiguration, pointer);
}
}
| ducktyper/bfnz | vendor/cache/ruby/2.3.0/gems/therubyracer-0.12.2/ext/v8/context.cc | C++ | mit | 4,054 |
// Type definitions for jsreport-html-embedded-in-docx 1.0
// Project: https://github.com/jsreport/jsreport-html-embedded-in-docx
// Definitions by: taoqf <https://github.com/taoqf>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
// TypeScript Version: 2.3
import { ExtensionDefinition } from 'jsreport-core';
// Augment jsreport-core so templates may declare the recipe added by this
// extension.
declare module 'jsreport-core' {
    interface Template {
        recipe: 'html-embedded-in-docx' | string;
    }
}
/** Factory returning the extension definition to pass to jsreport's `use()`. */
declare function JsReportHtmlEmbeddedInDocx(): ExtensionDefinition;
export = JsReportHtmlEmbeddedInDocx;
| borisyankov/DefinitelyTyped | types/jsreport-html-embedded-in-docx/index.d.ts | TypeScript | mit | 542 |
#include <stdio.h>
#include <assert.h>
#include <time.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
typedef const char *str_t;
#include "kbtree.h"
KBTREE_INIT(int, uint32_t, kb_generic_cmp)
KBTREE_INIT(str, str_t, kb_str_cmp)
static int data_size = 5000000;
static unsigned *int_data;
static char **str_data;
/* Allocate and fill the benchmark data sets: data_size pseudo-random
 * unsigned integers plus their hexadecimal string representations.
 * srand48(11) fixes the seed so every run sees the same sequence. */
void ht_init_data()
{
	int i;
	char buf[256];
	printf("--- generating data... ");
	srand48(11);
	/* NOTE(review): calloc/strdup results are not checked; an allocation
	 * failure would crash in the loop below. */
	int_data = (unsigned*)calloc(data_size, sizeof(unsigned));
	str_data = (char**)calloc(data_size, sizeof(char*));
	for (i = 0; i < data_size; ++i) {
		/* the /4 keeps the key range small enough that duplicates are
		 * likely, so both insert and delete paths get exercised */
		int_data[i] = (unsigned)(data_size * drand48() / 4) * 271828183u;
		sprintf(buf, "%x", int_data[i]);
		str_data[i] = strdup(buf);
	}
	printf("done!\n");
}
/* Free every generated string and both backing arrays. */
void ht_destroy_data()
{
	int idx;
	for (idx = 0; idx < data_size; ++idx) {
		free(str_data[idx]);
	}
	free(str_data);
	free(int_data);
}
/* Exercise kbtree with uint32_t keys: toggle-insert each datum (insert
 * when absent, delete when present), then demonstrate interval lookup
 * and whole-tree traversal. */
void ht_khash_int()
{
	int i;
	unsigned *data = int_data;
	uint32_t *l, *u;
	kbtree_t(int) *h;
	h = kb_init(int, KB_DEFAULT_SIZE);
	for (i = 0; i < data_size; ++i) {
		/* presence toggle: a duplicate key deletes the entry again */
		if (kb_get(int, h, data[i]) == 0) kb_put(int, h, data[i]);
		else kb_del(int, h, data[i]);
	}
	printf("[ht_khash_int] size: %d\n", kb_size(h));
	if (1) {
		int cnt = 0;
		uint32_t x, y;
		/* find the stored neighbours bracketing an arbitrary probe key */
		kb_interval(int, h, 2174625464u, &l, &u);
		printf("interval for 2174625464: (%u, %u)\n", l? *l : 0, u? *u : 0);
		/* traverse_f records the first visited element in y and counts all;
		 * afterwards y is compared against __kb_get_first's result */
#define traverse_f(p) { if (cnt == 0) y = *p; ++cnt; }
		__kb_traverse(uint32_t, h, traverse_f);
		__kb_get_first(uint32_t, h, x);
		printf("# of elements from traversal: %d\n", cnt);
		printf("first element: %d == %d\n", x, y);
	}
	__kb_destroy(h);
}
/* Same toggle-insert exercise as ht_khash_int, but with C-string keys. */
void ht_khash_str()
{
	int i;
	char **data = str_data;
	kbtree_t(str) *h;
	h = kb_init(str, KB_DEFAULT_SIZE);
	for (i = 0; i < data_size; ++i) {
		/* presence toggle: a duplicate key deletes the entry again */
		if (kb_get(str, h, data[i]) == 0) kb_put(str, h, data[i]);
		else kb_del(str, h, data[i]);
	}
	/* BUG FIX: the report line was mislabeled "[ht_khash_int]" (copy/paste
	 * from the integer benchmark), making the two outputs indistinguishable. */
	printf("[ht_khash_str] size: %d\n", kb_size(h));
	__kb_destroy(h);
}
/* Run f() once and report its wall-clock-ish CPU time via clock(). */
void ht_timing(void (*f)(void))
{
	clock_t start = clock();
	f();
	printf("[ht_timing] %.3lf sec\n", (double)(clock() - start) / CLOCKS_PER_SEC);
}
/* Entry point: an optional argv[1] overrides the number of test records. */
int main(int argc, char *argv[])
{
	if (argc > 1)
		data_size = atoi(argv[1]);
	ht_init_data();
	ht_timing(ht_khash_int);
	ht_timing(ht_khash_str);
	ht_destroy_data();
	return 0;
}
| libnano/primer3-py | primer3/src/libprimer3/klib/test/kbtree_test.c | C | gpl-2.0 | 2,209 |
/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 only,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 for more details (a copy is included
* in the LICENSE file that accompanied this code).
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
* http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
/*
* Copyright (c) 2003 Hewlett-Packard Development Company LP.
* Developed under the sponsorship of the US Government under
* Subcontract No. B514193
*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
* Copyright (c) 2010, 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*/
/**
* This file implements POSIX lock type for Lustre.
* Its policy properties are start and end of extent and PID.
*
* These locks are only done through MDS due to POSIX semantics requiring
* e.g. that locks could be only partially released and as such split into
* two parts, and also that two adjacent locks from the same process may be
* merged into a single wider lock.
*
* Lock modes are mapped like this:
* PR and PW for READ and WRITE locks
* NL to request a releasing of a portion of the lock
*
* These flock locks never timeout.
*/
#define DEBUG_SUBSYSTEM S_LDLM
#include <lustre_dlm.h>
#include <obd_support.h>
#include <obd_class.h>
#include <lustre_lib.h>
#include <linux/list.h>
#include "ldlm_internal.h"
/**
* list_for_remaining_safe - iterate over the remaining entries in a list
* and safeguard against removal of a list entry.
* \param pos the &struct list_head to use as a loop counter. pos MUST
* have been initialized prior to using it in this macro.
* \param n another &struct list_head to use as temporary storage
* \param head the head for your list.
*/
#define list_for_remaining_safe(pos, n, head) \
for (n = pos->next; pos != (head); pos = n, n = pos->next)
/* Two flock locks belong to the same owner iff both the flock owner id
 * and the exporting client match. */
static inline int
ldlm_same_flock_owner(struct ldlm_lock *lock, struct ldlm_lock *new)
{
	if (new->l_policy_data.l_flock.owner !=
	    lock->l_policy_data.l_flock.owner)
		return 0;
	return new->l_export == lock->l_export;
}
/* True when the [start, end] extents of the two flock locks intersect. */
static inline int
ldlm_flocks_overlap(struct ldlm_lock *lock, struct ldlm_lock *new)
{
	if (new->l_policy_data.l_flock.start >
	    lock->l_policy_data.l_flock.end)
		return 0;
	return new->l_policy_data.l_flock.end >=
	       lock->l_policy_data.l_flock.start;
}
/* Unlink `lock` from its resource list and destroy it. Caller holds the
 * resource lock. With LDLM_FL_WAIT_NOREPROC (client side) the reference
 * taken at enqueue time is also dropped and a CANCEL RPC is suppressed. */
static inline void
ldlm_flock_destroy(struct ldlm_lock *lock, enum ldlm_mode mode, __u64 flags)
{
	LDLM_DEBUG(lock, "%s(mode: %d, flags: 0x%llx)",
		   __func__, mode, flags);
	/* Safe to not lock here, since it should be empty anyway */
	LASSERT(hlist_unhashed(&lock->l_exp_flock_hash));
	list_del_init(&lock->l_res_link);
	if (flags == LDLM_FL_WAIT_NOREPROC) {
		/* client side - set a flag to prevent sending a CANCEL */
		lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;
		/* when reaching here, it is under lock_res_and_lock(). Thus,
		 * need call the nolock version of ldlm_lock_decref_internal
		 */
		ldlm_lock_decref_internal_nolock(lock, mode);
	}
	ldlm_lock_destroy_nolock(lock);
}
/**
* Process a granting attempt for flock lock.
* Must be called under ns lock held.
*
* This function looks for any conflicts for \a lock in the granted or
* waiting queues. The lock is granted if no conflicts are found in
* either queue.
*
* It is also responsible for splitting a lock if a portion of the lock
* is released.
*
* If \a first_enq is 0 (ie, called from ldlm_reprocess_queue):
* - blocking ASTs have already been sent
*
* If \a first_enq is 1 (ie, called from ldlm_lock_enqueue):
* - blocking ASTs have not been sent yet, so list of conflicting locks
* would be collected and ASTs sent.
*/
static int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags,
				   int first_enq, enum ldlm_error *err,
				   struct list_head *work_list)
{
	struct ldlm_resource *res = req->l_resource;
	struct ldlm_namespace *ns = ldlm_res_to_ns(res);
	struct list_head *tmp;
	struct list_head *ownlocks = NULL;
	struct ldlm_lock *lock = NULL;
	struct ldlm_lock *new = req;
	struct ldlm_lock *new2 = NULL;
	enum ldlm_mode mode = req->l_req_mode;
	int added = (mode == LCK_NL);	/* LCK_NL == unlock: nothing to insert */
	int overlaps = 0;
	int splitted = 0;
	const struct ldlm_callback_suite null_cbs = { };
	CDEBUG(D_DLMTRACE,
	       "flags %#llx owner %llu pid %u mode %u start %llu end %llu\n",
	       *flags, new->l_policy_data.l_flock.owner,
	       new->l_policy_data.l_flock.pid, mode,
	       req->l_policy_data.l_flock.start,
	       req->l_policy_data.l_flock.end);
	*err = ELDLM_OK;
	/* No blocking ASTs are sent to the clients for
	 * Posix file & record locks
	 */
	req->l_blocking_ast = NULL;
	/* Jumped back to after dropping the resource lock to allocate new2
	 * (the split second half), since the lists may have changed. */
reprocess:
	if ((*flags == LDLM_FL_WAIT_NOREPROC) || (mode == LCK_NL)) {
		/* This loop determines where this processes locks start
		 * in the resource lr_granted list.
		 */
		list_for_each(tmp, &res->lr_granted) {
			lock = list_entry(tmp, struct ldlm_lock,
					  l_res_link);
			if (ldlm_same_flock_owner(lock, req)) {
				ownlocks = tmp;
				break;
			}
		}
	} else {
		int reprocess_failed = 0;
		lockmode_verify(mode);
		/* This loop determines if there are existing locks
		 * that conflict with the new lock request.
		 */
		list_for_each(tmp, &res->lr_granted) {
			lock = list_entry(tmp, struct ldlm_lock,
					  l_res_link);
			if (ldlm_same_flock_owner(lock, req)) {
				if (!ownlocks)
					ownlocks = tmp;
				continue;
			}
			/* locks are compatible, overlap doesn't matter */
			if (lockmode_compat(lock->l_granted_mode, mode))
				continue;
			if (!ldlm_flocks_overlap(lock, req))
				continue;
			if (!first_enq) {
				reprocess_failed = 1;
				continue;
			}
			if (*flags & LDLM_FL_BLOCK_NOWAIT) {
				ldlm_flock_destroy(req, mode, *flags);
				*err = -EAGAIN;
				return LDLM_ITER_STOP;
			}
			if (*flags & LDLM_FL_TEST_LOCK) {
				/* F_GETLK: report the conflicting lock back
				 * through req instead of blocking */
				ldlm_flock_destroy(req, mode, *flags);
				req->l_req_mode = lock->l_granted_mode;
				req->l_policy_data.l_flock.pid =
					lock->l_policy_data.l_flock.pid;
				req->l_policy_data.l_flock.start =
					lock->l_policy_data.l_flock.start;
				req->l_policy_data.l_flock.end =
					lock->l_policy_data.l_flock.end;
				*flags |= LDLM_FL_LOCK_CHANGED;
				return LDLM_ITER_STOP;
			}
			ldlm_resource_add_lock(res, &res->lr_waiting, req);
			*flags |= LDLM_FL_BLOCK_GRANTED;
			return LDLM_ITER_STOP;
		}
		if (reprocess_failed)
			return LDLM_ITER_CONTINUE;
	}
	if (*flags & LDLM_FL_TEST_LOCK) {
		/* F_GETLK with no conflict found: report F_UNLCK */
		ldlm_flock_destroy(req, mode, *flags);
		req->l_req_mode = LCK_NL;
		*flags |= LDLM_FL_LOCK_CHANGED;
		return LDLM_ITER_STOP;
	}
	/* Scan the locks owned by this process that overlap this request.
	 * We may have to merge or split existing locks.
	 */
	if (!ownlocks)
		ownlocks = &res->lr_granted;
	list_for_remaining_safe(ownlocks, tmp, &res->lr_granted) {
		lock = list_entry(ownlocks, struct ldlm_lock, l_res_link);
		if (!ldlm_same_flock_owner(lock, new))
			break;
		if (lock->l_granted_mode == mode) {
			/* If the modes are the same then we need to process
			 * locks that overlap OR adjoin the new lock. The extra
			 * logic condition is necessary to deal with arithmetic
			 * overflow and underflow.
			 */
			if ((new->l_policy_data.l_flock.start >
			     (lock->l_policy_data.l_flock.end + 1)) &&
			    (lock->l_policy_data.l_flock.end != OBD_OBJECT_EOF))
				continue;
			if ((new->l_policy_data.l_flock.end <
			     (lock->l_policy_data.l_flock.start - 1)) &&
			    (lock->l_policy_data.l_flock.start != 0))
				break;
			/* merge: widen one lock to cover the union */
			if (new->l_policy_data.l_flock.start <
			    lock->l_policy_data.l_flock.start) {
				lock->l_policy_data.l_flock.start =
					new->l_policy_data.l_flock.start;
			} else {
				new->l_policy_data.l_flock.start =
					lock->l_policy_data.l_flock.start;
			}
			if (new->l_policy_data.l_flock.end >
			    lock->l_policy_data.l_flock.end) {
				lock->l_policy_data.l_flock.end =
					new->l_policy_data.l_flock.end;
			} else {
				new->l_policy_data.l_flock.end =
					lock->l_policy_data.l_flock.end;
			}
			if (added) {
				ldlm_flock_destroy(lock, mode, *flags);
			} else {
				new = lock;
				added = 1;
			}
			continue;
		}
		if (new->l_policy_data.l_flock.start >
		    lock->l_policy_data.l_flock.end)
			continue;
		if (new->l_policy_data.l_flock.end <
		    lock->l_policy_data.l_flock.start)
			break;
		++overlaps;
		if (new->l_policy_data.l_flock.start <=
		    lock->l_policy_data.l_flock.start) {
			if (new->l_policy_data.l_flock.end <
			    lock->l_policy_data.l_flock.end) {
				/* new covers the head of lock: trim lock */
				lock->l_policy_data.l_flock.start =
					new->l_policy_data.l_flock.end + 1;
				break;
			}
			/* new fully covers lock: drop lock */
			ldlm_flock_destroy(lock, lock->l_req_mode, *flags);
			continue;
		}
		if (new->l_policy_data.l_flock.end >=
		    lock->l_policy_data.l_flock.end) {
			/* new covers the tail of lock: trim lock */
			lock->l_policy_data.l_flock.end =
				new->l_policy_data.l_flock.start - 1;
			continue;
		}
		/* split the existing lock into two locks */
		/* if this is an F_UNLCK operation then we could avoid
		 * allocating a new lock and use the req lock passed in
		 * with the request but this would complicate the reply
		 * processing since updates to req get reflected in the
		 * reply. The client side replays the lock request so
		 * it must see the original lock data in the reply.
		 */
		/* XXX - if ldlm_lock_new() can sleep we should
		 * release the lr_lock, allocate the new lock,
		 * and restart processing this lock.
		 */
		if (!new2) {
			unlock_res_and_lock(req);
			new2 = ldlm_lock_create(ns, &res->lr_name, LDLM_FLOCK,
						lock->l_granted_mode, &null_cbs,
						NULL, 0, LVB_T_NONE);
			lock_res_and_lock(req);
			if (IS_ERR(new2)) {
				ldlm_flock_destroy(req, lock->l_granted_mode,
						   *flags);
				*err = PTR_ERR(new2);
				return LDLM_ITER_STOP;
			}
			goto reprocess;
		}
		splitted = 1;
		new2->l_granted_mode = lock->l_granted_mode;
		new2->l_policy_data.l_flock.pid =
			new->l_policy_data.l_flock.pid;
		new2->l_policy_data.l_flock.owner =
			new->l_policy_data.l_flock.owner;
		new2->l_policy_data.l_flock.start =
			lock->l_policy_data.l_flock.start;
		new2->l_policy_data.l_flock.end =
			new->l_policy_data.l_flock.start - 1;
		lock->l_policy_data.l_flock.start =
			new->l_policy_data.l_flock.end + 1;
		new2->l_conn_export = lock->l_conn_export;
		if (lock->l_export) {
			new2->l_export = class_export_lock_get(lock->l_export,
							       new2);
			if (new2->l_export->exp_lock_hash &&
			    hlist_unhashed(&new2->l_exp_hash))
				cfs_hash_add(new2->l_export->exp_lock_hash,
					     &new2->l_remote_handle,
					     &new2->l_exp_hash);
		}
		if (*flags == LDLM_FL_WAIT_NOREPROC)
			ldlm_lock_addref_internal_nolock(new2,
							 lock->l_granted_mode);
		/* insert new2 at lock */
		ldlm_resource_add_lock(res, ownlocks, new2);
		LDLM_LOCK_RELEASE(new2);
		break;
	}
	/* if new2 is created but never used, destroy it*/
	if (splitted == 0 && new2)
		ldlm_lock_destroy_nolock(new2);
	/* At this point we're granting the lock request. */
	req->l_granted_mode = req->l_req_mode;
	if (!added) {
		list_del_init(&req->l_res_link);
		/* insert new lock before ownlocks in list. */
		ldlm_resource_add_lock(res, ownlocks, req);
	}
	if (*flags != LDLM_FL_WAIT_NOREPROC) {
		/* The only one possible case for client-side calls flock
		 * policy function is ldlm_flock_completion_ast inside which
		 * carries LDLM_FL_WAIT_NOREPROC flag.
		 */
		CERROR("Illegal parameter for client-side-only module.\n");
		LBUG();
	}
	/* In case we're reprocessing the requested lock we can't destroy
	 * it until after calling ldlm_add_ast_work_item() above so that laawi()
	 * can bump the reference count on \a req. Otherwise \a req
	 * could be freed before the completion AST can be sent.
	 */
	if (added)
		ldlm_flock_destroy(req, mode, *flags);
	ldlm_resource_dump(D_INFO, res);
	return LDLM_ITER_CONTINUE;
}
/* Per-waiter state used while a client-side flock enqueue sleeps. */
struct ldlm_flock_wait_data {
	struct ldlm_lock *fwd_lock;	/* the lock being waited on */
	int fwd_generation;		/* import generation when the wait began */
};
/* l_wait_event interrupt handler: mark the lock so it is cancelled rather
 * than cached when the waiting process is signalled. */
static void
ldlm_flock_interrupted_wait(void *data)
{
	struct ldlm_lock *lock;
	lock = ((struct ldlm_flock_wait_data *)data)->fwd_lock;
	lock_res_and_lock(lock);
	/* client side - set flag to prevent lock from being put on LRU list */
	ldlm_set_cbpending(lock);
	unlock_res_and_lock(lock);
}
/**
* Flock completion callback function.
*
* \param lock [in,out]: A lock to be handled
* \param flags [in]: flags
* \param *data [in]: ldlm_work_cp_ast_lock() will use ldlm_cb_set_arg
*
* \retval 0 : success
* \retval <0 : failure
*/
int
ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
{
	struct file_lock *getlk = lock->l_ast_data;
	struct obd_device *obd;
	struct obd_import *imp = NULL;
	struct ldlm_flock_wait_data fwd;
	struct l_wait_info lwi;
	enum ldlm_error err;
	int rc = 0;
	/* OBD_FAIL_* below are fault-injection points for sanity tests */
	OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT2, 4);
	if (OBD_FAIL_PRECHECK(OBD_FAIL_LDLM_CP_CB_WAIT3)) {
		lock_res_and_lock(lock);
		lock->l_flags |= LDLM_FL_FAIL_LOC;
		unlock_res_and_lock(lock);
		OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT3, 4);
	}
	CDEBUG(D_DLMTRACE, "flags: 0x%llx data: %p getlk: %p\n",
	       flags, data, getlk);
	LASSERT(flags != LDLM_FL_WAIT_NOREPROC);
	if (flags & LDLM_FL_FAILED)
		goto granted;
	if (!(flags & LDLM_FL_BLOCKED_MASK)) {
		if (!data)
			/* mds granted the lock in the reply */
			goto granted;
		/* CP AST RPC: lock get granted, wake it up */
		wake_up(&lock->l_waitq);
		return 0;
	}
	LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, sleeping");
	fwd.fwd_lock = lock;
	obd = class_exp2obd(lock->l_conn_export);
	/* if this is a local lock, there is no import */
	if (obd)
		imp = obd->u.cli.cl_import;
	if (imp) {
		spin_lock(&imp->imp_lock);
		fwd.fwd_generation = imp->imp_generation;
		spin_unlock(&imp->imp_lock);
	}
	lwi = LWI_TIMEOUT_INTR(0, NULL, ldlm_flock_interrupted_wait, &fwd);
	/* Go to sleep until the lock is granted. */
	rc = l_wait_event(lock->l_waitq, is_granted_or_cancelled(lock), &lwi);
	if (rc) {
		LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
			   rc);
		return rc;
	}
	/* Reached either directly (already granted/failed) or after the wait
	 * above completes. */
granted:
	OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT, 10);
	if (OBD_FAIL_PRECHECK(OBD_FAIL_LDLM_CP_CB_WAIT4)) {
		lock_res_and_lock(lock);
		/* DEADLOCK is always set with CBPENDING */
		lock->l_flags |= LDLM_FL_FLOCK_DEADLOCK | LDLM_FL_CBPENDING;
		unlock_res_and_lock(lock);
		OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT4, 4);
	}
	if (OBD_FAIL_PRECHECK(OBD_FAIL_LDLM_CP_CB_WAIT5)) {
		lock_res_and_lock(lock);
		/* DEADLOCK is always set with CBPENDING */
		lock->l_flags |= LDLM_FL_FAIL_LOC |
				 LDLM_FL_FLOCK_DEADLOCK | LDLM_FL_CBPENDING;
		unlock_res_and_lock(lock);
		OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT5, 4);
	}
	lock_res_and_lock(lock);
	/*
	 * Protect against race where lock could have been just destroyed
	 * due to overlap in ldlm_process_flock_lock().
	 */
	if (ldlm_is_destroyed(lock)) {
		unlock_res_and_lock(lock);
		LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
		/*
		 * An error is still to be returned, to propagate it up to
		 * ldlm_cli_enqueue_fini() caller.
		 */
		return -EIO;
	}
	/* ldlm_lock_enqueue() has already placed lock on the granted list. */
	ldlm_resource_unlink_lock(lock);
	/*
	 * Import invalidation. We need to actually release the lock
	 * references being held, so that it can go away. No point in
	 * holding the lock even if app still believes it has it, since
	 * server already dropped it anyway. Only for granted locks too.
	 */
	/* Do the same for DEADLOCK'ed locks. */
	if (ldlm_is_failed(lock) || ldlm_is_flock_deadlock(lock)) {
		int mode;
		if (flags & LDLM_FL_TEST_LOCK)
			LASSERT(ldlm_is_test_lock(lock));
		if (ldlm_is_test_lock(lock) || ldlm_is_flock_deadlock(lock))
			mode = getlk->fl_type;
		else
			mode = lock->l_granted_mode;
		if (ldlm_is_flock_deadlock(lock)) {
			LDLM_DEBUG(lock, "client-side enqueue deadlock received");
			rc = -EDEADLK;
		}
		ldlm_flock_destroy(lock, mode, LDLM_FL_WAIT_NOREPROC);
		unlock_res_and_lock(lock);
		/* Need to wake up the waiter if we were evicted */
		wake_up(&lock->l_waitq);
		/*
		 * An error is still to be returned, to propagate it up to
		 * ldlm_cli_enqueue_fini() caller.
		 */
		return rc ? : -EIO;
	}
	LDLM_DEBUG(lock, "client-side enqueue granted");
	if (flags & LDLM_FL_TEST_LOCK) {
		/* fcntl(F_GETLK) request */
		/* The old mode was saved in getlk->fl_type so that if the mode
		 * in the lock changes we can decref the appropriate refcount.
		 */
		LASSERT(ldlm_is_test_lock(lock));
		ldlm_flock_destroy(lock, getlk->fl_type, LDLM_FL_WAIT_NOREPROC);
		switch (lock->l_granted_mode) {
		case LCK_PR:
			getlk->fl_type = F_RDLCK;
			break;
		case LCK_PW:
			getlk->fl_type = F_WRLCK;
			break;
		default:
			getlk->fl_type = F_UNLCK;
		}
		getlk->fl_pid = -(pid_t)lock->l_policy_data.l_flock.pid;
		getlk->fl_start = (loff_t)lock->l_policy_data.l_flock.start;
		getlk->fl_end = (loff_t)lock->l_policy_data.l_flock.end;
	} else {
		__u64 noreproc = LDLM_FL_WAIT_NOREPROC;
		/* We need to reprocess the lock to do merges or splits
		 * with existing locks owned by this process.
		 */
		ldlm_process_flock_lock(lock, &noreproc, 1, &err, NULL);
	}
	unlock_res_and_lock(lock);
	return rc;
}
EXPORT_SYMBOL(ldlm_flock_completion_ast);
/* Convert a flock policy received over the wire into the in-memory form. */
void ldlm_flock_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
				     union ldlm_policy_data *lpolicy)
{
	lpolicy->l_flock.start = wpolicy->l_flock.lfw_start;
	lpolicy->l_flock.end = wpolicy->l_flock.lfw_end;
	lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid;
	lpolicy->l_flock.owner = wpolicy->l_flock.lfw_owner;
}
/* Convert the in-memory flock policy into the on-wire form; the memset
 * zeroes any padding/unused union members before sending. */
void ldlm_flock_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
				     union ldlm_wire_policy_data *wpolicy)
{
	memset(wpolicy, 0, sizeof(*wpolicy));
	wpolicy->l_flock.lfw_start = lpolicy->l_flock.start;
	wpolicy->l_flock.lfw_end = lpolicy->l_flock.end;
	wpolicy->l_flock.lfw_pid = lpolicy->l_flock.pid;
	wpolicy->l_flock.lfw_owner = lpolicy->l_flock.owner;
}
| BPI-SINOVOIP/BPI-Mainline-kernel | linux-4.14/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c | C | gpl-2.0 | 18,498 |
//------------------------------------------------------------------------------
// <copyright file="dl_list.h" company="Atheros">
// Copyright (c) 2004-2008 Atheros Corporation. All rights reserved.
//
// The software source and binaries included in this development package are
// licensed, not sold. You, or your company, received the package under one
// or more license agreements. The rights granted to you are specifically
// listed in these license agreement(s). All other rights remain with Atheros
// Communications, Inc., its subsidiaries, or the respective owner including
// those listed on the included copyright notices. Distribution of any
// portion of this package must be in strict compliance with the license
// agreement(s) terms.
// </copyright>
//
// <summary>
// Wifi driver for AR6002
// </summary>
//
//------------------------------------------------------------------------------
//==============================================================================
// Double-link list definitions (adapted from Atheros SDIO stack)
//
// Author(s): ="Atheros"
//==============================================================================
#ifndef __DL_LIST_H___
#define __DL_LIST_H___
#include "a_osapi.h"
#define A_CONTAINING_STRUCT(address, struct_type, field_name)\
((struct_type *)((A_UINT32)(address) - (A_UINT32)(&((struct_type *)0)->field_name)))
/* list functions */
/* pointers for the list */
typedef struct _DL_LIST {
struct _DL_LIST *pPrev;
struct _DL_LIST *pNext;
}DL_LIST, *PDL_LIST;
/*
* DL_LIST_INIT , initialize doubly linked list
*/
#define DL_LIST_INIT(pList)\
{(pList)->pPrev = pList; (pList)->pNext = pList;}
/* faster macro to init list and add a single item */
#define DL_LIST_INIT_AND_ADD(pList,pItem) \
{ (pList)->pPrev = (pItem); \
(pList)->pNext = (pItem); \
(pItem)->pNext = (pList); \
(pItem)->pPrev = (pList); \
}
#define DL_LIST_IS_EMPTY(pList) (((pList)->pPrev == (pList)) && ((pList)->pNext == (pList)))
#define DL_LIST_GET_ITEM_AT_HEAD(pList) (pList)->pNext
#define DL_LIST_GET_ITEM_AT_TAIL(pList) (pList)->pPrev
/*
* ITERATE_OVER_LIST pStart is the list, pTemp is a temp list member
* NOT: do not use this function if the items in the list are deleted inside the
* iteration loop
*/
#define ITERATE_OVER_LIST(pStart, pTemp) \
for((pTemp) =(pStart)->pNext; pTemp != (pStart); (pTemp) = (pTemp)->pNext)
/* safe iterate macro that allows the item to be removed from the list
* the iteration continues to the next item in the list
*/
#define ITERATE_OVER_LIST_ALLOW_REMOVE(pStart,pItem,st,offset) \
{ \
PDL_LIST pTemp; \
pTemp = (pStart)->pNext; \
while (pTemp != (pStart)) { \
(pItem) = A_CONTAINING_STRUCT(pTemp,st,offset); \
pTemp = pTemp->pNext; \
#define ITERATE_END }}
/*
* DL_ListInsertTail - insert pAdd to the end of the list
*/
/*
 * DL_ListInsertTail - link pAdd in as the last item of pList and return it.
 */
static INLINE PDL_LIST DL_ListInsertTail(PDL_LIST pList, PDL_LIST pAdd) {
    PDL_LIST pOldTail = pList->pPrev;
    pAdd->pPrev = pOldTail;
    pAdd->pNext = pList;
    pOldTail->pNext = pAdd;
    pList->pPrev = pAdd;
    return pAdd;
}
/*
* DL_ListInsertHead - insert pAdd into the head of the list
*/
/*
 * DL_ListInsertHead - link pAdd in as the first item of pList and return it.
 */
static INLINE PDL_LIST DL_ListInsertHead(PDL_LIST pList, PDL_LIST pAdd) {
    PDL_LIST pOldHead = pList->pNext;
    pAdd->pPrev = pList;
    pAdd->pNext = pOldHead;
    pOldHead->pPrev = pAdd;
    pList->pNext = pAdd;
    return pAdd;
}
#define DL_ListAdd(pList,pItem) DL_ListInsertHead((pList),(pItem))
/*
 * DL_ListRemove - unlink pDel from whatever list it is on.  Afterwards the
 * node's links point back at itself, so removing the same node a second
 * time is harmless.  Returns pDel.
 */
static INLINE PDL_LIST DL_ListRemove(PDL_LIST pDel) {
    PDL_LIST pBefore = pDel->pPrev;
    PDL_LIST pAfter = pDel->pNext;
    pAfter->pPrev = pBefore;
    pBefore->pNext = pAfter;
    /* make the node self-referential so a repeated remove is a no-op */
    pDel->pNext = pDel;
    pDel->pPrev = pDel;
    return pDel;
}
/*
 * DL_ListRemoveItemFromHead - detach and return the first item of the
 * list, or NULL when the list is empty.
 */
static INLINE PDL_LIST DL_ListRemoveItemFromHead(PDL_LIST pList) {
    PDL_LIST pFirst = pList->pNext;
    if (pFirst == pList) {
        /* empty list: the head points at itself */
        return NULL;
    }
    DL_ListRemove(pFirst);
    return pFirst;
}
/*
 * DL_ListRemoveItemFromTail - detach and return the last item of the
 * list, or NULL when the list is empty.
 */
static INLINE PDL_LIST DL_ListRemoveItemFromTail(PDL_LIST pList) {
    PDL_LIST pLast = pList->pPrev;
    if (pLast == pList) {
        /* empty list: the head points at itself */
        return NULL;
    }
    DL_ListRemove(pLast);
    return pLast;
}
/*
 * DL_ListTransferItemsToTail - splice every item of pSrc onto the tail of
 * pDest, preserving order, and reset pSrc to the empty state.  A no-op
 * when pSrc is already empty.
 */
static INLINE void DL_ListTransferItemsToTail(PDL_LIST pDest, PDL_LIST pSrc) {
    PDL_LIST pSrcFirst;
    PDL_LIST pSrcLast;
    PDL_LIST pDestLast;
    if (DL_LIST_IS_EMPTY(pSrc)) {
        return;
    }
    pSrcFirst = pSrc->pNext;
    pSrcLast = pSrc->pPrev;
    pDestLast = pDest->pPrev;
    /* attach the src chain between the old dest tail and the dest head */
    pSrcLast->pNext = pDest;
    pSrcFirst->pPrev = pDestLast;
    pDestLast->pNext = pSrcFirst;
    pDest->pPrev = pSrcLast;
    /* src is now empty: point its head back at itself */
    pSrc->pPrev = pSrc;
    pSrc->pNext = pSrc;
}
#endif /* __DL_LIST_H___ */
| fards/Ainol_fire_kernel | drivers/amlogic/wifi/atheros_6302/include/dl_list.h | C | gpl-2.0 | 5,210 |
/*
* TI ADC081C/ADC101C/ADC121C 8/10/12-bit ADC driver
*
* Copyright (C) 2012 Avionic Design GmbH
* Copyright (C) 2016 Intel
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Datasheets:
* http://www.ti.com/lit/ds/symlink/adc081c021.pdf
* http://www.ti.com/lit/ds/symlink/adc101c021.pdf
* http://www.ti.com/lit/ds/symlink/adc121c021.pdf
*
* The devices have a very similar interface and differ mostly in the number of
* bits handled. For the 8-bit and 10-bit models the least-significant 4 or 2
* bits of value registers are reserved.
*/
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/regulator/consumer.h>
struct adc081c {
struct i2c_client *i2c;
struct regulator *ref;
/* 8, 10 or 12 */
int bits;
};
#define REG_CONV_RES 0x00
/*
 * adc081c_read_raw - IIO read hook for the voltage channel.
 *
 * IIO_CHAN_INFO_RAW returns the latest conversion, right-aligned to the
 * part's resolution.  IIO_CHAN_INFO_SCALE reports the reference voltage
 * in mV over a 2^bits divider as IIO_VAL_FRACTIONAL_LOG2.  Any other
 * mask is rejected with -EINVAL.
 */
static int adc081c_read_raw(struct iio_dev *iio,
			    struct iio_chan_spec const *channel, int *value,
			    int *shift, long mask)
{
	struct adc081c *adc = iio_priv(iio);
	int ret;

	if (mask == IIO_CHAN_INFO_RAW) {
		ret = i2c_smbus_read_word_swapped(adc->i2c, REG_CONV_RES);
		if (ret < 0)
			return ret;
		/* sample is left-justified in 12 bits; drop reserved LSBs */
		*value = (ret & 0xFFF) >> (12 - adc->bits);
		return IIO_VAL_INT;
	}

	if (mask == IIO_CHAN_INFO_SCALE) {
		ret = regulator_get_voltage(adc->ref);
		if (ret < 0)
			return ret;
		*value = ret / 1000; /* uV -> mV */
		*shift = adc->bits;
		return IIO_VAL_FRACTIONAL_LOG2;
	}

	return -EINVAL;
}
/*
 * Channel description shared by all supported parts.  The sample is an
 * unsigned value left-justified in a 16-bit register, so the scan_type
 * shift drops the reserved low bits on the 8- and 10-bit models.
 */
#define ADCxx1C_CHAN(_bits) { \
	.type = IIO_VOLTAGE, \
	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
	.scan_type = { \
		.sign = 'u', \
		.realbits = (_bits), \
		.storagebits = 16, \
		.shift = 12 - (_bits), \
		.endianness = IIO_CPU, \
	}, \
}

/*
 * Emit the iio_chan_spec array for one model: the voltage channel plus a
 * software timestamp.  Note: no trailing line continuation after the
 * closing brace -- the original backslash there spliced the following
 * preprocessor directive into this macro's body.
 */
#define DEFINE_ADCxx1C_CHANNELS(_name, _bits) \
static const struct iio_chan_spec _name ## _channels[] = { \
	ADCxx1C_CHAN((_bits)), \
	IIO_CHAN_SOFT_TIMESTAMP(1), \
};

/* one voltage channel + one timestamp channel */
#define ADC081C_NUM_CHANNELS 2
/*
 * Per-model description: channel table plus resolution in bits.
 * Instances are collected in adcxx1c_models[] and selected through the
 * i2c_device_id driver_data index (see enum adcxx1c_model_id).
 */
struct adcxx1c_model {
const struct iio_chan_spec* channels;
int bits;
};
/* Build an adcxx1c_model initializer from a DEFINE_ADCxx1C_CHANNELS name. */
#define ADCxx1C_MODEL(_name, _bits) \
{ \
.channels = _name ## _channels, \
.bits = (_bits), \
}
/* Instantiate channel tables for the 8-, 10- and 12-bit parts. */
DEFINE_ADCxx1C_CHANNELS(adc081c, 8);
DEFINE_ADCxx1C_CHANNELS(adc101c, 10);
DEFINE_ADCxx1C_CHANNELS(adc121c, 12);
/* Model ids are indexes in _models array */
enum adcxx1c_model_id {
ADC081C = 0,
ADC101C = 1,
ADC121C = 2,
};
/* Indexed by adcxx1c_model_id; referenced via i2c_device_id driver_data. */
static struct adcxx1c_models[] = {
ADCxx1C_MODEL(adc081c, 8),
ADCxx1C_MODEL(adc101c, 10),
ADCxx1C_MODEL(adc121c, 12),
};
/* IIO callbacks: only raw/scale reads are implemented. */
static const struct iio_info adc081c_info = {
.read_raw = adc081c_read_raw,
.driver_module = THIS_MODULE,
};
/*
 * adc081c_trigger_handler - poll function for the triggered buffer.
 *
 * Reads one conversion word and pushes it, with a timestamp, into the
 * device buffers.  On a read error the sample is skipped, but the
 * trigger is still notified so the capture loop keeps running.
 */
static irqreturn_t adc081c_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct adc081c *data = iio_priv(indio_dev);
u16 buf[8]; /* 2 bytes data + 6 bytes padding + 8 bytes timestamp */
int ret;
ret = i2c_smbus_read_word_swapped(data->i2c, REG_CONV_RES);
if (ret < 0)
goto out;
buf[0] = ret;
iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
out:
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
}
/*
 * adc081c_probe - bind the driver to an I2C client.
 *
 * Allocates the IIO device, resolves the model (channel table and bit
 * width) from the i2c_device_id driver_data index, acquires and enables
 * the "vref" regulator, sets up a triggered buffer, and registers the
 * device.  On failure, previously acquired resources are released in
 * reverse order via the error labels.
 */
static int adc081c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct iio_dev *iio;
struct adc081c *adc;
/* driver_data is an index into adcxx1c_models */
struct adcxx1c_model *model = &adcxx1c_models[id->driver_data];
int err;
/* the conversion register is accessed as an SMBus word */
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
return -EOPNOTSUPP;
iio = devm_iio_device_alloc(&client->dev, sizeof(*adc));
if (!iio)
return -ENOMEM;
adc = iio_priv(iio);
adc->i2c = client;
adc->bits = model->bits;
/* external reference voltage supply */
adc->ref = devm_regulator_get(&client->dev, "vref");
if (IS_ERR(adc->ref))
return PTR_ERR(adc->ref);
err = regulator_enable(adc->ref);
if (err < 0)
return err;
iio->dev.parent = &client->dev;
iio->name = dev_name(&client->dev);
iio->modes = INDIO_DIRECT_MODE;
iio->info = &adc081c_info;
iio->channels = model->channels;
iio->num_channels = ADC081C_NUM_CHANNELS;
err = iio_triggered_buffer_setup(iio, NULL, adc081c_trigger_handler, NULL);
if (err < 0) {
dev_err(&client->dev, "iio triggered buffer setup failed\n");
goto err_regulator_disable;
}
err = iio_device_register(iio);
if (err < 0)
goto err_buffer_cleanup;
i2c_set_clientdata(client, iio);
return 0;
err_buffer_cleanup:
iio_triggered_buffer_cleanup(iio);
err_regulator_disable:
regulator_disable(adc->ref);
return err;
}
/*
 * adc081c_remove - tear down in reverse order of probe: unregister the
 * IIO device, release the triggered buffer, then disable the regulator.
 */
static int adc081c_remove(struct i2c_client *client)
{
struct iio_dev *iio = i2c_get_clientdata(client);
struct adc081c *adc = iio_priv(iio);
iio_device_unregister(iio);
iio_triggered_buffer_cleanup(iio);
regulator_disable(adc->ref);
return 0;
}
/* I2C device ids; driver_data is the adcxx1c_models index. */
static const struct i2c_device_id adc081c_id[] = {
{ "adc081c", ADC081C },
{ "adc101c", ADC101C },
{ "adc121c", ADC121C },
{ }
};
MODULE_DEVICE_TABLE(i2c, adc081c_id);
#ifdef CONFIG_OF
/* Device-tree bindings for the same three parts. */
static const struct of_device_id adc081c_of_match[] = {
{ .compatible = "ti,adc081c" },
{ .compatible = "ti,adc101c" },
{ .compatible = "ti,adc121c" },
{ }
};
MODULE_DEVICE_TABLE(of, adc081c_of_match);
#endif
/* of_match_ptr() resolves to NULL when CONFIG_OF is not set. */
static struct i2c_driver adc081c_driver = {
.driver = {
.name = "adc081c",
.of_match_table = of_match_ptr(adc081c_of_match),
},
.probe = adc081c_probe,
.remove = adc081c_remove,
.id_table = adc081c_id,
};
module_i2c_driver(adc081c_driver);
MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("Texas Instruments ADC081C/ADC101C/ADC121C driver");
MODULE_LICENSE("GPL v2");
| geminy/aidear | oss/linux/linux-4.7/drivers/iio/adc/ti-adc081c.c | C | gpl-3.0 | 5,826 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Kairo Araujo <kairo@kairo.eti.br>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module metadata consumed by Ansible tooling (ansible-doc, sanity checks).
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
# Option documentation (YAML). Must stay in sync with the argument_spec
# built in main().
DOCUMENTATION = r'''
---
module: installp
author:
- Kairo Araujo (@kairoaraujo)
short_description: Manage packages on AIX
description:
- Manage packages using 'installp' on AIX
version_added: '2.8'
options:
accept_license:
description:
- Whether to accept the license for the package(s).
type: bool
default: no
name:
description:
- One or more packages to install or remove.
- Use C(all) to install all packages available on informed C(repository_path).
type: list
required: true
aliases: [ pkg ]
repository_path:
description:
- Path with AIX packages (required to install).
type: path
state:
description:
- Whether the package needs to be present on or absent from the system.
type: str
choices: [ absent, present ]
default: present
notes:
- If the package is already installed, even the package/fileset is new, the module will not install it.
'''
# Usage examples shown by ansible-doc.  Option names must match the
# argument_spec in main(): the option is 'accept_license', not the
# non-existent 'package_license' the original examples used.
EXAMPLES = r'''
- name: Install package foo
installp:
name: foo
repository_path: /repository/AIX71/installp/base
accept_license: yes
state: present
- name: Install bos.sysmgt that includes bos.sysmgt.nim.master, bos.sysmgt.nim.spot
installp:
name: bos.sysmgt
repository_path: /repository/AIX71/installp/base
accept_license: yes
state: present
- name: Install bos.sysmgt.nim.master only
installp:
name: bos.sysmgt.nim.master
repository_path: /repository/AIX71/installp/base
accept_license: yes
state: present
- name: Install bos.sysmgt.nim.master and bos.sysmgt.nim.spot
installp:
name: bos.sysmgt.nim.master, bos.sysmgt.nim.spot
repository_path: /repository/AIX71/installp/base
accept_license: yes
state: present
- name: Remove packages bos.sysmgt.nim.master
installp:
name: bos.sysmgt.nim.master
state: absent
'''
RETURN = r''' # '''
import os
import re
from ansible.module_utils.basic import AnsibleModule
def _check_new_pkg(module, package, repository_path):
"""
Check if the package of fileset is correct name and repository path.
:param module: Ansible module arguments spec.
:param package: Package/fileset name.
:param repository_path: Repository package path.
:return: Bool, package information.
"""
if os.path.isdir(repository_path):
installp_cmd = module.get_bin_path('installp', True)
rc, package_result, err = module.run_command("%s -l -MR -d %s" % (installp_cmd, repository_path))
if rc != 0:
module.fail_json(msg="Failed to run installp.", rc=rc, err=err)
if package == 'all':
pkg_info = "All packages on dir"
return True, pkg_info
else:
pkg_info = {}
for line in package_result.splitlines():
if re.findall(package, line):
pkg_name = line.split()[0].strip()
pkg_version = line.split()[1].strip()
pkg_info[pkg_name] = pkg_version
return True, pkg_info
return False, None
else:
module.fail_json(msg="Repository path %s is not valid." % repository_path)
def _check_installed_pkg(module, package, repository_path):
"""
Check the package on AIX.
It verifies if the package is installed and informations
:param module: Ansible module parameters spec.
:param package: Package/fileset name.
:param repository_path: Repository package path.
:return: Bool, package data.
"""
lslpp_cmd = module.get_bin_path('lslpp', True)
rc, lslpp_result, err = module.run_command("%s -lcq %s*" % (lslpp_cmd, package))
if rc == 1:
package_state = ' '.join(err.split()[-2:])
if package_state == 'not installed.':
return False, None
else:
module.fail_json(msg="Failed to run lslpp.", rc=rc, err=err)
if rc != 0:
module.fail_json(msg="Failed to run lslpp.", rc=rc, err=err)
pkg_data = {}
full_pkg_data = lslpp_result.splitlines()
for line in full_pkg_data:
pkg_name, fileset, level = line.split(':')[0:3]
pkg_data[pkg_name] = fileset, level
return True, pkg_data
def remove(module, installp_cmd, packages):
    """
    Remove installed packages with 'installp -u'.

    :param module: AnsibleModule instance.
    :param installp_cmd: Path to the installp binary.
    :param packages: List of package/fileset names to remove.
    :return: Tuple (changed, msg).
    """
    removed_pkgs = []
    not_found_pkg = []
    for package in packages:
        pkg_check, dummy = _check_installed_pkg(module, package, None)
        if pkg_check:
            if not module.check_mode:
                rc, remove_out, err = module.run_command("%s -u %s" % (installp_cmd, package))
                if rc != 0:
                    module.fail_json(msg="Failed to run installp.", rc=rc, err=err)
            # counted as removed in check mode too, so 'changed' is reported
            removed_pkgs.append(package)
        else:
            not_found_pkg.append(package)

    if removed_pkgs:
        # Label the not-found list whenever it is non-empty (the original
        # 'len(...) > 1' dropped the label for a single missing package).
        if not_found_pkg:
            not_found_pkg.insert(0, "Package(s) not found: ")
        changed = True
        msg = "Packages removed: %s. %s " % (' '.join(removed_pkgs), ' '.join(not_found_pkg))
    else:
        changed = False
        msg = ("No packages removed, all packages not found: %s" % ' '.join(not_found_pkg))
    return changed, msg
def install(module, installp_cmd, packages, repository_path, accept_license):
    """
    Install packages from a repository with 'installp -a'.

    :param module: AnsibleModule instance.
    :param installp_cmd: Path to the installp binary.
    :param packages: List of package/fileset names (or ['all']).
    :param repository_path: Directory containing installp packages.
    :param accept_license: Whether to pass -Y to accept licenses.
    :return: Tuple (changed, msg).
    """
    installed_pkgs = []
    not_found_pkgs = []
    already_installed_pkgs = {}

    # -Y accepts software license agreements during installation.
    license_flag = '-Y' if accept_license else ''

    for package in packages:
        # Validate that the package exists on the repository path.
        pkg_check, pkg_data = _check_new_pkg(module, package, repository_path)
        if not pkg_check:
            not_found_pkgs.append(package)
            continue

        # Package exists on the repository path; is it already installed?
        pkg_check_current, pkg_info = _check_installed_pkg(module, package, repository_path)
        if pkg_check_current:
            # Record the installed level, whether the name is a package
            # (direct key) or a fileset inside another package.
            if package in pkg_info:
                already_installed_pkgs[package] = pkg_info[package][1]
            else:
                for value in pkg_info.values():
                    if package in value:
                        already_installed_pkgs[package] = value[1]
            continue

        if not module.check_mode:
            rc, out, err = module.run_command(
                "%s -a %s -X -d %s %s" % (installp_cmd, license_flag, repository_path, package))
            if rc != 0:
                module.fail_json(msg="Failed to run installp", rc=rc, err=err)
        # counted as installed in check mode too, so 'changed' is reported
        installed_pkgs.append(package)

    installed_msg = (" Installed: %s." % ' '.join(installed_pkgs)) if installed_pkgs else ''
    not_found_msg = (" Not found: %s." % ' '.join(not_found_pkgs)) if not_found_pkgs else ''
    already_installed_msg = (" Already installed: %s." % already_installed_pkgs) if already_installed_pkgs else ''

    if installed_pkgs:
        changed = True
        msg = ("%s%s%s" % (installed_msg, not_found_msg, already_installed_msg))
    else:
        changed = False
        msg = ("No packages installed.%s%s%s" % (installed_msg, not_found_msg, already_installed_msg))
    return changed, msg
def main():
    """Module entry point: parse arguments and dispatch install/remove."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='list', required=True, aliases=['pkg']),
            repository_path=dict(type='path'),
            accept_license=dict(type='bool', default=False),
            state=dict(type='str', default='present', choices=['absent', 'present']),
        ),
        supports_check_mode=True,
    )

    params = module.params
    installp_cmd = module.get_bin_path('installp', True)

    if params['state'] == 'present':
        # Installation needs a source directory of installp packages.
        if params['repository_path'] is None:
            module.fail_json(msg="repository_path is required to install package")
        changed, msg = install(module, installp_cmd, params['name'],
                               params['repository_path'], params['accept_license'])
    elif params['state'] == 'absent':
        changed, msg = remove(module, installp_cmd, params['name'])
    else:
        # Unreachable: argument_spec restricts state to the two choices.
        module.fail_json(changed=False, msg="Unexpected state.")

    module.exit_json(changed=changed, msg=msg)


if __name__ == '__main__':
    main()
| andmos/ansible | lib/ansible/modules/packaging/os/installp.py | Python | gpl-3.0 | 9,242 |
/*! ******************************************************************************
*
* Pentaho Data Integration
*
* Copyright (C) 2002-2013 by Pentaho : http://www.pentaho.com
*
*******************************************************************************
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
******************************************************************************/
package org.pentaho.di.core.vfs.configuration;
import java.io.IOException;
import java.lang.reflect.Method;
import org.pentaho.di.core.variables.VariableSpace;
import org.pentaho.di.core.vfs.KettleVFS;
import org.pentaho.di.i18n.BaseMessages;
/**
* This class supports overriding of config builders by supplying a VariableSpace containing a variable in the format of
* vfs.[scheme].config.parser where [scheme] is one of the VFS schemes (file, http, sftp, etc...)
*
* @author cboyden
*/
public class KettleFileSystemConfigBuilderFactory {

  private static Class<?> PKG = KettleVFS.class; // for i18n purposes, needed by Translator2!!

  /**
   * This factory returns a FileSystemConfigBuilder. Custom FileSystemConfigBuilders can be created by implementing the
   * {@link IKettleFileSystemConfigBuilder} or overriding the {@link KettleGenericFileSystemConfigBuilder}
   *
   * @see org.apache.commons.vfs.FileSystemConfigBuilder
   *
   * @param varSpace
   *          A Kettle variable space for resolving VFS config parameters
   * @param scheme
   *          The VFS scheme (FILE, HTTP, SFTP, etc...)
   * @return A FileSystemConfigBuilder that can translate Kettle variables into VFS config parameters
   * @throws IOException
   *           if a custom parser class is configured but cannot be loaded or instantiated
   */
  public static IKettleFileSystemConfigBuilder getConfigBuilder( VariableSpace varSpace, String scheme ) throws IOException {
    IKettleFileSystemConfigBuilder result = null;

    // Attempt to load the Config Builder from a variable: vfs.[scheme].config.parser = class
    String parserClass = varSpace.getVariable( "vfs." + scheme + ".config.parser" );

    if ( parserClass != null ) {
      try {
        Class<?> configBuilderClass =
          KettleFileSystemConfigBuilderFactory.class.getClassLoader().loadClass( parserClass );
        // getMethod() throws NoSuchMethodException rather than returning null,
        // so the original null check was dead code.
        Method mGetInstance = configBuilderClass.getMethod( "getInstance" );
        if ( IKettleFileSystemConfigBuilder.class.isAssignableFrom( mGetInstance.getReturnType() ) ) {
          result = (IKettleFileSystemConfigBuilder) mGetInstance.invoke( null );
        } else {
          result = (IKettleFileSystemConfigBuilder) configBuilderClass.newInstance();
        }
      } catch ( Exception e ) {
        // Failed to load custom parser. Re-throw with the root cause preserved
        // for diagnostics (the original discarded it).
        throw new IOException( BaseMessages.getString( PKG, "CustomVfsSettingsParser.Log.FailedToLoad" ), e );
      }
    } else {
      // No custom parser requested, load the built-in default for the scheme
      if ( scheme.equalsIgnoreCase( "sftp" ) ) {
        result = KettleSftpFileSystemConfigBuilder.getInstance();
      } else {
        result = KettleGenericFileSystemConfigBuilder.getInstance();
      }
    }
    return result;
  }
}
| apratkin/pentaho-kettle | core/src/org/pentaho/di/core/vfs/configuration/KettleFileSystemConfigBuilderFactory.java | Java | apache-2.0 | 3,605 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.