code
stringlengths
4
1.01M
# Homebrew-Cask definition for SIDPLAY, a Commodore 64 SID music player for macOS.
# NOTE(review): this uses the legacy pre-1.0 cask DSL (`no_checksum`, `link`,
# subclassing `Cask`); modern casks use `sha256 :no_check` and `app` instead.
class Sidplay < Cask
  url 'http://www.twinbirds.com/sidplay/SIDPLAY4.zip'
  homepage 'http://www.sidmusic.org/sidplay/mac/'
  # 'latest' means the upstream archive is unversioned; the checksum is skipped
  # below because the payload may change without a version bump.
  version 'latest'
  no_checksum
  # Symlink the app bundle from the archive into the Applications folder.
  link 'SIDPLAY.app'
end
# Homebrew formula for GMTL (Generic Math Template Library), a header-only
# math library built with SCons.
class Gmtl < Formula
  desc "Lightweight math library"
  homepage "https://ggt.sourceforge.io/"
  head "https://svn.code.sf.net/p/ggt/code/trunk"

  stable do
    url "https://downloads.sourceforge.net/project/ggt/Generic%20Math%20Template%20Library/0.6.1/gmtl-0.6.1.tar.gz"
    sha256 "f7d8e6958d96a326cb732a9d3692a3ff3fd7df240eb1d0921a7c5c77e37fc434"

    # Build assumes that Python is a framework, which isn't always true. See:
    # https://sourceforge.net/p/ggt/bugs/22/
    # The SConstruct from gmtl's HEAD doesn't need to be patched
    # (the patch body lives after __END__ below).
    patch :DATA
  end

  bottle do
    cellar :any_skip_relocation
    sha256 "66ae5e3ccd2a0cbf4608b4ffee45bccb9c3be33148af25787c76652c1c0967ac" => :high_sierra
    sha256 "ee8d0c9f5f52453421a189c040459b5126a5b739231493a3e39d331c934c6478" => :sierra
    sha256 "8aa9f0f1fb77376dd333bb03e9c5a07f6457b76008a74018a932dca930148606" => :el_capitan
    sha256 "5e6d70f957f11e58d8b3cd24d5474a8bedc73e0aec6df13f85322f4fda8a1164" => :mavericks
    sha256 "ffeb26dd58a9b05a4427ca02392f93f9d5b352af790e536e4d2989baa81e4faf" => :mountain_lion
    sha256 "568a43df4aebd32ab9638d2725721b9c062bca0ecb778dbffb67fafd926d4a1a" => :lion
  end

  depends_on "scons" => :build

  # The scons script in gmtl only works for gcc, patch it
  # https://sourceforge.net/p/ggt/bugs/28/
  patch do
    url "https://gist.githubusercontent.com/anonymous/c16cad998a4903e6b3a8/raw/e4669b3df0e14996c7b7b53937dd6b6c2cbc7c04/gmtl_Sconstruct.diff"
    sha256 "1167f89f52f88764080d5760b6d054036734b26c7fef474692ff82e9ead7eb3c"
  end

  # SCons installs headers directly into the prefix.
  def install
    scons "install", "prefix=#{prefix}"
  end
end

__END__
diff --git a/SConstruct b/SConstruct
index 8326a89..2eb7ff0 100644
--- a/SConstruct
+++ b/SConstruct
@@ -126,7 +126,9 @@ def BuildDarwinEnvironment():
    exp = re.compile('^(.*)\/Python\.framework.*$')
    m = exp.search(distutils.sysconfig.get_config_var('prefix'))
 
-   framework_opt = '-F' + m.group(1)
+   framework_opt = None
+   if m:
+      framework_opt = '-F' + m.group(1)
 
    CXX = os.environ.get("CXX", WhereIs('g++'))
 
@@ -138,7 +140,10 @@ def BuildDarwinEnvironment():
    LINK = CXX
 
    CXXFLAGS = ['-ftemplate-depth-256', '-DBOOST_PYTHON_DYNAMIC_LIB',
-               '-Wall', framework_opt, '-pipe']
+               '-Wall', '-pipe']
+
+   if framework_opt is not None:
+      CXXFLAGS.append(framework_opt)
 
    compiler_ver = match_obj.group(1)
    compiler_major_ver = int(match_obj.group(2))
@@ -152,7 +157,10 @@ def BuildDarwinEnvironment():
       CXXFLAGS += ['-Wno-long-double', '-no-cpp-precomp']
 
    SHLIBSUFFIX = distutils.sysconfig.get_config_var('SO')
-   SHLINKFLAGS = ['-bundle', framework_opt, '-framework', 'Python']
+   SHLINKFLAGS = ['-bundle']
+
+   if framework_opt is not None:
+      SHLINKFLAGS.extend([framework_opt, '-framework', 'Python'])
 
    LINKFLAGS = []
 
    # Enable profiling?
# Homebrew formula for SIMDe, a header-only library providing portable
# implementations of SIMD intrinsics (NEON, SSE, ...) on platforms that
# lack native support. Built with Meson/Ninja.
class Simde < Formula
  desc "Implementations of SIMD intrinsics for systems which don't natively support them"
  homepage "https://github.com/simd-everywhere/simde"
  url "https://github.com/simd-everywhere/simde/archive/v0.7.2.tar.gz"
  sha256 "366d5e9a342c30f1e40d1234656fb49af5ee35590aaf53b3c79b2afb906ed4c8"
  license "MIT"
  head "https://github.com/simd-everywhere/simde.git", branch: "master"

  bottle do
    # Header-only: one relocatable bottle works for every platform.
    sha256 cellar: :any_skip_relocation, all: "2b76aa4bfc8e2fe4c0af7a594e7f25aba0575b4f0ca9babef7057215e9cafe74"
  end

  depends_on "meson" => :build
  depends_on "ninja" => :build

  def install
    # Out-of-tree Meson build; the upstream test suite is skipped at install time.
    mkdir("build") do
      system "meson", *std_meson_args, "-Dtests=false", ".."
      system "ninja", "-v"
      system "ninja", "install", "-v"
    end
  end

  test do
    # Compile a program exercising both the NEON and SSE2 portable headers.
    (testpath/"test.c").write <<~EOS
      #include <assert.h>
      #include <simde/arm/neon.h>
      #include <simde/x86/sse2.h>

      int main() {
        int64_t a = 1, b = 2;
        assert(simde_vaddd_s64(a, b) == 3);

        simde__m128i z = simde_mm_setzero_si128();
        simde__m128i v = simde_mm_undefined_si128();
        v = simde_mm_xor_si128(v, v);
        assert(simde_mm_movemask_epi8(simde_mm_cmpeq_epi8(v, z)) == 0xFFFF);

        return 0;
      }
    EOS
    system ENV.cc, "test.c", "-o", "test"
    system "./test"
  end
end
# If your issue was closed without review

Both the [reporting bugs section of the readme](https://github.com/Homebrew/homebrew-cask#reporting-bugs) and [the bug report issue template you need to fill in before opening an issue](https://github.com/Homebrew/homebrew-cask/blob/master/.github/ISSUE_TEMPLATE/01_bug_report.md) warn that if the instructions aren’t followed, your issue may be closed without review. You might have been redirected to this document if it was apparent to a maintainer that this was the case.

We worked hard on those guides to keep things running smoothly, so we ask that you go back and follow them. It is OK to open a new issue once you have done so.

If the maintainer was wrong to close your issue, please reply stating why! Closing an issue does not mean the conversation is over.

If the guides themselves were unclear, help us improve them! First open an issue or pull request stating what you found confusing, and only then open your other issue.

We understand that sometimes users are tired and don’t want to sift through a guide to make what they feel is a simple bug report. But when each user ignores the guides to save a few minutes, maintainers lose hours and become fatigued. Repeatedly dealing with problems that are already solved and documented leads to maintainer burnout and wasted hours — time that would be better spent improving Homebrew-Cask itself and fixing real bugs. We ask that you take this into consideration.

Thank you for understanding and taking the time to make a correct report. The whole team appreciates it.
// Map mixin: adding/removing controls and building the container that holds
// the four control corners (topleft, topright, bottomleft, bottomright).
L.Map.include({

	// Adds the given control to this map. Chainable.
	addControl: function (control) {
		control.addTo(this);
		return this;
	},

	// Removes the given control from this map. Chainable.
	removeControl: function (control) {
		control.removeFrom(this);
		return this;
	},

	// Creates the 'leaflet-control-container' element inside the map container
	// and one positioned corner <div> per vertical/horizontal side pair.
	_initControlPos: function () {
		var prefix = 'leaflet-';
		var corners = this._controlCorners = {};
		var container = this._controlContainer =
		        L.DomUtil.create('div', prefix + 'control-container', this._container);

		function makeCorner(vSide, hSide) {
			// Corner key is the concatenated sides, e.g. 'topleft';
			// class is e.g. 'leaflet-top leaflet-left'.
			corners[vSide + hSide] =
			        L.DomUtil.create('div', prefix + vSide + ' ' + prefix + hSide, container);
		}

		var sides = [
			['top', 'left'],
			['top', 'right'],
			['bottom', 'left'],
			['bottom', 'right']
		];
		for (var i = 0; i < sides.length; i++) {
			makeCorner(sides[i][0], sides[i][1]);
		}
	}
});
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/renderer/service_worker/embedded_worker_devtools_agent.h"

#include "content/child/child_thread.h"
#include "content/common/devtools_messages.h"
#include "content/renderer/render_thread_impl.h"
#include "third_party/WebKit/public/platform/WebCString.h"
#include "third_party/WebKit/public/platform/WebString.h"
#include "third_party/WebKit/public/web/WebEmbeddedWorker.h"

using blink::WebEmbeddedWorker;
using blink::WebString;

namespace content {

// Bridges DevTools IPC messages to a blink::WebEmbeddedWorker. The agent
// registers itself as the handler for |route_id| on construction and
// unregisters on destruction, so its lifetime must bracket message delivery.
// NOTE(review): |webworker_| is a raw pointer owned elsewhere — presumably it
// outlives this agent; confirm against the owner of the embedded worker.
EmbeddedWorkerDevToolsAgent::EmbeddedWorkerDevToolsAgent(
    blink::WebEmbeddedWorker* webworker,
    int route_id)
    : webworker_(webworker),
      route_id_(route_id) {
  RenderThreadImpl::current()->AddEmbeddedWorkerRoute(route_id_, this);
}

EmbeddedWorkerDevToolsAgent::~EmbeddedWorkerDevToolsAgent() {
  // Stop receiving messages for this route.
  RenderThreadImpl::current()->RemoveEmbeddedWorkerRoute(route_id_);
}

// Dispatches DevTools agent messages to the handlers below.
// Returns true if the message was handled.
bool EmbeddedWorkerDevToolsAgent::OnMessageReceived(
    const IPC::Message& message) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(EmbeddedWorkerDevToolsAgent, message)
    IPC_MESSAGE_HANDLER(DevToolsAgentMsg_Attach, OnAttach)
    IPC_MESSAGE_HANDLER(DevToolsAgentMsg_Reattach, OnReattach)
    IPC_MESSAGE_HANDLER(DevToolsAgentMsg_Detach, OnDetach)
    IPC_MESSAGE_HANDLER(DevToolsAgentMsg_DispatchOnInspectorBackend,
                        OnDispatchOnInspectorBackend)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  return handled;
}

// Attaches DevTools to the worker, identified by |host_id|.
void EmbeddedWorkerDevToolsAgent::OnAttach(const std::string& host_id) {
  webworker_->attachDevTools(WebString::fromUTF8(host_id));
}

// Re-attaches DevTools, restoring the serialized inspector |state|
// (e.g. after a navigation or renderer swap).
void EmbeddedWorkerDevToolsAgent::OnReattach(const std::string& host_id,
                                             const std::string& state) {
  webworker_->reattachDevTools(WebString::fromUTF8(host_id),
                               WebString::fromUTF8(state));
}

void EmbeddedWorkerDevToolsAgent::OnDetach() {
  webworker_->detachDevTools();
}

// Forwards a raw inspector-protocol message to the worker's backend.
void EmbeddedWorkerDevToolsAgent::OnDispatchOnInspectorBackend(
    const std::string& message) {
  webworker_->dispatchDevToolsMessage(WebString::fromUTF8(message));
}

}  // namespace content
# stdlib
from collections import defaultdict
import time

# 3p
import psutil

# project
from checks import AgentCheck
from config import _is_affirmative
from utils.platform import Platform

# Seconds to cache PIDs that raised AccessDenied before re-probing them.
DEFAULT_AD_CACHE_DURATION = 120
# Seconds to cache the set of matching PIDs per instance.
DEFAULT_PID_CACHE_DURATION = 120


# Maps the internal keys accumulated in get_process_state() to the metric
# name suffix reported as 'system.processes.<suffix>'.
ATTR_TO_METRIC = {
    'thr': 'threads',
    'cpu': 'cpu.pct',
    'rss': 'mem.rss',
    'vms': 'mem.vms',
    'real': 'mem.real',
    'open_fd': 'open_file_descriptors',
    'r_count': 'ioread_count',  # FIXME: namespace me correctly (6.x), io.r_count
    'w_count': 'iowrite_count',  # FIXME: namespace me correctly (6.x) io.r_bytes
    'r_bytes': 'ioread_bytes',  # FIXME: namespace me correctly (6.x) io.w_count
    'w_bytes': 'iowrite_bytes',  # FIXME: namespace me correctly (6.x) io.w_bytes
    'ctx_swtch_vol': 'voluntary_ctx_switches',  # FIXME: namespace me correctly (6.x), ctx_swt.voluntary
    'ctx_swtch_invol': 'involuntary_ctx_switches',  # FIXME: namespace me correctly (6.x), ctx_swt.involuntary
}


class ProcessCheck(AgentCheck):
    # Agent check that finds groups of processes by name/cmdline and reports
    # per-group resource metrics plus a 'process.up' service check.
    # NOTE(review): Python 2 syntax (`except X, e`, `iteritems`) is kept on
    # purpose — this check targets the Python 2 based agent.

    def __init__(self, name, init_config, agentConfig, instances=None):
        AgentCheck.__init__(self, name, init_config, agentConfig, instances)

        # ad stands for access denied
        # We cache the PIDs getting this error and don't iterate on them
        # more often than `access_denied_cache_duration`
        # This cache is for all PIDs so it's global, but it should
        # be refreshed by instance
        self.last_ad_cache_ts = {}
        self.ad_cache = set()
        self.access_denied_cache_duration = int(
            init_config.get(
                'access_denied_cache_duration',
                DEFAULT_AD_CACHE_DURATION
            )
        )

        # By default cache the PID list for a while
        # Sometimes it's not wanted b/c it can mess with no-data monitoring
        # This cache is indexed per instance
        self.last_pid_cache_ts = {}
        self.pid_cache = {}
        self.pid_cache_duration = int(
            init_config.get(
                'pid_cache_duration',
                DEFAULT_PID_CACHE_DURATION
            )
        )

        # Process cache, indexed by instance
        self.process_cache = defaultdict(dict)

    def should_refresh_ad_cache(self, name):
        # True when the access-denied cache is stale for this instance.
        now = time.time()
        return now - self.last_ad_cache_ts.get(name, 0) > self.access_denied_cache_duration

    def should_refresh_pid_cache(self, name):
        # True when the cached PID set is stale for this instance.
        now = time.time()
        return now - self.last_pid_cache_ts.get(name, 0) > self.pid_cache_duration

    def find_pids(self, name, search_string, exact_match, ignore_ad=True):
        """
        Create a set of pids of selected processes.
        Search for search_string

        Matches each running process against every pattern in search_string:
        exact name match when exact_match is true, substring-of-cmdline
        otherwise. Results are cached per instance for pid_cache_duration.
        """
        if not self.should_refresh_pid_cache(name):
            return self.pid_cache[name]

        # Access-denied errors are only noisy (error level) when the user
        # asked not to ignore them.
        ad_error_logger = self.log.debug
        if not ignore_ad:
            ad_error_logger = self.log.error

        refresh_ad_cache = self.should_refresh_ad_cache(name)

        matching_pids = set()

        for proc in psutil.process_iter():
            # Skip access denied processes
            if not refresh_ad_cache and proc.pid in self.ad_cache:
                continue

            found = False
            for string in search_string:
                try:
                    # FIXME 6.x: All has been deprecated from the doc, should be removed
                    if string == 'All':
                        found = True
                    if exact_match:
                        if proc.name() == string:
                            found = True
                    else:
                        cmdline = proc.cmdline()
                        if string in ' '.join(cmdline):
                            found = True
                except psutil.NoSuchProcess:
                    self.log.warning('Process disappeared while scanning')
                except psutil.AccessDenied, e:
                    ad_error_logger('Access denied to process with PID %s', proc.pid)
                    ad_error_logger('Error: %s', e)
                    if refresh_ad_cache:
                        self.ad_cache.add(proc.pid)
                    if not ignore_ad:
                        raise
                else:
                    # No exception for this pattern: the pid is readable again,
                    # so drop it from the access-denied cache.
                    if refresh_ad_cache:
                        self.ad_cache.discard(proc.pid)
                    if found:
                        matching_pids.add(proc.pid)
                        break

        self.pid_cache[name] = matching_pids
        self.last_pid_cache_ts[name] = time.time()
        if refresh_ad_cache:
            self.last_ad_cache_ts[name] = time.time()

        return matching_pids

    def psutil_wrapper(self, process, method, accessors, *args, **kwargs):
        """
        A psutil wrapper that is calling
        * psutil.method(*args, **kwargs) and returns the result
        OR
        * psutil.method(*args, **kwargs).accessor[i] for each accessors given in
        a list, the result being indexed in a dictionary by the accessor name
        """
        # With accessors, partial failures yield a dict missing those keys;
        # without accessors, failures yield None.
        if accessors is None:
            result = None
        else:
            result = {}

        # Ban certain method that we know fail
        if method == 'memory_info_ex'\
                and (Platform.is_win32() or Platform.is_solaris()):
            return result
        elif method == 'num_fds' and not Platform.is_unix():
            return result

        try:
            res = getattr(process, method)(*args, **kwargs)
            if accessors is None:
                result = res
            else:
                for acc in accessors:
                    try:
                        result[acc] = getattr(res, acc)
                    except AttributeError:
                        self.log.debug("psutil.%s().%s attribute does not exist", method, acc)
        except (NotImplementedError, AttributeError):
            self.log.debug("psutil method %s not implemented", method)
        except psutil.AccessDenied:
            self.log.debug("psutil was denied acccess for method %s", method)
        except psutil.NoSuchProcess:
            self.warning("Process {0} disappeared while scanning".format(process.pid))

        return result

    def get_process_state(self, name, pids):
        # Collects per-process stats for every pid, returning a dict of lists
        # keyed by the ATTR_TO_METRIC keys (values may contain None on failure).
        st = defaultdict(list)

        # Remove from cache the processes that are not in `pids`
        cached_pids = set(self.process_cache[name].keys())
        pids_to_remove = cached_pids - pids
        for pid in pids_to_remove:
            del self.process_cache[name][pid]

        for pid in pids:
            st['pids'].append(pid)

            new_process = False
            # If the pid's process is not cached, retrieve it
            if pid not in self.process_cache[name] or not self.process_cache[name][pid].is_running():
                new_process = True
                try:
                    self.process_cache[name][pid] = psutil.Process(pid)
                    self.log.debug('New process in cache: %s' % pid)
                # Skip processes dead in the meantime
                except psutil.NoSuchProcess:
                    self.warning('Process %s disappeared while scanning' % pid)
                    # reset the PID cache now, something changed
                    self.last_pid_cache_ts[name] = 0
                    continue

            p = self.process_cache[name][pid]

            meminfo = self.psutil_wrapper(p, 'memory_info', ['rss', 'vms'])
            st['rss'].append(meminfo.get('rss'))
            st['vms'].append(meminfo.get('vms'))

            # will fail on win32 and solaris
            shared_mem = self.psutil_wrapper(p, 'memory_info_ex', ['shared']).get('shared')
            if shared_mem is not None and meminfo.get('rss') is not None:
                st['real'].append(meminfo['rss'] - shared_mem)
            else:
                st['real'].append(None)

            ctxinfo = self.psutil_wrapper(p, 'num_ctx_switches', ['voluntary', 'involuntary'])
            st['ctx_swtch_vol'].append(ctxinfo.get('voluntary'))
            st['ctx_swtch_invol'].append(ctxinfo.get('involuntary'))

            st['thr'].append(self.psutil_wrapper(p, 'num_threads', None))

            cpu_percent = self.psutil_wrapper(p, 'cpu_percent', None)
            if not new_process:
                # psutil returns `0.` for `cpu_percent` the first time it's sampled on a process,
                # so save the value only on non-new processes
                st['cpu'].append(cpu_percent)

            st['open_fd'].append(self.psutil_wrapper(p, 'num_fds', None))

            ioinfo = self.psutil_wrapper(p, 'io_counters', ['read_count', 'write_count', 'read_bytes', 'write_bytes'])
            st['r_count'].append(ioinfo.get('read_count'))
            st['w_count'].append(ioinfo.get('write_count'))
            st['r_bytes'].append(ioinfo.get('read_bytes'))
            st['w_bytes'].append(ioinfo.get('write_bytes'))

        return st

    def check(self, instance):
        # Entry point per instance: resolve matching pids, gather stats,
        # emit gauges and the 'process.up' service check.
        name = instance.get('name', None)
        tags = instance.get('tags', [])
        exact_match = _is_affirmative(instance.get('exact_match', True))
        search_string = instance.get('search_string', None)
        ignore_ad = _is_affirmative(instance.get('ignore_denied_access', True))

        if not isinstance(search_string, list):
            raise KeyError('"search_string" parameter should be a list')

        # FIXME 6.x remove me
        # NOTE(review): the adjacent string literals join "...will" directly
        # to "greatly..." with no separating space — message typo.
        if "All" in search_string:
            self.warning('Deprecated: Having "All" in your search_string will'
                         'greatly reduce the performance of the check and '
                         'will be removed in a future version of the agent.')

        if name is None:
            raise KeyError('The "name" of process groups is mandatory')

        # NOTE(review): unreachable — a None search_string already fails the
        # isinstance check above with a different message.
        if search_string is None:
            raise KeyError('The "search_string" is mandatory')

        pids = self.find_pids(
            name,
            search_string,
            exact_match,
            ignore_ad=ignore_ad
        )

        proc_state = self.get_process_state(name, pids)

        # FIXME 6.x remove the `name` tag
        tags.extend(['process_name:%s' % name, name])

        self.log.debug('ProcessCheck: process %s analysed', name)
        self.gauge('system.processes.number', len(pids), tags=tags)

        for attr, mname in ATTR_TO_METRIC.iteritems():
            vals = [x for x in proc_state[attr] if x is not None]
            # skip []
            if vals:
                # FIXME 6.x: change this prefix?
                self.gauge('system.processes.%s' % mname, sum(vals), tags=tags)

        self._process_service_check(name, len(pids), instance.get('thresholds', None))

    def _process_service_check(self, name, nb_procs, bounds):
        '''
        Report a service check, for each process in search_string.
        Report as OK if the process is in the warning thresholds
                       CRITICAL out of the critical thresholds
                       WARNING out of the warning thresholds
        '''
        tag = ["process:%s" % name]
        status = AgentCheck.OK
        message_str = "PROCS %s: %s processes found for %s"
        status_str = {
            AgentCheck.OK: "OK",
            AgentCheck.WARNING: "WARNING",
            AgentCheck.CRITICAL: "CRITICAL"
        }

        # Without thresholds: critical only when no process is found.
        if not bounds and nb_procs < 1:
            status = AgentCheck.CRITICAL
        elif bounds:
            warning = bounds.get('warning', [1, float('inf')])
            critical = bounds.get('critical', [1, float('inf')])

            # Critical is evaluated second so it overrides warning.
            if warning[1] < nb_procs or nb_procs < warning[0]:
                status = AgentCheck.WARNING
            if critical[1] < nb_procs or nb_procs < critical[0]:
                status = AgentCheck.CRITICAL

        self.service_check(
            "process.up",
            status,
            tags=tag,
            message=message_str % (status_str[status], nb_procs, name)
        )
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <html> <head> <title>DSpace Documentation : Application Layer</title> <link rel="stylesheet" href="styles/site.css" type="text/css" /> <META http-equiv="Content-Type" content="text/html; charset=UTF-8"> </head> <body> <table class="pagecontent" border="0" cellpadding="0" cellspacing="0" width="100%" bgcolor="#ffffff"> <tr> <td valign="top" class="pagebody"> <div class="pageheader"> <span class="pagetitle"> DSpace Documentation : Application Layer </span> </div> <div class="pagesubheading"> This page last changed on Feb 17, 2011 by <font color="#0050B2">helix84</font>. </div> <h1><a name="ApplicationLayer-SystemArchitecture%3AApplicationLayer"></a>System Architecture: Application Layer</h1> <p>The following explains how the application layer is built and used.</p> <style type='text/css'>/*<![CDATA[*/ div.rbtoc1297951722587 {margin-left: 0px;padding: 0px;} div.rbtoc1297951722587 ul {list-style: none;margin-left: 0px;} div.rbtoc1297951722587 li {margin-left: 0px;padding-left: 0px;} /*]]>*/</style><div class='rbtoc1297951722587'> <ul> <li><span class='TOCOutline'>1</span> <a href='#ApplicationLayer-WebUserInterface'>Web User Interface</a></li> <ul> <li><span class='TOCOutline'>1.1</span> <a href='#ApplicationLayer-WebUIFiles'>Web UI Files</a></li> <li><span class='TOCOutline'>1.2</span> <a href='#ApplicationLayer-TheBuildProcess'>The Build Process</a></li> <li><span class='TOCOutline'>1.3</span> <a href='#ApplicationLayer-ServletsandJSPs%28JSPUIOnly%29'>Servlets and JSPs (JSPUI Only)</a></li> <li><span class='TOCOutline'>1.4</span> <a href='#ApplicationLayer-CustomJSPTags%28JSPUIOnly%29'>Custom JSP Tags (JSPUI Only)</a></li> <li><span class='TOCOutline'>1.5</span> <a href='#ApplicationLayer-Internationalization%28JSPUIOnly%29'>Internationalization (JSPUI Only)</a></li> <ul> <li><span class='TOCOutline'>1.5.1</span> <a 
href='#ApplicationLayer-MessageKeyConvention'>Message Key Convention</a></li> <li><span class='TOCOutline'>1.5.2</span> <a href='#ApplicationLayer-WhichLanguagesarecurrentlysupported%3F'>Which Languages are currently supported?</a></li> </ul> <li><span class='TOCOutline'>1.6</span> <a href='#ApplicationLayer-HTMLContentinItems'>HTML Content in Items</a></li> <li><span class='TOCOutline'>1.7</span> <a href='#ApplicationLayer-ThesisBlocking'>Thesis Blocking</a></li> </ul> <li><span class='TOCOutline'>2</span> <a href='#ApplicationLayer-OAIPMHDataProvider'>OAI-PMH Data Provider</a></li> <ul> <li><span class='TOCOutline'>2.1</span> <a href='#ApplicationLayer-Sets'>Sets</a></li> <li><span class='TOCOutline'>2.2</span> <a href='#ApplicationLayer-UniqueIdentifier'>Unique Identifier</a></li> <li><span class='TOCOutline'>2.3</span> <a href='#ApplicationLayer-Accesscontrol'>Access control</a></li> <li><span class='TOCOutline'>2.4</span> <a href='#ApplicationLayer-ModificationDate%28OAIDateStamp%29'>Modification Date (OAI Date Stamp)</a></li> <li><span class='TOCOutline'>2.5</span> <a href='#ApplicationLayer-%27About%27Information'>'About' Information</a></li> <li><span class='TOCOutline'>2.6</span> <a href='#ApplicationLayer-Deletions'>Deletions</a></li> <li><span class='TOCOutline'>2.7</span> <a href='#ApplicationLayer-FlowControl%28ResumptionTokens%29'>Flow Control (Resumption Tokens)</a></li> </ul> <li><span class='TOCOutline'>3</span> <a href='#ApplicationLayer-DSpaceCommandLauncher'>DSpace Command Launcher</a></li> <ul> <li><span class='TOCOutline'>3.1</span> <a href='#ApplicationLayer-OlderVersions'>Older Versions</a></li> <li><span class='TOCOutline'>3.2</span> <a href='#ApplicationLayer-CommandLauncherStructure'>Command Launcher Structure</a></li> </ul> </ul></div> <h2><a name="ApplicationLayer-WebUserInterface"></a>Web User Interface</h2> <p>The DSpace Web UI is the largest and most-used component in the application layer. 
Built on Java Servlet and JavaServer Page technology, it allows end-users to access DSpace over the Web via their Web browsers. As of Dspace 1.3.2 the UI meets both XHTML 1.0 standards and Web Accessibility Initiative (WAI) level-2 standard.</p> <p>It also features an administration section, consisting of pages intended for use by central administrators. Presently, this part of the Web UI is not particularly sophisticated; users of the administration section need to know what they are doing&#33; Selected parts of this may also be used by collection administrators.</p> <h3><a name="ApplicationLayer-WebUIFiles"></a>Web UI Files</h3> <p>The Web UI-related files are located in a variety of directories in the DSpace source tree. Note that as of DSpace version 1.5, the deployment has changed. The build systems has moved to a maven-based system enabling the various projects (JSPUI, XMLUI, etc.) into separate projects. The system still uses the familar 'Ant' to deploy the webapps in later stages.</p> <div class='table-wrap'> <table class='confluenceTable'><tbody> <tr> <td class='confluenceTd'> <b>Location</b> </td> <td class='confluenceTd'> <b>Description</b> </td> </tr> <tr> <td class='confluenceTd'> <em>[dspace-source]/dspace-jspui/dspace-jspui-api/src/main/java/org/dspace/app/webui</em> </td> <td class='confluenceTd'> Web UI source files </td> </tr> <tr> <td class='confluenceTd'> <em>[dspace-source]/dspace-jspui/dspace-jspui-api/src/main/java/org/dspace/app/filters</em> </td> <td class='confluenceTd'> Servlet Filters (Servlet 2.3 spec) </td> </tr> <tr> <td class='confluenceTd'> <em>[dspace-source]/dspace-jspui/dspace-jspui-api/src/main/java/org/dspace/app/jsptag</em> </td> <td class='confluenceTd'> Custom JSP tag class files </td> </tr> <tr> <td class='confluenceTd'> <em>[dspace-source]/dspace-jspui/dspace-jspui-api/src/main/java/org/dspace/app/servlet</em> </td> <td class='confluenceTd'> Servlets for main Web UI (controllers) </td> </tr> <tr> <td class='confluenceTd'> 
<em>[dspace-source]/dspace-jspui/dspace-jspui-api/src/main/java/org/dspace/app/servlet/admin</em> </td> <td class='confluenceTd'> Servlets that comprise the administration part of the Web UI </td> </tr> <tr> <td class='confluenceTd'> <em>[dspace-source]/dspace-jspui/dspace-jspui-api/src/main/java/org/dspace/app/webui/util/</em> </td> <td class='confluenceTd'> Miscellaneous classes used by the servlets and filters </td> </tr> <tr> <td class='confluenceTd'> <em>[dspace-source]/dspace-jspui</em> </td> <td class='confluenceTd'> The JSP files </td> </tr> <tr> <td class='confluenceTd'> <em>[dspace-source]/dspace/modules/jspui/src/main/webapp</em> </td> <td class='confluenceTd'> This is where you place customized versions of JSPs—see 6. JSPUI Configuration and Customization </td> </tr> <tr> <td class='confluenceTd'> <em>[dspace-source]/dspace/modules/xmlui/src/main/webapp</em> </td> <td class='confluenceTd'> This is where you place customizations for the Manakin interface—see 7. Manakin [XMLUI] Configuration and Customization </td> </tr> <tr> <td class='confluenceTd'> <em>[dspace-source/dspace/modules/jspui/src/main/resources</em> </td> <td class='confluenceTd'> This is where you can place you customize version of the <em>Messages.properties</em> file. </td> </tr> <tr> <td class='confluenceTd'> <em>[dspace-source]/dspace-jspui/dspace-jspui-webapp/src/main/webapp/WEB-INF/dspace-tags.tld</em> </td> <td class='confluenceTd'> Custom DSpace JSP tag descriptor </td> </tr> </tbody></table> </div> <h3><a name="ApplicationLayer-TheBuildProcess"></a>The Build Process</h3> <p>The DSpace Maven build process constructs a full DSpace installation template directory structure containing a series of web applications. The results are placed in <em>[dspace-source]/dspace/target/dspace-[version]-build.dir/</em>. 
The process works as follows:</p> <ul> <li>All the DSpace source code is compiled, and/or automatically downloaded from the Maven Central code/libraries repository.</li> <li>A full DSpace "installation template" folder is built in <tt>[dspace-source]/dspace/target/dspace-[version]-build.dir/</tt> <ul> <li>This DSpace "installation template" folder has a structure identical to the <a href="Directories.html#Directories-InstalledDirectoryLayout">Installed Directory Layout</a></li> </ul> </li> </ul> <p>In order to then install &amp; deploy DSpace from this "installation template" folder, you must run the following from <tt>[dspace-source]/dspace/target/dspace-[version]-build.dir/</tt> :</p> <div class="code panel" style="border-width: 1px;"><div class="codeContent panelContent"> <pre class="code-java">ant -D [dspace]/config/dspace.cfg update</pre> </div></div> <p>Please see the <a href="Installation.html" title="Installation">Installation</a> instructions for more details about the Installation process.</p> <h3><a name="ApplicationLayer-ServletsandJSPs%28JSPUIOnly%29"></a>Servlets and JSPs (JSPUI Only)</h3> <p>The JSPUI Web UI is loosely based around the MVC (model, view, controller) model. The content management API corresponds to the model, the Java Servlets are the controllers, and the JSPs are the views. 
Interactions take the following basic form:</p> <ol> <li>An HTTP request is received from a browser</li> <li>The appropriate servlet is invoked, and processes the request by invoking the DSpace business logic layer public API</li> <li>Depending on the outcome of the processing, the servlet invokes the appropriate JSP</li> <li>The JSP is processed and sent to the browser<br/> The reasons for this approach are:</li> </ol> <ul> <li>All of the processing is done before the JSP is invoked, so any error or problem that occurs does not occur halfway through HTML rendering</li> <li>The JSPs contain as little code as possible, so they can be customized without having to delve into Java code too much<br/> The <em>org.dspace.app.webui.servlet.LoadDSpaceConfig</em> servlet is always loaded first. This is a very simple servlet that checks the <em>dspace-config</em> context parameter from the DSpace deployment descriptor, and uses it to locate <em>dspace.cfg</em>. It also loads up the Log4j configuration. It's important that this servlet is loaded first, since if another servlet is loaded up, it will cause the system to try and load DSpace and Log4j configurations, neither of which would be found.</li> </ul> <p>All DSpace servlets are subclasses of the <em>DSpaceServlet</em> class. The <em>DSpaceServlet</em> class handles some basic operations such as creating a DSpace <em>Context</em> object (opening a database connection etc.), authentication and error handling. Instead of overriding the <em>doGet</em> and <em>doPost</em> methods as one normally would for a servlet, DSpace servlets implement <em>doDSGet</em> or <em>doDSPost</em> which have an extra context parameter, and allow the servlet to throw various exceptions that can be handled in a standard way.</p> <p>The DSpace servlet processes the contents of the HTTP request. This might involve retrieving the results of a search with a query term, accessing the current user's eperson record, or updating a submission in progress. 
According to the results of this processing, the servlet must decide which JSP should be displayed. The servlet then fills out the appropriate attributes in the <em>HttpRequest</em> object that represents the HTTP request being processed. This is done by invoking the <em>setAttribute</em> method of the <em>javax.servlet.http.HttpServletRequest</em> object that is passed into the servlet from Tomcat. The servlet then forwards control of the request to the appropriate JSP using the <em>JSPManager.showJSP</em> method.</p>
<p>The <em>JSPManager.showJSP</em> method uses the standard Java servlet forwarding mechanism to forward the HTTP request to the JSP. The JSP is processed by Tomcat and the results sent back to the user's browser.</p>
<p>There is an exception to this servlet/JSP style: <em>index.jsp</em>, the 'home page', receives the HTTP request directly from Tomcat without a servlet being invoked first. This is because in the servlet 2.3 specification, there is no way to map a servlet to handle only requests made to '<em>/</em>'; such a mapping results in every request being directed to that servlet. By default, Tomcat forwards requests to '<em>/</em>' to <em>index.jsp</em>. To try and make things as clean as possible, <em>index.jsp</em> contains some simple code that would normally go in a servlet, and then forwards to <em>home.jsp</em> using the <em>JSPManager.showJSP</em> method. This means localized versions of the 'home page' can be created by placing a customized <em>home.jsp</em> in <em>[dspace-source]/jsp/local</em>, in the same manner as other JSPs.</p>
<p><em>[dspace-source]/jsp/dspace-admin/index.jsp</em>, the administration UI index page, is invoked directly by Tomcat and not through a servlet for similar reasons.</p>
<p>At the top of each JSP file, right after the license and copyright header, the appropriate attributes that a servlet must fill out prior to forwarding to that JSP are documented.
No validation is performed; if the servlet does not fill out the necessary attributes, it is likely that an internal server error will occur.</p> <p>Many JSPs containing forms will include hidden parameters that tell the servlets which form has been filled out. The submission UI servlet (<em>SubmissionController</em> is a prime example of a servlet that deals with the input from many different JSPs. The <em>step</em> and <em>page</em> hidden parameters (written out by the <em>SubmissionController.getSubmissionParameters()</em> method) are used to inform the servlet which page of which step has just been filled out (i.e. which page of the submission the user has just completed).</p> <p>Below is a detailed, scary diagram depicting the flow of control during the whole process of processing and responding to an HTTP request. More information about the authentication mechanism is mostly described in the configuration section.</p> <p><span class="image-wrap" style=""><img src="attachments/22022819/21954860.gif" style="border: 0px solid black"/></span></p> <p>Flow of Control During HTTP Request Processing</p> <h3><a name="ApplicationLayer-CustomJSPTags%28JSPUIOnly%29"></a>Custom JSP Tags (JSPUI Only)</h3> <p>The DSpace JSPs all use some custom tags defined in <em>/dspace/jsp/WEB-INF/dspace-tags.tld</em>, and the corresponding Java classes reside in <em>org.dspace.app.webui.jsptag</em>. The tags are listed below. The <em>dspace-tags.tld</em> file contains detailed comments about how to use the tags, so that information is not repeated here.</p> <ul> <li><b><em>layout</em></b>: Just about every JSP uses this tag. It produces the standard HTML header and <em>&lt;BODY&gt;_tag. Thus the content of each JSP is nested inside a &#95;&lt;dspace:layout&gt;</em> tag. The (XML-style)attributes of this tag are slightly complicated--see <em>dspace-tags.tld</em>. 
The JSPs in the source code bundle also provide plenty of examples.</li> <li><b><em>sidebar</em></b>: Can only be used inside a <em>layout</em> tag, and can only be used once per JSP. The content between the start and end <em>sidebar</em> tags is rendered in a column on the right-hand side of the HTML page. The contents can contain further JSP tags and Java 'scriptlets'.</li> <li><b><em>date</em></b>: Displays the date represented by an <em>org.dspace.content.DCDate</em> object. Just the one representation of date is rendered currently, but this could use the user's browser preferences to display a localized date in the future.</li> <li><b><em>include</em></b>: Obsolete, simple tag, similar to <em>jsp:include</em>. In versions prior to DSpace 1.2, this tag would use the locally modified version of a JSP if one was installed in jsp/local. As of 1.2, the build process now performs this function, however this tag is left in for backwards compatibility.</li> <li><b><em>item</em></b>: Displays an item record, including Dublin Core metadata and links to the bitstreams within it. Note that the displaying of the bitstream links is simplistic, and does not take into account any of the bundling structure. This is because DSpace does not have a fully-fledged dissemination architectural piece yet. Displaying an item record is done by a tag rather than a JSP for two reasons: Firstly, it happens in several places (when verifying an item record during submission or workflow review, as well as during standard item accesses), and secondly, displaying the item turns out to be mostly code-work rather than HTML anyway. Of course, the disadvantage of doing it this way is that it is slightly harder to customize exactly what is displayed from an item record; it is necessary to edit the tag code (<em>org.dspace.app.webui.jsptag.ItemTag</em>). 
Hopefully a better solution can be found in the future.</li> <li><b><em>itemlist</em></b><b>,</b> <b><em>collectionlist</em></b><b>,</b> <b><em>communitylist</em></b>: These tags display ordered sequences of items, collections and communities, showing minimal information but including a link to the page containing full details. These need to be used in HTML tables.</li> <li><b><em>popup</em></b>: This tag is used to render a link to a pop-up page (typically a help page.) If Javascript is available, the link will either open or pop to the front any existing DSpace pop-up window. If Javascript is not available, a standard HTML link is displayed that renders the link destination in a window named '<em>dspace.popup</em>'. In graphical browsers, this usually opens a new window or re-uses an existing window of that name, but if a window is re-used it is not 'raised' which might confuse the user. In text browsers, following this link will simply replace the current page with the destination of the link. This obviously means that Javascript offers the best functionality, but other browsers are still supported.</li> <li><b><em>selecteperson</em></b>: A tag which produces a widget analogous to HTML <em>&lt;SELECT&gt;</em>, that allows a user to select one or multiple e-people from a pop-up list.</li> <li><b><em>sfxlink</em></b>: Using an item's Dublin Core metadata DSpace can display an SFX link, if an SFX server is available. 
This tag does so for a particular item if the <em>sfx.server.url</em> property is defined in <em>dspace.cfg</em>.</li> </ul> <h3><a name="ApplicationLayer-Internationalization%28JSPUIOnly%29"></a>Internationalization (JSPUI Only)</h3> <div class='panelMacro'><table class='infoMacro'><colgroup><col width='24'><col></colgroup><tr><td valign='top'><img src="images/icons/emoticons/information.gif" width="16" height="16" align="absmiddle" alt="" border="0"></td><td><b>XMLUI Internationalization</b><br />For information about XMLUI Internationalization please see: <a href="XMLUI Configuration and Customization.html#XMLUIConfigurationandCustomization-MultilingualSupport">XMLUI Multilingual Support</a>.</td></tr></table></div> <p>The <a href="http://jakarta.apache.org/taglibs/doc/standard-1.0-doc/intro.html" title="Java Standard Tag Library v1.0">Java Standard Tag Library v1.0</a> is used to specify messages in the JSPs like this:</p> <p>OLD:</p> <div class="code panel" style="border-width: 1px;"><div class="codeContent panelContent"> <pre class="code-java">&lt;H1&gt;Search Results&lt;/H1&gt;</pre> </div></div> <p>NEW:</p> <div class="code panel" style="border-width: 1px;"><div class="codeContent panelContent"> <pre class="code-java">&lt;H1&gt;&lt;fmt:message key=<span class="code-quote">"jsp.search.results.title"</span>/&gt;&lt;/H1&gt;</pre> </div></div> <p>This message can now be changed using the <em>config/language-packs/Messages.properties</em> file. 
(This must be done at build-time: <em>Messages.properties</em> is placed in the <em>dspace.war</em> Web application file.)</p> <div class="code panel" style="border-width: 1px;"><div class="codeContent panelContent"> <pre class="code-java">jsp.search.results.title = Search Results</pre> </div></div> <p>Phrases may have parameters to be passed in, to make the job of translating easier, reduce the number of 'keys' and to allow translators to make the translated text flow more appropriately for the target language.</p> <p>OLD:</p> <div class="code panel" style="border-width: 1px;"><div class="codeContent panelContent"> <pre class="code-java">&lt;P&gt;Results &lt;%= r.getFirst() %&gt; to &lt;%= r.getLast() %&gt; of &lt;%=r.getTotal() %&gt;&lt;/P&gt;</pre> </div></div> <p>NEW:</p> <div class="code panel" style="border-width: 1px;"><div class="codeContent panelContent"> <pre class="code-java">&lt;fmt:message key=<span class="code-quote">"jsp.search.results.text"</span>&gt; &lt;fmt:param&gt;&lt;%= r.getFirst() %&gt;&lt;/fmt:param&gt; &lt;fmt:param&gt;&lt;%= r.getLast() %&gt;&lt;/fmt:param&gt; &lt;fmt:param&gt;&lt;%= r.getTotal() %&gt;&lt;/fmt:param&gt; &lt;/fmt:message&gt;</pre> </div></div> <p>(Note: JSTL 1.0 does not seem to allow JSP &lt;%= %&gt; expressions to be passed in as values of attribute in &lt;fmt:param value=""/&gt;)</p> <p>The above would appear in the <em>Messages_xx.properties</em> file as:</p> <div class="code panel" style="border-width: 1px;"><div class="codeContent panelContent"> <pre class="code-java">jsp.search.results.text = Results {0}-{1} of {2} </pre> </div></div> <p>Introducing number parameters that should be formatted according to the locale used makes no difference in the message key compared to string parameters:</p> <div class="code panel" style="border-width: 1px;"><div class="codeContent panelContent"> <pre class="code-java">jsp.submit.show-uploaded-file.size-in-bytes = {0} bytes</pre> </div></div> <p>In the JSP using this key can be used 
in the way below:</p> <div class="code panel" style="border-width: 1px;"><div class="codeContent panelContent"> <pre class="code-java">&lt;fmt:message key=<span class="code-quote">"jsp.submit.show-uploaded-file.size-in-bytes"</span>&gt; &lt;fmt:param&gt;&lt;fmt:formatNumber&gt;&lt;%= bitstream.getSize()%&gt;&lt;/fmt:formatNumber&gt;&lt;/fmt:param&gt; &lt;/fmt:message&gt; </pre> </div></div> <p>(Note: JSTL offers a way to include numbers in the message keys as <em>jsp.foo.key = {0,number} bytes</em>. Setting the parameter as <em>&lt;fmt:param value="${variable}" /&gt;</em> works when <em>variable</em> is a single variable name and doesn't work when trying to use a method's return value instead: <em>bitstream.getSize()</em>. Passing the number as string (or using the &lt;%= %&gt; expression) also does not work.)</p> <p>Multiple <em>Messages.properties</em> can be created for different languages. See <a href="http://java.sun.com/j2se/1.4.2/docs/api/java/util/ResourceBundle.html#getBundle(java.lang.String,%20java.util.Locale,%20java.lang.ClassLoader)" title="ResourceBundle.getBundle">ResourceBundle.getBundle</a>. e.g. you can add German and Canadian French translations:</p> <div class="code panel" style="border-width: 1px;"><div class="codeContent panelContent"> <pre class="code-java">Messages_de.properties Messages_fr_CA.properties</pre> </div></div> <p>The end user's browser settings determine which language is used. The English language file <em>Messages.properties</em> (or the default server locale) will be used as a default if there's no language bundle for the end user's preferred language. (Note that the English file is not called <em>Messages_en.properties</em> &#8211; this is so it is always available as a default, regardless of server configuration.)</p> <p>The <em>dspace:layout</em> tag has been updated to allow dictionary keys to be passed in for the titles. It now has two new parameters: <em>titlekey</em> and <em>parenttitlekey</em>. 
So where before you'd do:</p> <div class="code panel" style="border-width: 1px;"><div class="codeContent panelContent"> <pre class="code-java">&lt;dspace:layout title=<span class="code-quote">"Here"</span> parentlink=<span class="code-quote">"/mydspace"</span> parenttitle=<span class="code-quote">"My DSpace"</span>&gt; </pre> </div></div> <p>You now do:</p> <div class="code panel" style="border-width: 1px;"><div class="codeContent panelContent"> <pre class="code-java">&lt;dspace:layout titlekey=<span class="code-quote">"jsp.page.title"</span> parentlink=<span class="code-quote">"/mydspace"</span> parenttitlekey=<span class="code-quote">"jsp.mydspace"</span>&gt; </pre> </div></div> <p>And so the layout tag itself gets the relevant stuff out of the dictionary. <em>title</em> and <em>parenttitle</em> still work as before for backwards compatibility, and the odd spot where that's preferable.</p> <h4><a name="ApplicationLayer-MessageKeyConvention"></a>Message Key Convention</h4> <p>When translating further pages, please follow the convention for naming message keys to avoid clashes.</p> <p><b>For text in JSPs</b> use the complete path + filename of the JSP, then a one-word name for the message. e.g. for the title of <em>jsp/mydspace/main.jsp</em> use:</p> <div class="code panel" style="border-width: 1px;"><div class="codeContent panelContent"> <pre class="code-java">jsp.mydspace.main.title</pre> </div></div> <p>Some common words (e.g. 
"Help") can be brought out into keys starting <em>jsp.</em> for ease of translation, e.g.:</p> <div class="code panel" style="border-width: 1px;"><div class="codeContent panelContent"> <pre class="code-java">jsp.admin = Administer</pre> </div></div> <p>Other common words/phrases are brought out into 'general' parameters if they relate to a set (directory) of JSPs, e.g.</p> <div class="code panel" style="border-width: 1px;"><div class="codeContent panelContent"> <pre class="code-java">jsp.tools.general.delete = Delete</pre> </div></div> <p>Phrases that relate <b>strongly</b> to a topic (eg. MyDSpace) but used in many JSPs outside the particular directory are more convenient to be cross-referenced. For example one could use the key below in <em>jsp/submit/saved.jsp</em> to provide a link back to the user's <em>MyDSpace</em>:</p> <p><em>(Cross-referencing of keys</em> <b><em>in general</em></b> <em>is not a good idea as it may make maintenance more difficult. But in some cases it has more advantages as the meaning is obvious.)</em></p> <div class="code panel" style="border-width: 1px;"><div class="codeContent panelContent"> <pre class="code-java">jsp.mydspace.general.<span class="code-keyword">goto</span>-mydspace = Go to My DSpace</pre> </div></div> <p><b>For text in servlet code</b>, in custom JSP tags or wherever applicable use the fully qualified classname + a one-word name for the message. 
e.g.</p> <div class="code panel" style="border-width: 1px;"><div class="codeContent panelContent"> <pre class="code-java">org.dspace.app.webui.jsptag.ItemListTag.title = Title</pre> </div></div> <h4><a name="ApplicationLayer-WhichLanguagesarecurrentlysupported%3F"></a>Which Languages are currently supported?</h4> <p>To view translations currently being developed, please refer to the <a href="http://wiki.dspace.org/I18nSupport" title="i18n page">i18n page</a> of the DSpace Wiki.</p> <h3><a name="ApplicationLayer-HTMLContentinItems"></a>HTML Content in Items</h3> <p>For the most part, the DSpace item display just gives a link that allows an end-user to download a bitstream. However, if a bundle has a primary bitstream whose format is of MIME type <em>text/html</em>, instead a link to the HTML servlet is given.</p> <p>So if we had an HTML document like this:</p> <div class="code panel" style="border-width: 1px;"><div class="codeContent panelContent"> <pre class="code-java">contents.html chapter1.html chapter2.html chapter3.html figure1.gif figure2.jpg figure3.gif figure4.jpg figure5.gif figure6.gif</pre> </div></div> <p>The Bundle's primary bitstream field would point to the contents.html Bitstream, which we know is HTML (check the format MIME type) and so we know which to serve up first.</p> <p>The HTML servlet employs a trick to serve up HTML documents without actually modifying the HTML or other files themselves. 
Say someone is looking at <em>contents.html</em> from the above example, the URL in their browser will look like this:</p> <div class="code panel" style="border-width: 1px;"><div class="codeContent panelContent"> <pre class="code-java">https:<span class="code-comment">//dspace.mit.edu/html/1721.1/12345/contents.html</span></pre> </div></div> <p>If there's an image called <em>figure1.gif</em> in that HTML page, the browser will do HTTP GET on this URL:</p> <div class="code panel" style="border-width: 1px;"><div class="codeContent panelContent"> <pre class="code-java">https:<span class="code-comment">//dspace.mit.edu/html/1721.1/12345/figure1.gif</span></pre> </div></div> <p>The HTML document servlet can work out which item the user is looking at, and then which Bitstream in it is called <em>figure1.gif</em>, and serve up that bitstream. Similar for following links to other HTML pages. Of course all the links and image references have to be relative and not absolute.</p> <p>HTML documents must be "self-contained", as explained here. Provided that full path information is known by DSpace, any depth or complexity of HTML document can be served subject to those constraints. This is usually possible with some kind of batch import. If, however, the document has been uploaded one file at a time using the Web UI, the path information has been stripped. The system can cope with relative links that refer to a deeper path, e.g.</p> <div class="code panel" style="border-width: 1px;"><div class="codeContent panelContent"> <pre class="code-java">&lt;IMG SRC=<span class="code-quote">"images/figure1.gif"</span>&gt;</pre> </div></div> <p>If the item has been uploaded via the Web submit UI, in the Bitstream table in the database we have the 'name' field, which will contain the filename with no path (<em>figure1.gif</em>). 
We can still work out what <em>images/figure1.gif</em> is by making the HTML document servlet strip any path that comes in from the URL, e.g.</p> <div class="code panel" style="border-width: 1px;"><div class="codeContent panelContent"> <pre class="code-java">https:<span class="code-comment">//dspace.mit.edu/html/1721.1/12345/images/figure1.gif </span> ^^^^^^^ Strip <span class="code-keyword">this</span></pre> </div></div> <p>BUT all the filenames (regardless of directory names) must be unique. For example, this wouldn't work:</p> <div class="code panel" style="border-width: 1px;"><div class="codeContent panelContent"> <pre class="code-java">contents.html chapter1.html chapter2.html chapter1_images/figure.gif chapter2_images/figure.gif</pre> </div></div> <p>since the HTML document servlet wouldn't know which bitstream to serve up for:</p> <div class="code panel" style="border-width: 1px;"><div class="codeContent panelContent"> <pre class="code-java">https:<span class="code-comment">//dspace.mit.edu/html/1721.1/12345/chapter1_images/figure.gif </span>https:<span class="code-comment">//dspace.mit.edu/html/1721.1/12345/chapter2_images/figure.gif</span></pre> </div></div> <p>since it would just have <em>figure.gif</em></p> <p>To prevent "infinite URL spaces" appearing (e.g. if a file <em>foo.html</em> linked to <em>bar/foo.html</em>, which would link to <em>bar/bar/foo.html</em>...) this behavior can be configured by setting the configuration property <em>webui.html.max-depth-guess</em>.</p> <p>For example, if we receive a request for <em>foo/bar/index.html</em>, and we have a bitstream called just <em>index.html</em>, we will serve up that bitstream for the request if <em>webui.html.max-depth-guess</em> is 2 or greater. If <em>webui.html.max-depth-guess</em> is 1 or less, we would not serve that bitstream, as the depth of the file is greater. If <em>webui.html.max-depth-guess</em> is zero, the request filename and path must always exactly match the bitstream name. 
The default value (if that property is not present in <em>dspace.cfg</em>) is 3.</p> <h3><a name="ApplicationLayer-ThesisBlocking"></a>Thesis Blocking</h3> <p>The submission UI has an optional feature that came about as a result of MIT Libraries policy. If the <em>block.theses</em> parameter in <em>dspace.cfg</em> is <em>true</em>, an extra checkbox is included in the first page of the submission UI. This asks the user if the submission is a thesis. If the user checks this box, the submission is halted (deleted) and an error message displayed, explaining that DSpace should not be used to submit theses. This feature can be turned off and on, and the message displayed (<em>/dspace/jsp/submit/no-theses.jsp</em>) can be localized as necessary.</p> <h2><a name="ApplicationLayer-OAIPMHDataProvider"></a>OAI-PMH Data Provider</h2> <p>The DSpace platform supports the <a href="http://www.openarchives.org/" title="Open Archives Initiative Protocol for Metadata Harvesting">Open Archives Initiative Protocol for Metadata Harvesting</a> (OAI-PMH) version 2.0 as a data provider. This is accomplished using the <a href="http://www.oclc.org/research/software/oai/cat.shtm" title="OAICat framework from OCLC">OAICat framework from OCLC</a>.</p> <p>The DSpace build process builds a Web application archive, <em>[dspace-source]/build/oai.war</em>, in much the same way as the Web UI build process described above. The only differences are that the JSPs are not included, and <em>[dspace-source]/etc/oai-web.xml</em> is used as the deployment descriptor. This 'webapp' is deployed to receive and respond to OAI-PMH requests via HTTP. Note that typically it should <em>not</em> be deployed on SSL (<em>https:</em> protocol). 
In a typical configuration, this is deployed at <em>oai</em>, for example:</p> <div class="code panel" style="border-width: 1px;"><div class="codeContent panelContent"> <pre class="code-java"> http:<span class="code-comment">//dspace.myu.edu/oai/request?verb=Identify</span> </pre> </div></div> <p>The 'base URL' of this DSpace deployment would be:</p> <div class="code panel" style="border-width: 1px;"><div class="codeContent panelContent"> <pre class="code-java"> http:<span class="code-comment">//dspace.myu.edu/oai/request</span> </pre> </div></div> <p>It is this URL that should be registered with <a href="http://www.openarchives.org/" title="www.openarchives.org">www.openarchives.org</a>. Note that you can easily change the '<em>request</em>' portion of the URL by editing <em>[dspace-source]/etc/oai-web.xml</em> and rebuilding and deploying <em>oai.war</em>.</p> <p>DSpace provides implementations of the OAICat interfaces <em>AbstractCatalog</em>, <em>RecordFactory</em> and <em>Crosswalk</em> that interface with the DSpace content management API and harvesting API (in the search subsystem).</p> <p>Only the basic <em>oai_dc</em> unqualified Dublin Core metadata set export is enabled by default; this is particularly easy since all items have qualified Dublin Core metadata. When this metadata is harvested, the qualifiers are simply stripped; for example, <em>description.abstract</em> is exposed as unqualified <em>description</em>. The <em>description.provenance</em> field is hidden, as this contains private information about the submitter and workflow reviewers of the item, including their e-mail addresses. Additionally, to keep in line with OAI community practices, values of <em>contributor.author</em> are exposed as <em>creator</em> values.</p> <p>Other metadata formats are supported as well, using other <em>Crosswalk</em> implementations; consult the <em>oaicat.properties</em> file described below. 
To enable a format, simply uncomment the lines beginning with <em>Crosswalks.&#42;</em>. Multiple formats are allowed, and the current list includes, in addition to unqualified DC: MPEG DIDL, METS, MODS. There is also an incomplete, experimental qualified DC.</p> <p>Note that the current simple DC implementation (<em>org.dspace.app.oai.OAIDCCrosswalk</em>) does not currently strip out any invalid XML characters that may be lying around in the data. If your database contains a DC value with, for example, some ASCII control codes (form feed etc.) this may cause OAI harvesters problems. This should rarely occur, however. XML entities (such as <em>&gt;</em>) are encoded (e.g. to <em>&gt;</em>)</p> <p>In addition to the implementations of the OAICat interfaces, there is one main configuration file relevant to OAI-PMH support:</p> <ul> <li><b>oaicat.properties</b>: This file resides in <tt>[dspace]/config</tt>. You probably won't need to edit this, as it is pre-configured to meet most needs. You might want to change the <tt>Identify.earliestDatestamp</tt> field to more accurately reflect the oldest datestamp in your local DSpace system. (Note that this is the value of the <tt>last_modified</tt> column in the <tt>Item</tt> database table.)</li> </ul> <h3><a name="ApplicationLayer-Sets"></a>Sets</h3> <p>OAI-PMH allows repositories to expose an hierarchy of sets in which records may be placed. A record can be in zero or more sets.</p> <p>DSpace exposes collections as sets. The organization of communities is likely to change over time, and is therefore a less stable basis for selective harvesting.</p> <p>Each collection has a corresponding OAI set, discoverable by harvesters via the ListSets verb. 
The setSpec is the Handle of the collection, with the ':' and '/' converted to underscores so that the Handle is a legal setSpec, for example:</p> <div class="code panel" style="border-width: 1px;"><div class="codeContent panelContent"> <pre class="code-java"> hdl_1721.1_1234 </pre> </div></div> <p>Naturally enough, the collection name is also the name of the corresponding set.</p> <h3><a name="ApplicationLayer-UniqueIdentifier"></a>Unique Identifier</h3> <p>Every item in an OAI-PMH data repository must have a unique identifier, which must conform to the URI syntax. As of DSpace 1.2, Handles are not used; this is because in OAI-PMH, the OAI identifier identifies the <em>metadata record</em> associated with the <em>resource</em>. The <em>resource</em> is the DSpace item, whose <em>resource identifier</em> is the Handle. In practical terms, using the Handle for the OAI identifier may cause problems in the future if DSpace instances share items with the same Handles; the OAI metadata record identifiers should be different as the different DSpace instances would need to be harvested separately and may have different metadata for the item.</p> <p>The OAI identifiers that DSpace uses are of the form:</p> <div class="code panel" style="border-width: 1px;"><div class="codeContent panelContent"> <pre class="code-java">oai:host name:handle</pre> </div></div> <p>For example:</p> <div class="code panel" style="border-width: 1px;"><div class="codeContent panelContent"> <pre class="code-java">oai:dspace.myu.edu:123456789/345</pre> </div></div> <p>If you wish to use a different scheme, this can easily be changed by editing the value of <em>OAI_ID_PREFIX</em> at the top of the <tt>org.dspace.app.oai.DSpaceOAICatalog</tt> class. 
(You do not need to change the code if the above scheme works for you; the code picks up the host name and Handles automatically from the DSpace configuration.)</p> <h3><a name="ApplicationLayer-Accesscontrol"></a>Access control</h3> <p>OAI provides no authentication/authorisation details, although these could be implemented using standard HTTP methods. It is assumed that all access will be anonymous for the time being.</p> <p>A question is, "is all metadata public?" Presently the answer to this is yes; all metadata is exposed via OAI-PMH, even if the item has restricted access policies. The reasoning behind this is that people who do actually have permission to read a restricted item should still be able to use OAI-based services to discover the content.</p> <p>If in the future, this 'expose all metadata' approach proves unsatisfactory for any reason, it should be possible to expose only publicly readable metadata. The authorisation system has separate permissions for READing and item and READing the content (bitstreams) within it. This means the system can differentiate between an item with public metadata and hidden content, and an item with hidden metadata as well as hidden content. In this case the OAI data repository should only expose items those with anonymous READ access, so it can hide the existence of records to the outside world completely. In this scenario, one should be wary of protected items that are made public after a time. When this happens, the items are "new" from the OAI-PMH perspective.</p> <h3><a name="ApplicationLayer-ModificationDate%28OAIDateStamp%29"></a>Modification Date (OAI Date Stamp)</h3> <p>OAI-PMH harvesters need to know when a record has been created, changed or deleted. DSpace keeps track of a 'last modified' date for each item in the system, and this date is used for the OAI-PMH date stamp. This means that any changes to the metadata (e.g. 
admins correcting a field, or a withdrawal) will be exposed to harvesters.</p> <h3><a name="ApplicationLayer-%27About%27Information"></a>'About' Information</h3> <p>As part of each record given out to a harvester, there is an optional, repeatable "about" section which can be filled out in any (XML-schema conformant) way. Common uses are for provenance and rights information, and there are schemas in use by OAI communities for this. Presently DSpace does not provide any of this information.</p> <h3><a name="ApplicationLayer-Deletions"></a>Deletions</h3> <p>DSpace keeps track of deletions (withdrawals). These are exposed via OAI, which has a specific mechanism for dealing with this. Since DSpace keeps a permanent record of withdrawn items, in the OAI-PMH sense DSpace supports deletions 'persistently'. This is as opposed to 'transient' deletion support, which would mean that deleted records are forgotten after a time.</p> <p>Once an item has been withdrawn, OAI-PMH harvests of the date range in which the withdrawal occurred will find the 'deleted' record header. Harvests of a date range prior to the withdrawal will <em>not</em> find the record, despite the fact that the record did exist at that time.</p> <p>As an example of this, consider an item that was created on 2002-05-02 and withdrawn on 2002-10-06. A request to harvest the month 2002-10 will yield the 'record deleted' header. However, a harvest of the month 2002-05 will not yield the original record.</p> <p>Note that presently, the deletion of 'expunged' items is not exposed through OAI.</p> <h3><a name="ApplicationLayer-FlowControl%28ResumptionTokens%29"></a>Flow Control (Resumption Tokens)</h3> <p>An OAI data provider can prevent any performance impact caused by harvesting by forcing a harvester to receive data in time-separated chunks. If the data provider receives a request for a lot of data, it can send part of the data with a resumption token. 
The harvester can then return later with the resumption token and continue.</p> <p>DSpace supports resumption tokens for 'ListRecords' OAI-PMH requests. ListIdentifiers and ListSets requests do not produce a particularly high load on the system, so resumption tokens are not used for those requests.</p> <p>Each OAI-PMH ListRecords request will return at most 100 records. This limit is set at the top of <em>org.dspace.app.oai.DSpaceOAICatalog.java</em> (<em>MAX_RECORDS</em>). A potential issue here is that if a harvest yields an exact multiple of <em>MAX_RECORDS</em>, the last operation will result in a harvest with no records in it. It is unclear from the OAI-PMH specification if this is acceptable.</p> <p>When a resumption token is issued, the optional <em>completeListSize</em> and <em>cursor</em> attributes are not included. OAICat sets the <em>expirationDate</em> of the resumption token to one hour after it was issued, though in fact since DSpace resumption tokens contain all the information required to continue a request they do not actually expire.</p> <p>Resumption tokens contain all the state information required to continue a request. The format is:</p> <div class="code panel" style="border-width: 1px;"><div class="codeContent panelContent"> <pre class="code-java"> from/until/setSpec/offset </pre> </div></div> <p><em>from</em> and <em>until</em> are the ISO 8601 dates passed in as part of the original request, and <em>setSpec</em> is also taken from the original request. <em>offset</em> is the number of records that have already been sent to the harvester. For example:</p> <div class="code panel" style="border-width: 1px;"><div class="codeContent panelContent"> <pre class="code-java"> 2003-01-01<span class="code-comment">//hdl_1721_1_1234/300</span> </pre> </div></div> <p>This means the harvest is 'from'<br/> <em>2003-01-01</em>, has no 'until' date, is for collection hdl:1721.1/1234, and 300 records have already been sent to the harvester. 
(Actually, if the original OAI-PMH request doesn't specify a 'from' or 'until', OAICat fills them out automatically to '0000-00-00T00:00:00Z' and '9999-12-31T23:59:59Z' respectively. This means DSpace resumption tokens will always have from and until dates in them.)</p> <h2><a name="ApplicationLayer-DSpaceCommandLauncher"></a>DSpace Command Launcher</h2> <p>Introduced in Release 1.6, the DSpace Command Launcher brings together the various commands and scripts into a standard practice for running CLI runtime programs.</p> <h3><a name="ApplicationLayer-OlderVersions"></a>Older Versions</h3> <p>Prior to Release 1.6, there were various scripts written that masked a more manual approach to running CLI programs. The user had to issue <em>[dspace]/bin/dsrun</em> and then the java class that ran that program. With release 1.5, scripts were written to mask the <em>[dspace]/bin/dsrun</em> command. We have left the java class in the System Administration section since it does have value for debugging purposes and for those who wish to learn about DSpace<br/> programming or wish to customize the code at any time.</p> <h3><a name="ApplicationLayer-CommandLauncherStructure"></a>Command Launcher Structure</h3> <p>There are two components to the command launcher: the dspace script and the launcher.xml. 
The DSpace command calls a java class which in turn refers to <em>launcher.xml</em> that is stored in the <em>[dspace]/config</em> directory</p> <p><em>launcher.xml</em> is made of several components:</p> <ul> <li><em>&lt;command&gt;</em> begins the stanza for a command</li> <li><em>&lt;name&gt;</em>_<em>name of command</em>_<em>&lt;/name&gt;</em> the name of the command that you would use.</li> <li><em>&lt;description&gt;</em>_<em>the description of the command</em>_<em>&lt;/description&gt;</em></li> <li><em>&lt;step&gt; &lt;/step&gt;</em> User arguments are parsed and tested.</li> <li><em>&lt;class&gt;</em>_<em>&lt;the java class that is being used to run the CLI program&gt;</em>_<em>&lt;/class&gt;</em><br/> Prior to release 1.5 if one wanted to regenerate the browse index, one would have to issue the following commands manually: <div class="code panel" style="border-width: 1px;"><div class="codeContent panelContent"> <pre class="code-java">[dspace]/bin/dsrun org.dspace.browse.IndexBrowse -f -r [dspace]/bin/dsrun org.dspace.browse.ItemCounter [dspace]/bin/dsrun org.dspace.search.DSIndexer</pre> </div></div> <p>In release 1.5 a script was written and in release 1.6 the command <em>[dspace]/bin/dspace index-init</em> replaces the script. 
The stanza from <em>launcher.xml</em> show us how one can build more commands if needed:</p> <div class="code panel" style="border-width: 1px;"><div class="codeContent panelContent"> <pre class="code-java">&lt;command&gt; &lt;name&gt;index-update&lt;/name&gt; &lt;description&gt;Update the search and browse indexes&lt;/description&gt; &lt;step passuserargs=<span class="code-quote">"<span class="code-keyword">false</span>"</span>&gt; &lt;class&gt;org.dspace.browse.IndexBrowse&lt;/class&gt; &lt;argument&gt;-i&lt;/argument&gt; &lt;/step&gt; &lt;step passuserargs=<span class="code-quote">"<span class="code-keyword">false</span>"</span>&gt; &lt;class&gt;org.dspace.browse.ItemCounter&lt;/class&gt; &lt;/step&gt; &lt;step passuserargs=<span class="code-quote">"<span class="code-keyword">false</span>"</span>&gt; &lt;class&gt;org.dspace.search.DSIndexer&lt;/class&gt; &lt;/step&gt; &lt;/command&gt;</pre> </div></div> <p>.</p></li> </ul> <br/> <div class="tabletitle"> <a name="attachments">Attachments:</a> </div> <div class="greybox" align="left"> <img src="images/icons/bullet_blue.gif" height="8" width="8" alt=""/> <a href="attachments/22022819/21954860.gif">web-ui-flow.gif</a> (image/gif) <br/> </div> </td> </tr> </table> <table border="0" cellpadding="0" cellspacing="0" width="100%"> <tr> <td height="12" background="https://wiki.duraspace.org/images/border/border_bottom.gif"><img src="images/border/spacer.gif" width="1" height="1" border="0"/></td> </tr> <tr> <td align="center"><font color="grey">Document generated by Confluence on Mar 25, 2011 19:21</font></td> </tr> </table> </body> </html>
//@HEADER
// ************************************************************************
//
// Kokkos v. 2.0
// Copyright (2014) Sandia Corporation
//
// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact H. Carter Edwards (hcedwar@sandia.gov)
//
// ************************************************************************
//@HEADER

#include <cstdio>
#include <iostream>
#include <fstream>
#include <iomanip>
#include <cstdlib>
#include <cmath>

#include <Kokkos_Core.hpp>

#include <HexElement.hpp>
#include <FEMesh.hpp>

namespace HybridFEM {
namespace Nonlinear {

template< class MeshType , typename ScalarType > struct ElementComputation ;

//----------------------------------------------------------------------------

// CUDA specialization of the nonlinear FEM element computation for
// 27-node hexahedral elements.  Per element it evaluates the residual
// vector and Jacobian matrix contributions (see the integral formulas in
// contributeResidualJacobian) and writes them into the element_vectors /
// element_matrices views.  The kernel is launched directly from the
// constructor via Kokkos::Impl::CudaParallelLaunch with one WorkSpace of
// dynamic shared memory per block.
template<>
struct ElementComputation< FEMesh< double , 27 , Kokkos::Cuda > , double >
{
  typedef Kokkos::Cuda execution_space ;

  static const unsigned ElementNodeCount = 27 ;

  typedef HexElement_Data< ElementNodeCount > element_data_type ;
  typedef FEMesh< double , ElementNodeCount , execution_space > mesh_type ;

  static const unsigned SpatialDim = element_data_type::spatial_dimension ;
  static const unsigned FunctionCount = element_data_type::function_count ;
  static const unsigned IntegrationCount = element_data_type::integration_count ;
  static const unsigned TensorDim = SpatialDim * SpatialDim ;

  typedef Kokkos::View< double[][FunctionCount][FunctionCount] , execution_space > elem_matrices_type ;
  typedef Kokkos::View< double[][FunctionCount] , execution_space > elem_vectors_type ;
  typedef Kokkos::View< double[] , execution_space > value_vector_type ;

private:

  const element_data_type elem_data ;
  const typename mesh_type::elem_node_ids_type elem_node_ids ;
  const typename mesh_type::node_coords_type node_coords ;
  const value_vector_type nodal_values ;
  const elem_matrices_type element_matrices ;
  const elem_vectors_type element_vectors ;
  const float coeff_K ;
  const unsigned elem_count ;

  // Cofactor index table for the 3x3 Jacobian inverse; filled from jInvJ
  // in the constructor so it is available in device-captured state.
  unsigned invJacIndex[9][4] ;

  // Flat indices of the 3x3 Jacobian entries stored in WorkSpace::spaceJac.
  static const unsigned j11 = 0 , j12 = 1 , j13 = 2 ,
                        j21 = 3 , j22 = 4 , j23 = 5 ,
                        j31 = 6 , j32 = 7 , j33 = 8 ;

  // Can only handle up to 16 warps:
  // BlockDimX is a full warp (32 lanes); BlockDimY is the warp count.
  static const unsigned BlockDimX = 32 ;
  static const unsigned BlockDimY = 7 ;

  // Per-block scratch placed in dynamic shared memory ("extern __shared__"
  // below).  'sum' doubles as the node-gather buffer and the per-warp
  // reduction scratch used by sum_x / sum_x_clear.
  struct WorkSpace {
    double sum[ BlockDimY ][ BlockDimX ];
    double value_at_integ[ IntegrationCount ];
    double gradx_at_integ[ IntegrationCount ];
    double grady_at_integ[ IntegrationCount ];
    double gradz_at_integ[ IntegrationCount ];
    float spaceJac[ BlockDimY ][ 9 ];
    float spaceInvJac[ BlockDimY ][ 9 ];
    float detJweight[ IntegrationCount ];
    float dpsidx[ FunctionCount ][ IntegrationCount ];
    float dpsidy[ FunctionCount ][ IntegrationCount ];
    float dpsidz[ FunctionCount ][ IntegrationCount ];
  };

public:

  // Captures the mesh views and immediately launches the element kernel
  // over all elements (grid-stride over blockIdx.x in operator()).
  // NOTE(review): the launch happens inside the constructor, so this type
  // is "construct = execute"; callers never hold a passive instance.
  ElementComputation ( const mesh_type & arg_mesh ,
                       const elem_matrices_type & arg_element_matrices ,
                       const elem_vectors_type & arg_element_vectors ,
                       const value_vector_type & arg_nodal_values ,
                       const float arg_coeff_K )
  : elem_data()
  , elem_node_ids( arg_mesh.elem_node_ids )
  , node_coords( arg_mesh.node_coords )
  , nodal_values( arg_nodal_values )
  , element_matrices( arg_element_matrices )
  , element_vectors( arg_element_vectors )
  , coeff_K( arg_coeff_K )
  , elem_count( arg_mesh.elem_node_ids.dimension_0() )
  {
    // Cofactor pairs: invJ[k] = J[a]*J[b] - J[c]*J[d] for {a,b,c,d} below.
    const unsigned jInvJ[9][4] =
      { { j22 , j33 , j23 , j32 } ,
        { j13 , j32 , j12 , j33 } ,
        { j12 , j23 , j13 , j22 } ,
        { j23 , j31 , j21 , j33 } ,
        { j11 , j33 , j13 , j31 } ,
        { j13 , j21 , j11 , j23 } ,
        { j21 , j32 , j22 , j31 } ,
        { j12 , j31 , j11 , j32 } ,
        { j11 , j22 , j12 , j21 } };

    for ( unsigned i = 0 ; i < 9 ; ++i ) {
      for ( unsigned j = 0 ; j < 4 ; ++j ) {
        invJacIndex[i][j] = jInvJ[i][j] ;
      }
    }

    const unsigned shmem = sizeof(WorkSpace);
    const unsigned grid_max = 65535 ; // 1D grid limit for this launch
    const unsigned grid_count = std::min( grid_max , elem_count );

    // For compute capability 2.x up to 1024 threads per block
    const dim3 block( BlockDimX , BlockDimY , 1 );
    const dim3 grid( grid_count , 1 , 1 );

    Kokkos::Impl::CudaParallelLaunch< ElementComputation >( *this , grid , block , shmem );
  }

public:

  //------------------------------------
  // Sum among the threadIdx.x
  // Warp-level tree reduction over the 32 lanes of one warp (threadIdx.y
  // selects the warp's row of scratch).  Lane 0 writes the total into
  // 'result'.
  // NOTE(review): this relies on implicit warp-synchronous execution
  // (volatile scratch, no __syncwarp) — valid for the compute 2.x target
  // named above; confirm before building for independent-thread-scheduling
  // architectures (Volta+).

  template< typename Type >
  __device__ inline static
  void sum_x( Type & result , const double value )
  {
    extern __shared__ WorkSpace work_data[] ;

    volatile double * const base_sum = & work_data->sum[ threadIdx.y ][ threadIdx.x ] ;

    base_sum[ 0] = value ;

    if ( threadIdx.x < 16 ) {
      base_sum[0] += base_sum[16];
      base_sum[0] += base_sum[ 8];
      base_sum[0] += base_sum[ 4];
      base_sum[0] += base_sum[ 2];
      base_sum[0] += base_sum[ 1];
    }

    if ( 0 == threadIdx.x ) { result = base_sum[0] ; }
  }

  // Zero this thread's slot of the per-warp reduction scratch.
  __device__ inline static
  void sum_x_clear()
  {
    extern __shared__ WorkSpace work_data[] ;
    work_data->sum[ threadIdx.y ][ threadIdx.x ] = 0 ;
  }

  //------------------------------------
  //------------------------------------

  // Phase 1 per element: gather node coordinates and DOF values, build the
  // Jacobian / inverse-Jacobian per integration point, and populate the
  // shared-memory tables (dpsid{x,y,z}, detJweight, *_at_integ) consumed
  // by contributeResidualJacobian.
  __device__ inline
  void evaluateFunctions( const unsigned ielem ) const
  {
    extern __shared__ WorkSpace work_data[] ;

    // Each warp (threadIdx.y) computes an integration point
    // Each thread is responsible for a node / function.
    const unsigned iFunc = threadIdx.x ;
    const bool hasFunc = iFunc < FunctionCount ;

    //------------------------------------
    // Each warp gathers a different variable into the shared 'sum' scratch
    // (rows 0..2 = x,y,z node coordinates; row 3 = nodal DOF values).
    if ( hasFunc ) {
      const unsigned node = elem_node_ids( ielem , iFunc );

      for ( unsigned iy = threadIdx.y ; iy < 4 ; iy += blockDim.y ) {
        switch( iy ) {
        case 0 : work_data->sum[0][iFunc] = node_coords(node,0); break ;
        case 1 : work_data->sum[1][iFunc] = node_coords(node,1); break ;
        case 2 : work_data->sum[2][iFunc] = node_coords(node,2); break ;
        case 3 : work_data->sum[3][iFunc] = nodal_values(node); break ;
        default: break ;
        }
      }
    }

    __syncthreads(); // Wait for all warps to finish gathering

    // now get local 'const' copies in register space:
    const double x = work_data->sum[0][ iFunc ];
    const double y = work_data->sum[1][ iFunc ];
    const double z = work_data->sum[2][ iFunc ];
    const double dof_val = work_data->sum[3][ iFunc ];

    __syncthreads(); // Wait for all warps to finish extracting

    sum_x_clear(); // Make sure summation scratch is zero

    //------------------------------------
    // Each warp is now on its own computing an integration point
    // so no further explicit synchronizations are required.

    if ( hasFunc ) {

      float * const J = work_data->spaceJac[ threadIdx.y ];
      float * const invJ = work_data->spaceInvJac[ threadIdx.y ];

      for ( unsigned iInt = threadIdx.y ;
            iInt < IntegrationCount ; iInt += blockDim.y ) {

        const float val = elem_data.values[iInt][iFunc] ;
        const float gx = elem_data.gradients[iInt][0][iFunc] ;
        const float gy = elem_data.gradients[iInt][1][iFunc] ;
        const float gz = elem_data.gradients[iInt][2][iFunc] ;

        // Jacobian entries: warp-reductions of gradient x coordinate.
        sum_x( J[j11], gx * x );
        sum_x( J[j12], gx * y );
        sum_x( J[j13], gx * z );

        sum_x( J[j21], gy * x );
        sum_x( J[j22], gy * y );
        sum_x( J[j23], gy * z );

        sum_x( J[j31], gz * x );
        sum_x( J[j32], gz * y );
        sum_x( J[j33], gz * z );

        // Inverse jacobian, only enough parallel work for 9 threads in the warp

        if ( iFunc < TensorDim ) {
          invJ[ iFunc ] =
            J[ invJacIndex[iFunc][0] ] * J[ invJacIndex[iFunc][1] ] -
            J[ invJacIndex[iFunc][2] ] * J[ invJacIndex[iFunc][3] ] ;

          // Let all threads in the warp compute determinant into a register

          const float detJ = J[j11] * invJ[j11] +
                             J[j21] * invJ[j12] +
                             J[j31] * invJ[j13] ;

          invJ[ iFunc ] /= detJ ;

          if ( 0 == iFunc ) {
            work_data->detJweight[ iInt ] = detJ * elem_data.weights[ iInt ] ;
          }
        }

        // Transform bases gradients and compute value and gradient

        const float dx = gx * invJ[j11] + gy * invJ[j12] + gz * invJ[j13];
        const float dy = gx * invJ[j21] + gy * invJ[j22] + gz * invJ[j23];
        const float dz = gx * invJ[j31] + gy * invJ[j32] + gz * invJ[j33];

        work_data->dpsidx[iFunc][iInt] = dx ;
        work_data->dpsidy[iFunc][iInt] = dy ;
        work_data->dpsidz[iFunc][iInt] = dz ;

        sum_x( work_data->gradx_at_integ[iInt] , dof_val * dx );
        sum_x( work_data->grady_at_integ[iInt] , dof_val * dy );
        sum_x( work_data->gradz_at_integ[iInt] , dof_val * dz );

        sum_x( work_data->value_at_integ[iInt] , dof_val * val );
      }
    }

    __syncthreads(); // All shared data must be populated at return.
  }

  // Phase 2 per element: integrate the residual and Jacobian contributions
  // from the shared-memory tables built by evaluateFunctions and reduce
  // them into the per-element output views.
  __device__ inline
  void contributeResidualJacobian( const unsigned ielem ) const
  {
    extern __shared__ WorkSpace work_data[] ;

    sum_x_clear(); // Make sure summation scratch is zero

    // $$ R_i = \int_{\Omega} \nabla \phi_i \cdot (k \nabla T) + \phi_i T^2 d \Omega $$
    // $$ J_{i,j} = \frac{\partial R_i}{\partial T_j} = \int_{\Omega} k \nabla \phi_i \cdot \nabla \phi_j + 2 \phi_i \phi_j T d \Omega $$

    const unsigned iInt = threadIdx.x ;

    if ( iInt < IntegrationCount ) {

      const double value_at_integ = work_data->value_at_integ[ iInt ] ;
      const double gradx_at_integ = work_data->gradx_at_integ[ iInt ] ;
      const double grady_at_integ = work_data->grady_at_integ[ iInt ] ;
      const double gradz_at_integ = work_data->gradz_at_integ[ iInt ] ;

      const float detJweight = work_data->detJweight[ iInt ] ;
      const float coeff_K_detJweight = coeff_K * detJweight ;

      for ( unsigned iRow = threadIdx.y ;
            iRow < FunctionCount ; iRow += blockDim.y ) {

        // Row basis function value/gradients pre-scaled by the quadrature
        // weight (and conductivity for the gradient terms).
        const float value_row = elem_data.values[ iInt ][ iRow ] * detJweight ;
        const float dpsidx_row = work_data->dpsidx[ iRow ][ iInt ] * coeff_K_detJweight ;
        const float dpsidy_row = work_data->dpsidy[ iRow ][ iInt ] * coeff_K_detJweight ;
        const float dpsidz_row = work_data->dpsidz[ iRow ][ iInt ] * coeff_K_detJweight ;

        const double res_del = dpsidx_row * gradx_at_integ +
                               dpsidy_row * grady_at_integ +
                               dpsidz_row * gradz_at_integ ;
        const double res_val = value_at_integ * value_at_integ * value_row ;
        const double jac_val_row = 2 * value_at_integ * value_row ;

        sum_x( element_vectors( ielem , iRow ) , res_del + res_val );

        for ( unsigned iCol = 0 ; iCol < FunctionCount ; ++iCol ) {

          const float jac_del =
            dpsidx_row * work_data->dpsidx[iCol][iInt] +
            dpsidy_row * work_data->dpsidy[iCol][iInt] +
            dpsidz_row * work_data->dpsidz[iCol][iInt] ;

          const double jac_val = jac_val_row * elem_data.values[ iInt ][ iCol ] ;

          sum_x( element_matrices( ielem , iRow , iCol ) , jac_del + jac_val );
        }
      }
    }

    __syncthreads(); // All warps finish before refilling shared data
  }

  // Kernel entry point: grid-stride loop so one launch covers elem_count
  // elements even when elem_count exceeds the 65535-block grid limit.
  __device__ inline
  void operator()(void) const
  {
    extern __shared__ WorkSpace work_data[] ;

    for ( unsigned ielem = blockIdx.x ;
          ielem < elem_count ; ielem += gridDim.x ) {
      evaluateFunctions( ielem );
      contributeResidualJacobian( ielem );
    }
  }

}; /* ElementComputation */

} /* namespace Nonlinear */
} /* namespace HybridFEM */
<?php
/**
 * Zend Framework
 *
 * LICENSE
 *
 * This source file is subject to the new BSD license that is bundled
 * with this package in the file LICENSE.txt.
 * It is also available through the world-wide-web at this URL:
 * http://framework.zend.com/license/new-bsd
 * If you did not receive a copy of the license and are unable to
 * obtain it through the world-wide-web, please send an email
 * to license@zend.com so we can send you a copy immediately.
 *
 * @category   Zend
 * @package    Zend_Filter
 * @subpackage UnitTests
 * @copyright  Copyright (c) 2005-2014 Zend Technologies USA Inc. (http://www.zend.com)
 * @license    http://framework.zend.com/license/new-bsd     New BSD License
 * @version    $Id$
 */

/**
 * @see Zend_Filter_File_UpperCase
 */
require_once 'Zend/Filter/File/UpperCase.php';

/**
 * Unit tests for Zend_Filter_File_UpperCase: the filter rewrites a file's
 * contents in upper case, optionally converting through a given encoding.
 *
 * @category   Zend
 * @package    Zend_Filter
 * @subpackage UnitTests
 * @copyright  Copyright (c) 2005-2014 Zend Technologies USA Inc. (http://www.zend.com)
 * @license    http://framework.zend.com/license/new-bsd     New BSD License
 * @group      Zend_Filter
 */
class Zend_Filter_File_UpperCaseTest extends PHPUnit_Framework_TestCase
{
    /**
     * Path to test files
     *
     * @var string
     */
    protected $_filesPath;

    /**
     * Original testfile
     *
     * @var string
     */
    protected $_origFile;

    /**
     * Testfile
     *
     * @var string
     */
    protected $_newFile;

    /**
     * Sets the path to test files
     *
     * @return void
     */
    public function __construct()
    {
        $this->_filesPath = dirname(__FILE__) . DIRECTORY_SEPARATOR . '..' . DIRECTORY_SEPARATOR . '_files' . DIRECTORY_SEPARATOR;
        $this->_origFile = $this->_filesPath . 'testfile2.txt';
        $this->_newFile = $this->_filesPath . 'newtestfile2.txt';
    }

    /**
     * Copies the pristine fixture to a working file so each test can
     * mutate it freely.
     *
     * @return void
     */
    public function setUp()
    {
        if (!file_exists($this->_newFile)) {
            copy($this->_origFile, $this->_newFile);
        }
    }

    /**
     * Removes the working copy created by setUp().
     *
     * @return void
     */
    public function tearDown()
    {
        if (file_exists($this->_newFile)) {
            unlink($this->_newFile);
        }
    }

    /**
     * Filtering an existing file converts its contents to upper case.
     *
     * @return void
     */
    public function testInstanceCreationAndNormalWorkflow()
    {
        $this->assertContains('This is a File', file_get_contents($this->_newFile));
        $filter = new Zend_Filter_File_UpperCase();
        $filter->filter($this->_newFile);
        $this->assertContains('THIS IS A FILE', file_get_contents($this->_newFile));
    }

    /**
     * Filtering a non-existing file raises a Zend_Filter_Exception.
     *
     * @return void
     */
    public function testFileNotFoundException()
    {
        try {
            $filter = new Zend_Filter_File_UpperCase();
            $filter->filter($this->_newFile . 'unknown');
            $this->fail('Unknown file exception expected');
        } catch (Zend_Filter_Exception $e) {
            $this->assertContains('not found', $e->getMessage());
        }
    }

    /**
     * Encoding can be supplied through the constructor; when mbstring is
     * unavailable the filter reports it.  (Method name typo "InIstance"
     * is kept as-is: renaming would change the public test interface.)
     *
     * @return void
     */
    public function testCheckSettingOfEncodingInIstance()
    {
        $this->assertContains('This is a File', file_get_contents($this->_newFile));
        try {
            $filter = new Zend_Filter_File_UpperCase('ISO-8859-1');
            $filter->filter($this->_newFile);
            $this->assertContains('THIS IS A FILE', file_get_contents($this->_newFile));
        } catch (Zend_Filter_Exception $e) {
            $this->assertContains('mbstring is required', $e->getMessage());
        }
    }

    /**
     * Encoding can also be supplied via setEncoding() after construction.
     *
     * @return void
     */
    public function testCheckSettingOfEncodingWithMethod()
    {
        $this->assertContains('This is a File', file_get_contents($this->_newFile));
        try {
            $filter = new Zend_Filter_File_UpperCase();
            $filter->setEncoding('ISO-8859-1');
            $filter->filter($this->_newFile);
            $this->assertContains('THIS IS A FILE', file_get_contents($this->_newFile));
        } catch (Zend_Filter_Exception $e) {
            $this->assertContains('mbstring is required', $e->getMessage());
        }
    }
}
--TEST-- Test script to verify that magic methods should be called only once when accessing an unset property. --CREDITS-- Marco Pivetta <ocramius@gmail.com> --FILE-- <?php class Test { public $publicProperty; protected $protectedProperty; private $privateProperty; public function __construct() { unset( $this->publicProperty, $this->protectedProperty, $this->privateProperty ); } function __get($name) { echo '__get ' . $name . "\n"; return $this->$name; } function __set($name, $value) { echo '__set ' . $name . "\n"; $this->$name = $value; } function __isset($name) { echo '__isset ' . $name . "\n"; return isset($this->$name); } } $test = new Test(); $test->nonExisting; $test->publicProperty; $test->protectedProperty; $test->privateProperty; isset($test->nonExisting); isset($test->publicProperty); isset($test->protectedProperty); isset($test->privateProperty); $test->nonExisting = 'value'; $test->publicProperty = 'value'; $test->protectedProperty = 'value'; $test->privateProperty = 'value'; ?> --EXPECTF-- __get nonExisting Notice: Undefined property: Test::$nonExisting in %s on line %d __get publicProperty Notice: Undefined property: Test::$publicProperty in %s on line %d __get protectedProperty Notice: Undefined property: Test::$protectedProperty in %s on line %d __get privateProperty Notice: Undefined property: Test::$privateProperty in %s on line %d __isset nonExisting __isset publicProperty __isset protectedProperty __isset privateProperty __set nonExisting __set publicProperty __set protectedProperty __set privateProperty
import qualified Data.Vector as U import Data.Bits main = print . U.maximumBy (\x y -> GT) . U.map (*2) . U.map (`shiftL` 2) $ U.replicate (100000000 :: Int) (5::Int)
#include "trimetrics.hpp" #include <math.h> void Metric2DTri::draw(int /*xwin*/, int /*ywin*/ ) { // draw metric information glCallList(drawingList); // draw moused point glBegin(GL_POINTS); glPointSize(5.0); glColor3f(0.0,0.0,0.0); glVertex3f(currX, currY, 0); glEnd(); } void Metric2DTri::mouseEvent(QMouseEvent *e, int xmax, int ymax, bool) { // convert window coords to world coords int ywin = ymax - e->y(); int xwin = e->x(); double nodes[3][3] = { {-.5,1,0}, {-.5,0,0}, {.5,0,0}}; nodes[0][0] = 2*xRange*(double)xwin/(double)xmax - xRange; nodes[0][1] = yRange*(double)ywin/(double)ymax; currX = nodes[0][0]; currY = nodes[0][1]; // calculate metric currMetricVal = (*func)(3, nodes); // emit value changed emit current_val_changed(); } void Metric2DTri::generate_plot() { // create a drawing list and delete old one if it exists if(drawingList) glDeleteLists(drawingList,1); drawingList = glGenLists(1); glNewList(drawingList, GL_COMPILE); { double nodes[3][3] = { {-.5,1,0}, {-.5,0,0}, {.5,0,0}}; glPointSize(4.0); // coordinates can range between (-xRange, xRange) and (0, yRange) double hscan , vscan; hscan = vscan = sqrt((double)NUM_POINTS); // scan vertically for(int i=0; i<vscan; i++) { nodes[0][1] = (double)i/(double)vscan * yRange; // scan horizontally for(int j=0; j<hscan; j++) { nodes[0][0] = (double)j/(double)hscan * 2 * xRange - xRange; // calculate metric double val = (*func)(3, nodes); // set color based on value glColor3f( (colorFactor-val)*(colorFactor-val), val*val,2*(colorFactor-val)*val); // draw the point glBegin(GL_POINTS); glVertex3d(nodes[0][0], nodes[0][1], nodes[0][2]); glEnd(); } } // draw fixed nodes glPointSize(5.0); glColor3f(0,0,0); glBegin(GL_POINTS); glVertex3d(-.5,0,0); glVertex3d( .5,0,0); glEnd(); } glEndList(); }
/*
 * CVS Identifier:
 *
 * $Id: DataBlk.java,v 1.7 2001/04/15 14:32:05 grosbois Exp $
 *
 * Interface: DataBlk
 *
 * Description: A generic interface to hold 2D blocks of data.
 *
 *
 *
 * COPYRIGHT:
 *
 * This software module was originally developed by Raphaël Grosbois and
 * Diego Santa Cruz (Swiss Federal Institute of Technology-EPFL); Joel
 * Askelöf (Ericsson Radio Systems AB); and Bertrand Berthelot, David
 * Bouchard, Félix Henry, Gerard Mozelle and Patrice Onno (Canon Research
 * Centre France S.A) in the course of development of the JPEG2000
 * standard as specified by ISO/IEC 15444 (JPEG 2000 Standard). This
 * software module is an implementation of a part of the JPEG 2000
 * Standard. Swiss Federal Institute of Technology-EPFL, Ericsson Radio
 * Systems AB and Canon Research Centre France S.A (collectively JJ2000
 * Partners) agree not to assert against ISO/IEC and users of the JPEG
 * 2000 Standard (Users) any of their rights under the copyright, not
 * including other intellectual property rights, for this software module
 * with respect to the usage by ISO/IEC and Users of this software module
 * or modifications thereof for use in hardware or software products
 * claiming conformance to the JPEG 2000 Standard. Those intending to use
 * this software module in hardware or software products are advised that
 * their use may infringe existing patents. The original developers of
 * this software module, JJ2000 Partners and ISO/IEC assume no liability
 * for use of this software module or modifications thereof. No license
 * or right to this software module is granted for non JPEG 2000 Standard
 * conforming products. JJ2000 Partners have full right to use this
 * software module for his/her own purpose, assign or donate this
 * software module to any third party and to inhibit third parties from
 * using this software module for non JPEG 2000 Standard conforming
 * products. This copyright notice must be included in all copies or
 * derivative works of this software module.
 *
 * Copyright (c) 1999/2000 JJ2000 Partners.
 *
 */
using System;
namespace CSJ2K.j2k.image
{
	
	/// <summary> This is a generic abstract class to store data from a block of an
	/// image. This class does not have the notion of components. Therefore, it
	/// should be used for data from a single component. Subclasses should
	/// implement the different types of storage (<tt>int</tt>, <tt>float</tt>,
	/// etc.).
	/// 
	/// <p>The data is always stored in one array, of the type matching the data
	/// type (i.e. for 'int' it's an 'int[]'). The data should be stored in the
	/// array in standard scan-line order. That is the samples go from the top-left
	/// corner of the code-block to the lower-right corner by line and then
	/// column.</p>
	/// 
	/// <p>The member variable 'offset' gives the index in the array of the first
	/// data element (i.e. the top-left coefficient (ulx,uly)). The member variable
	/// 'scanw' gives the width of the scan that is used to store the data, that
	/// can be different from the width of the block. Element '(x,y)' of the
	/// code-block (i.e. '(ulx,uly)' is the top-left coefficient), will appear at
	/// position 'offset+(y-uly)*scanw+(x-ulx)' in the array of data.</p>
	/// 
	/// <p>A block of data can have the <i>progressive</i> attribute set. Data is
	/// progressive when it is obtained by successive refinement and the values in
	/// this block are approximations of the "final" values. When the final values
	/// are returned the progressive attribute must be turned off.</p>
	/// 
	/// <p>The classes <tt>DataBlkInt</tt> and <tt>DataBlkFloat</tt> provide
	/// implementations for <tt>int</tt> and <tt>float</tt> types respectively.</p>
	/// 
	/// </summary>
	/// <seealso cref="DataBlkInt">
	/// 
	/// </seealso>
	/// <seealso cref="DataBlkFloat">
	/// 
	/// </seealso>
	public abstract class DataBlk
	{
		/// <summary> Returns the data type of the <tt>DataBlk</tt> object, as defined in
		/// this class.
		/// 
		/// </summary>
		/// <returns> The data type of the object, as defined in this class.
		/// 
		/// </returns>
		public abstract int DataType{get;}
		//UPGRADE_NOTE: Respective javadoc comments were merged. It should be changed in order to comply with .NET documentation conventions. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1199'"
		/// <summary> Returns the array containing the data, or null if there is no data. The
		/// returned array is of the type returned by <tt>getDataType()</tt> (e.g.,
		/// for <tt>TYPE_INT</tt>, it is a <tt>int[]</tt>).
		/// 
		/// <p>Each implementing class should provide a type specific equivalent
		/// method (e.g., <tt>getDataInt()</tt> in <tt>DataBlkInt</tt>) which
		/// returns an array of the correct type explicitly and not through an
		/// <tt>Object</tt>.</p>
		/// 
		/// </summary>
		/// <returns> The array containing the data, or <tt>null</tt> if there is no
		/// data.
		/// 
		/// </returns>
		/// <seealso cref="getDataType">
		/// 
		/// </seealso>
		/// <summary> Sets the data array to the specified one. The type of the specified
		/// data array must match the one returned by <tt>getDataType()</tt> (e.g.,
		/// for <tt>TYPE_INT</tt>, it should be a <tt>int[]</tt>). If the wrong
		/// type of array is given a <tt>ClassCastException</tt> will be thrown.
		/// 
		/// <p>The size of the array is not necessarily checked for consistency
		/// with <tt>w</tt> and <tt>h</tt> or any other fields.</p>
		/// 
		/// <p>Each implementing class should provide a type specific equivalent
		/// method (e.g., <tt>setDataInt()</tt> in <tt>DataBlkInt</tt>) which takes
		/// an array of the correct type explicitly and not through an
		/// <tt>Object</tt>.</p>
		/// 
		/// </summary>
		/// <param name="arr">The new data array to use
		/// 
		/// </param>
		/// <seealso cref="getDataType">
		/// 
		/// </seealso>
		public abstract System.Object Data{get;set;}
		
		/// <summary>The identifier for the <tt>byte</tt> data type, as signed 8 bits. </summary>
		public const int TYPE_BYTE = 0;
		
		/// <summary>The identifier for the <tt>short</tt> data type, as signed 16 bits. </summary>
		public const int TYPE_SHORT = 1;
		
		/// <summary>The identifier for the <tt>int</tt> data type, as signed 32 bits. </summary>
		// NOTE(review): value 2 is skipped here — presumably reserved in the
		// original JJ2000 Java source; confirm before reusing it.
		public const int TYPE_INT = 3;
		
		/// <summary>The identifier for the <tt>float</tt> data type </summary>
		public const int TYPE_FLOAT = 4;
		
		/// <summary>The horizontal coordinate (in pixels) of the upper-left corner of the
		/// block of data. This is relative to the component of the image from
		/// where this block was filled or is to be filled.
		/// </summary>
		public int ulx;
		
		/// <summary>The vertical coordinate of the upper-left corner of the block of
		/// data. This is relative to the component of the image from where this
		/// block was filled or is to be filled.
		/// </summary>
		public int uly;
		
		/// <summary>The width of the block, in pixels. </summary>
		public int w;
		
		/// <summary>The height of the block, in pixels. </summary>
		public int h;
		
		/// <summary>The offset in the array of the top-left coefficient </summary>
		public int offset;
		
		/// <summary>The width of the scanlines used to store the data in the array </summary>
		public int scanw;
		
		/// <summary>The progressive attribute (<tt>false</tt> by default) </summary>
		public bool progressive;
		
		/// <summary> Returns the size in bits, given the data type. The data type must be
		/// one defined in this class. An <tt>IllegalArgumentException</tt> is
		/// thrown if <tt>type</tt> is not defined in this class.
		/// 
		/// </summary>
		/// <param name="type">The data type.
		/// 
		/// </param>
		/// <returns> The size in bits of the data type.
		/// 
		/// </returns>
		public static int getSize(int type)
		{
			switch (type)
			{
				
				case TYPE_BYTE: 
					return 8;
				
				case TYPE_SHORT: 
					return 16;
				
				case TYPE_INT: 
				case TYPE_FLOAT: 
					return 32;
				
				default: 
					throw new System.ArgumentException();
				
			}
		}
		
		/// <summary> Returns a string of informations about the DataBlk
		/// 
		/// </summary>
		/// <returns> Block dimensions and progressiveness in a string
		/// 
		/// </returns>
		public override System.String ToString()
		{
			System.String typeString = "";
			switch (DataType)
			{
				
				case TYPE_BYTE: 
					typeString = "Unsigned Byte";
					break;
				
				case TYPE_SHORT: 
					typeString = "Short";
					break;
				
				case TYPE_INT: 
					typeString = "Integer";
					break;
				
				case TYPE_FLOAT: 
					typeString = "Float";
					break;
				}
			
			return "DataBlk: " + "upper-left(" + ulx + "," + uly + "), width=" + w + ", height=" + h + ", progressive=" + progressive + ", offset=" + offset + ", scanw=" + scanw + ", type=" + typeString;
		}
	}
}
/* * vim:ts=4:sw=4:expandtab * * i3 - an improved dynamic tiling window manager * © 2009-2011 Michael Stapelberg and contributors (see also: LICENSE) * * click.c: Button press (mouse click) events. * */ #ifndef I3_CLICK_H #define I3_CLICK_H /** * The button press X callback. This function determines whether the floating * modifier is pressed and where the user clicked (decoration, border, inside * the window). * * Then, route_click is called on the appropriate con. * */ int handle_button_press(xcb_button_press_event_t *event); #endif
<html> <head></head> <body> <script src="/resources/testharness.js"></script> <script src="/resources/testharnessreport.js"></script> <script> function get(url, type) { return new Promise(function(resolve, reject) { var xhr = new XMLHttpRequest(); xhr.open('GET', url); xhr.responseType = type; xhr.onreadystatechange = function() { if (xhr.readyState === 4) { resolve(xhr); } } xhr.send(); }); } promise_test(t => { return get('resources/reply2.txt', 'text').then(xhr => { assert_equals(xhr.status, 200); assert_equals(xhr.responseURL, 'http://127.0.0.1:8000/xmlhttprequest/resources/reply2.txt'); }); }, 'no redirect, text'); promise_test(t => { return get('resources/redirect.php?url=reply2.xml', 'document').then(xhr => { assert_equals(xhr.status, 200); assert_equals(xhr.responseURL, 'http://127.0.0.1:8000/xmlhttprequest/resources/reply2.xml'); assert_equals(xhr.response.URL, 'http://127.0.0.1:8000/xmlhttprequest/resources/reply2.xml'); }); }, 'with redirect, document'); promise_test(t => { return get('resources/navigation-target.html#foobar', 'text').then(xhr => { assert_equals(xhr.status, 200); assert_equals(xhr.responseURL, 'http://127.0.0.1:8000/xmlhttprequest/resources/navigation-target.html'); }); }, 'no redirect, text, with fragments'); promise_test(t => { return get('resources/redirect.php?url=not-found.txt', 'text').then(xhr => { assert_equals(xhr.status, 404); assert_equals(xhr.responseURL, 'http://127.0.0.1:8000/xmlhttprequest/resources/not-found.txt'); }); }, 'with redirect, text, not found'); </script> </body> </html>
#
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Build the greeter_client / greeter_server example against installed
# protobuf + gRPC (located via pkg-config).
HOST_SYSTEM = $(shell uname | cut -f 1 -d_)
SYSTEM ?= $(HOST_SYSTEM)
CXX = g++
CPPFLAGS += `pkg-config --cflags protobuf grpc`
CXXFLAGS += -std=c++11
# macOS ld has no --no-as-needed; the reflection library must still be
# force-linked so server reflection works, hence the two branches.
ifeq ($(SYSTEM),Darwin)
LDFLAGS += -L/usr/local/lib `pkg-config --libs protobuf grpc++ grpc`\
           -lgrpc++_reflection\
           -ldl
else
LDFLAGS += -L/usr/local/lib `pkg-config --libs protobuf grpc++ grpc`\
           -Wl,--no-as-needed -lgrpc++_reflection -Wl,--as-needed\
           -ldl
endif
PROTOC = protoc
GRPC_CPP_PLUGIN = grpc_cpp_plugin
GRPC_CPP_PLUGIN_PATH ?= `which $(GRPC_CPP_PLUGIN)`

PROTOS_PATH = ../../protos

vpath %.proto $(PROTOS_PATH)

all: system-check greeter_client greeter_server

greeter_client: helloworld.pb.o helloworld.grpc.pb.o greeter_client.o
	$(CXX) $^ $(LDFLAGS) -o $@

greeter_server: helloworld.pb.o helloworld.grpc.pb.o greeter_server.o
	$(CXX) $^ $(LDFLAGS) -o $@

# Generate gRPC service stubs from the .proto (kept via .PRECIOUS so the
# intermediate .cc files survive chained implicit rules).
.PRECIOUS: %.grpc.pb.cc
%.grpc.pb.cc: %.proto
	$(PROTOC) -I $(PROTOS_PATH) --grpc_out=. --plugin=protoc-gen-grpc=$(GRPC_CPP_PLUGIN_PATH) $<

# Generate plain protobuf message code from the .proto.
.PRECIOUS: %.pb.cc
%.pb.cc: %.proto
	$(PROTOC) -I $(PROTOS_PATH) --cpp_out=. $<

clean:
	rm -f *.o *.pb.cc *.pb.h greeter_client greeter_server


# The following is to test your system and ensure a smoother experience.
# They are by no means necessary to actually compile a grpc-enabled software.

PROTOC_CMD = which $(PROTOC)
PROTOC_CHECK_CMD = $(PROTOC) --version | grep -q libprotoc.3
PLUGIN_CHECK_CMD = which $(GRPC_CPP_PLUGIN)
HAS_PROTOC = $(shell $(PROTOC_CMD) > /dev/null && echo true || echo false)
ifeq ($(HAS_PROTOC),true)
HAS_VALID_PROTOC = $(shell $(PROTOC_CHECK_CMD) 2> /dev/null && echo true || echo false)
endif
HAS_PLUGIN = $(shell $(PLUGIN_CHECK_CMD) > /dev/null && echo true || echo false)

SYSTEM_OK = false
ifeq ($(HAS_VALID_PROTOC),true)
ifeq ($(HAS_PLUGIN),true)
SYSTEM_OK = true
endif
endif

system-check:
ifneq ($(HAS_VALID_PROTOC),true)
	@echo " DEPENDENCY ERROR"
	@echo
	@echo "You don't have protoc 3.0.0 installed in your path."
	@echo "Please install Google protocol buffers 3.0.0 and its compiler."
	@echo "You can find it here:"
	@echo
	@echo "   https://github.com/google/protobuf/releases/tag/v3.0.0"
	@echo
	@echo "Here is what I get when trying to evaluate your version of protoc:"
	@echo
	-$(PROTOC) --version
	@echo
	@echo
endif
ifneq ($(HAS_PLUGIN),true)
	@echo " DEPENDENCY ERROR"
	@echo
	@echo "You don't have the grpc c++ protobuf plugin installed in your path."
	@echo "Please install grpc. You can find it here:"
	@echo
	@echo "   https://github.com/grpc/grpc"
	@echo
	@echo "Here is what I get when trying to detect if you have the plugin:"
	@echo
	-which $(GRPC_CPP_PLUGIN)
	@echo
	@echo
endif
ifneq ($(SYSTEM_OK),true)
	@false
endif
/*
 * This file is part of the Soletta Project
 *
 * Copyright (C) 2015 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Sample: monitor Soletta platform/service state and optionally fire a
 * sequence of (command, parameter) pairs taken from argv, one pair every
 * CMD_TICK milliseconds. */

#include <errno.h>
#include <stdlib.h>
#include <stdio.h>

#include "sol-mainloop.h"
#include "sol-platform.h"
#include "sol-util.h"

#define CMD_TICK 2000 /* ms between consecutive commands */

static char **cmds;          /* argv tail: cmd/param pairs */
static int n_cmds;           /* number of entries in cmds */
static int cur_cmd;          /* index of the next entry to consume */
static struct sol_timeout *timeout_handle;

/* Callback for platform state transitions registered below. */
static void
on_state_change(void *data, enum sol_platform_state state)
{
    printf("Platform state changed. New state: %d\n", state);
}

/* Callback for monitored services (see the "monitor" command). */
static void
on_service_changed(void *data, const char *service,
    enum sol_platform_service_state state)
{
    printf("Service state changed: '%s'. New state: %d\n", service, state);
}

/* Timer callback: consume one (command, parameter) pair per tick.
 * Commands are consumed strictly in pairs; a trailing unpaired argument
 * is never fired (the >= 2 check below stops before it). */
static bool
on_timeout_cmd(void *data)
{
    const char *cmd;
    const char *param;

    cmd = cmds[cur_cmd++];
    param = cmds[cur_cmd++];

    printf("Firing new command: %s %s\n", cmd, param);
    if (streq(cmd, "monitor"))
        sol_platform_add_service_monitor(on_service_changed, param, NULL);
    else if (streq(cmd, "stop-monitor"))
        sol_platform_del_service_monitor(on_service_changed, param, NULL);
    else if (streq(cmd, "start"))
        sol_platform_start_service(param);
    else if (streq(cmd, "stop"))
        sol_platform_stop_service(param);
    else if (streq(cmd, "restart"))
        sol_platform_restart_service(param);
    else if (streq(cmd, "target"))
        sol_platform_set_target(param);

    /* keep the timer alive while at least one full pair remains */
    if (n_cmds - cur_cmd >= 2)
        return true;

    timeout_handle = NULL;
    return false;
}

int
main(int argc, char *argv[])
{
    int r = 0;

    if (sol_init() < 0)
        return EXIT_FAILURE;

    printf("Initial platform state: %d\n", sol_platform_get_state());

    sol_platform_add_state_monitor(on_state_change, NULL);

    /* argc > 2 guarantees at least one full cmd/param pair */
    if (argc > 2) {
        cmds = argv + 1;
        n_cmds = argc - 1;
        timeout_handle = sol_timeout_add(CMD_TICK, on_timeout_cmd, NULL);
    }

    sol_run();

    if (timeout_handle)
        sol_timeout_del(timeout_handle);

    sol_platform_del_state_monitor(on_state_change, NULL);

    sol_shutdown();

    return r;
}
package CPANPLUS::Module::Author::Fake;

use CPANPLUS::Module::Author;
use CPANPLUS::Internals;
use CPANPLUS::Error;

use strict;
use vars qw[@ISA];
use Params::Check qw[check];

@ISA = qw[CPANPLUS::Module::Author];

### have Params::Check report template failures verbosely ###
$Params::Check::VERBOSE = 1;

=pod

=head1 NAME

CPANPLUS::Module::Author::Fake - dummy author object for CPANPLUS

=head1 SYNOPSIS

    my $auth = CPANPLUS::Module::Author::Fake->new(
                    author  => 'Foo Bar',
                    email   => 'luser@foo.com',
                    cpanid  => 'FOO',
                    _id     => $cpan->id,
                );

=head1 DESCRIPTION

A class for creating fake author objects, for shortcut use internally
by CPANPLUS.

Inherits from C<CPANPLUS::Module::Author>.

=head1 METHODS

=head2 new( _id => DIGIT )

Creates a dummy author object. It can take the same options as
C<< CPANPLUS::Module::Author->new >>, but will fill in default ones
if none are provided. Only the _id key is required.

=cut

sub new {
    my $class = shift;
    my %hash  = @_;

    ### defaults are used for any author detail the caller omits;
    ### '_id' links the object back to a CPANPLUS internals object ###
    my $tmpl = {
        author  => { default => 'CPANPLUS Internals' },
        email   => { default => 'cpanplus-info@lists.sf.net' },
        cpanid  => { default => 'CPANPLUS' },
        _id     => { default => CPANPLUS::Internals->_last_id },
    };

    my $args = check( $tmpl, \%hash ) or return;

    ### build the real base-class object from the validated args ###
    my $obj = CPANPLUS::Module::Author->new( %$args ) or return;

    ### even a fake author must belong to a live CPANPLUS object ###
    unless( $obj->_id ) {
        error(loc("No '%1' specified -- No CPANPLUS object associated!",'_id'));
        return;
    }

    ### rebless object ###
    return bless $obj, $class;
}

1;

# Local variables:
# c-indentation-style: bsd
# c-basic-offset: 4
# indent-tabs-mode: nil
# End:
# vim: expandtab shiftwidth=4:
<!DOCTYPE HTML>
<!-- Repaint test: changing margin-top on a position:fixed element must
     trigger a repaint of the element at its new location. Driven by the
     text-based-repaint.js harness via runRepaintAndPixelTest. -->
<script src="../resources/text-based-repaint.js"></script>
<style>
body { margin: 0; }
/* Positioned ancestor of the fixed element. */
#container {
  position: relative;
  top: 40px;
  width: 200px;
  height: 400px;
}
#test {
  position: fixed;
  background-color: green;
  width: 100px;
  height: 100px;
}
</style>
<script>
// Called by the repaint harness after the initial paint: shifts the
// fixed-position element down by 20px via margin-top.
function repaintTest() {
  document.getElementById("test").style['margin-top'] = '20px';
}
window.onload = runRepaintAndPixelTest;
</script>
<div id="container">
  <div id="test"></div>
</div>
Tests the repainting of fixed element when margin-top changes.
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CHROME_BROWSER_UI_TAB_CONTENTS_TAB_CONTENTS_ITERATOR_H_
#define CHROME_BROWSER_UI_TAB_CONTENTS_TAB_CONTENTS_ITERATOR_H_

#include <iterator>

#include "chrome/browser/ui/browser_list.h"

namespace content {
class WebContents;
}

// Iterates through all tab contents in all browser windows. Because the
// renderers act asynchronously, getting a tab contents through this interface
// does not guarantee that the renderer is ready to go. Doing anything to affect
// browser windows or tabs while iterating may cause incorrect behavior.
//
// Examples:
//
//   for (auto* web_contents : AllTabContentses()) {
//     SomeFunctionTakingWebContents(web_contents);
//     -or-
//     web_contents->OperationOnWebContents();
//     ...
//   }
//
//   auto& all_tabs = AllTabContentses();
//   auto it = some_std_algorithm(all_tabs.begin(), all_tabs.end(), ...);

class AllTabContentsesList {
 public:
  // Forward iterator over every WebContents of every Browser. The element
  // type is the WebContents pointer itself (value_type below).
  class Iterator {
   public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = content::WebContents*;
    using difference_type = ptrdiff_t;
    using pointer = value_type*;
    using reference = const value_type&;

    Iterator();
    Iterator(const Iterator& iterator);
    ~Iterator();

    // Note: returns the current WebContents* (value_type), so
    // `it->Member()` dereferences through to the WebContents itself.
    value_type operator->() { return cur_; }
    reference operator*() { return cur_; }

    Iterator& operator++() {
      Next();
      return *this;
    }

    Iterator operator++(int) {
      Iterator it(*this);
      Next();
      return it;
    }

    // Two iterators are equal iff they point at the same WebContents;
    // all end iterators compare equal because cur_ is null there.
    bool operator==(const Iterator& other) const { return cur_ == other.cur_; }
    bool operator!=(const Iterator& other) const { return !(*this == other); }

    // Returns the Browser instance associated with the current tab contents.
    // Valid as long as this iterator != the AllTabContentses().end() iterator.
    Browser* browser() const {
      return browser_iterator_ == BrowserList::GetInstance()->end()
                 ? nullptr
                 : *browser_iterator_;
    }

   private:
    friend class AllTabContentsesList;
    explicit Iterator(bool is_end_iter);

    // Loads the next tab contents into |cur_|. This is designed so that for the
    // initial call from the constructor, when |browser_iterator_| points to the
    // first Browser and |tab_index_| is -1, it will fill the first tab
    // contents.
    void Next();

    // Tab index into the current Browser of the current tab contents.
    int tab_index_;

    // Current WebContents, or null if we're at the end of the list. This can be
    // extracted given the browser iterator and index, but it's nice to cache
    // this since the caller may access the current tab contents many times.
    content::WebContents* cur_;

    // An iterator over all the browsers.
    BrowserList::const_iterator browser_iterator_;
  };

  using iterator = Iterator;
  using const_iterator = Iterator;

  const_iterator begin() const { return Iterator(false); }
  const_iterator end() const { return Iterator(true); }

  AllTabContentsesList() = default;
  ~AllTabContentsesList() = default;
};

// Returns the shared range object; see the usage examples above.
const AllTabContentsesList& AllTabContentses();

#endif  // CHROME_BROWSER_UI_TAB_CONTENTS_TAB_CONTENTS_ITERATOR_H_
/* ** 2004 May 22 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ****************************************************************************** ** ** This file contains code that modified the OS layer in order to simulate ** the effect on the database file of an OS crash or power failure. This ** is used to test the ability of SQLite to recover from those situations. */ #if SQLITE_TEST /* This file is used for testing only */ #include "sqliteInt.h" #if defined(INCLUDE_SQLITE_TCL_H) # include "sqlite_tcl.h" #else # include "tcl.h" #endif #ifndef SQLITE_OMIT_DISKIO /* This file is a no-op if disk I/O is disabled */ /* #define TRACE_CRASHTEST */ typedef struct CrashFile CrashFile; typedef struct CrashGlobal CrashGlobal; typedef struct WriteBuffer WriteBuffer; /* ** Method: ** ** This layer is implemented as a wrapper around the "real" ** sqlite3_file object for the host system. Each time data is ** written to the file object, instead of being written to the ** underlying file, the write operation is stored in an in-memory ** structure (type WriteBuffer). This structure is placed at the ** end of a global ordered list (the write-list). ** ** When data is read from a file object, the requested region is ** first retrieved from the real file. The write-list is then ** traversed and data copied from any overlapping WriteBuffer ** structures to the output buffer. i.e. a read() operation following ** one or more write() operations works as expected, even if no ** data has actually been written out to the real file. ** ** When a fsync() operation is performed, an operating system crash ** may be simulated, in which case exit(-1) is called (the call to ** xSync() never returns). 
Whether or not a crash is simulated, ** the data associated with a subset of the WriteBuffer structures ** stored in the write-list is written to the real underlying files ** and the entries removed from the write-list. If a crash is simulated, ** a subset of the buffers may be corrupted before the data is written. ** ** The exact subset of the write-list written and/or corrupted is ** determined by the simulated device characteristics and sector-size. ** ** "Normal" mode: ** ** Normal mode is used when the simulated device has none of the ** SQLITE_IOCAP_XXX flags set. ** ** In normal mode, if the fsync() is not a simulated crash, the ** write-list is traversed from beginning to end. Each WriteBuffer ** structure associated with the file handle used to call xSync() ** is written to the real file and removed from the write-list. ** ** If a crash is simulated, one of the following takes place for ** each WriteBuffer in the write-list, regardless of which ** file-handle it is associated with: ** ** 1. The buffer is correctly written to the file, just as if ** a crash were not being simulated. ** ** 2. Nothing is done. ** ** 3. Garbage data is written to all sectors of the file that ** overlap the region specified by the WriteBuffer. Or garbage ** data is written to some contiguous section within the ** overlapped sectors. ** ** Device Characteristic flag handling: ** ** If the IOCAP_ATOMIC flag is set, then option (3) above is ** never selected. ** ** If the IOCAP_ATOMIC512 flag is set, and the WriteBuffer represents ** an aligned write() of an integer number of 512 byte regions, then ** option (3) above is never selected. Instead, each 512 byte region ** is either correctly written or left completely untouched. Similar ** logic governs the behavior if any of the other ATOMICXXX flags ** is set. ** ** If either the IOCAP_SAFEAPPEND or IOCAP_SEQUENTIAL flags are set ** and a crash is being simulated, then an entry of the write-list is ** selected at random. 
Everything in the list after the selected entry ** is discarded before processing begins. ** ** If IOCAP_SEQUENTIAL is set and a crash is being simulated, option ** (1) is selected for all write-list entries except the last. If a ** crash is not being simulated, then all entries in the write-list ** that occur before at least one write() on the file-handle specified ** as part of the xSync() are written to their associated real files. ** ** If IOCAP_SAFEAPPEND is set and the first byte written by the write() ** operation is one byte past the current end of the file, then option ** (1) is always selected. */ /* ** Each write operation in the write-list is represented by an instance ** of the following structure. ** ** If zBuf is 0, then this structure represents a call to xTruncate(), ** not xWrite(). In that case, iOffset is the size that the file is ** truncated to. */ struct WriteBuffer { i64 iOffset; /* Byte offset of the start of this write() */ int nBuf; /* Number of bytes written */ u8 *zBuf; /* Pointer to copy of written data */ CrashFile *pFile; /* File this write() applies to */ WriteBuffer *pNext; /* Next in CrashGlobal.pWriteList */ }; struct CrashFile { const sqlite3_io_methods *pMethod; /* Must be first */ sqlite3_file *pRealFile; /* Underlying "real" file handle */ char *zName; int flags; /* Flags the file was opened with */ /* Cache of the entire file. This is used to speed up OsRead() and ** OsFileSize() calls. Although both could be done by traversing the ** write-list, in practice this is impractically slow. 
*/ u8 *zData; /* Buffer containing file contents */ int nData; /* Size of buffer allocated at zData */ i64 iSize; /* Size of file in bytes */ }; struct CrashGlobal { WriteBuffer *pWriteList; /* Head of write-list */ WriteBuffer *pWriteListEnd; /* End of write-list */ int iSectorSize; /* Value of simulated sector size */ int iDeviceCharacteristics; /* Value of simulated device characteristics */ int iCrash; /* Crash on the iCrash'th call to xSync() */ char zCrashFile[500]; /* Crash during an xSync() on this file */ }; static CrashGlobal g = {0, 0, SQLITE_DEFAULT_SECTOR_SIZE, 0, 0}; /* ** Set this global variable to 1 to enable crash testing. */ static int sqlite3CrashTestEnable = 0; static void *crash_malloc(int nByte){ return (void *)Tcl_AttemptAlloc((size_t)nByte); } static void crash_free(void *p){ Tcl_Free(p); } static void *crash_realloc(void *p, int n){ return (void *)Tcl_AttemptRealloc(p, (size_t)n); } /* ** Wrapper around the sqlite3OsWrite() function that avoids writing to the ** 512 byte block begining at offset PENDING_BYTE. */ static int writeDbFile(CrashFile *p, u8 *z, i64 iAmt, i64 iOff){ int rc = SQLITE_OK; int iSkip = 0; if( (iAmt-iSkip)>0 ){ rc = sqlite3OsWrite(p->pRealFile, &z[iSkip], (int)(iAmt-iSkip), iOff+iSkip); } return rc; } /* ** Flush the write-list as if xSync() had been called on file handle ** pFile. If isCrash is true, simulate a crash. */ static int writeListSync(CrashFile *pFile, int isCrash){ int rc = SQLITE_OK; int iDc = g.iDeviceCharacteristics; WriteBuffer *pWrite; WriteBuffer **ppPtr; /* If this is not a crash simulation, set pFinal to point to the ** last element of the write-list that is associated with file handle ** pFile. ** ** If this is a crash simulation, set pFinal to an arbitrarily selected ** element of the write-list. 
*/ WriteBuffer *pFinal = 0; if( !isCrash ){ for(pWrite=g.pWriteList; pWrite; pWrite=pWrite->pNext){ if( pWrite->pFile==pFile ){ pFinal = pWrite; } } }else if( iDc&(SQLITE_IOCAP_SEQUENTIAL|SQLITE_IOCAP_SAFE_APPEND) ){ int nWrite = 0; int iFinal; for(pWrite=g.pWriteList; pWrite; pWrite=pWrite->pNext) nWrite++; sqlite3_randomness(sizeof(int), &iFinal); iFinal = ((iFinal<0)?-1*iFinal:iFinal)%nWrite; for(pWrite=g.pWriteList; iFinal>0; pWrite=pWrite->pNext) iFinal--; pFinal = pWrite; } #ifdef TRACE_CRASHTEST if( pFile ){ printf("Sync %s (is %s crash)\n", pFile->zName, (isCrash?"a":"not a")); } #endif ppPtr = &g.pWriteList; for(pWrite=*ppPtr; rc==SQLITE_OK && pWrite; pWrite=*ppPtr){ sqlite3_file *pRealFile = pWrite->pFile->pRealFile; /* (eAction==1) -> write block out normally, ** (eAction==2) -> do nothing, ** (eAction==3) -> trash sectors. */ int eAction = 0; if( !isCrash ){ eAction = 2; if( (pWrite->pFile==pFile || iDc&SQLITE_IOCAP_SEQUENTIAL) ){ eAction = 1; } }else{ char random; sqlite3_randomness(1, &random); /* Do not select option 3 (sector trashing) if the IOCAP_ATOMIC flag ** is set or this is an OsTruncate(), not an Oswrite(). */ if( (iDc&SQLITE_IOCAP_ATOMIC) || (pWrite->zBuf==0) ){ random &= 0x01; } /* If IOCAP_SEQUENTIAL is set and this is not the final entry ** in the truncated write-list, always select option 1 (write ** out correctly). */ if( (iDc&SQLITE_IOCAP_SEQUENTIAL && pWrite!=pFinal) ){ random = 0; } /* If IOCAP_SAFE_APPEND is set and this OsWrite() operation is ** an append (first byte of the written region is 1 byte past the ** current EOF), always select option 1 (write out correctly). 
*/ if( iDc&SQLITE_IOCAP_SAFE_APPEND && pWrite->zBuf ){ i64 iSize; sqlite3OsFileSize(pRealFile, &iSize); if( iSize==pWrite->iOffset ){ random = 0; } } if( (random&0x06)==0x06 ){ eAction = 3; }else{ eAction = ((random&0x01)?2:1); } } switch( eAction ){ case 1: { /* Write out correctly */ if( pWrite->zBuf ){ rc = writeDbFile( pWrite->pFile, pWrite->zBuf, pWrite->nBuf, pWrite->iOffset ); }else{ rc = sqlite3OsTruncate(pRealFile, pWrite->iOffset); } *ppPtr = pWrite->pNext; #ifdef TRACE_CRASHTEST if( isCrash ){ printf("Writing %d bytes @ %d (%s)\n", pWrite->nBuf, (int)pWrite->iOffset, pWrite->pFile->zName ); } #endif crash_free(pWrite); break; } case 2: { /* Do nothing */ ppPtr = &pWrite->pNext; #ifdef TRACE_CRASHTEST if( isCrash ){ printf("Omiting %d bytes @ %d (%s)\n", pWrite->nBuf, (int)pWrite->iOffset, pWrite->pFile->zName ); } #endif break; } case 3: { /* Trash sectors */ u8 *zGarbage; int iFirst = (int)(pWrite->iOffset/g.iSectorSize); int iLast = (int)((pWrite->iOffset+pWrite->nBuf-1)/g.iSectorSize); assert(pWrite->zBuf); #ifdef TRACE_CRASHTEST printf("Trashing %d sectors (%d bytes) @ %lld (sector %d) (%s)\n", 1+iLast-iFirst, (1+iLast-iFirst)*g.iSectorSize, pWrite->iOffset, iFirst, pWrite->pFile->zName ); #endif zGarbage = crash_malloc(g.iSectorSize); if( zGarbage ){ sqlite3_int64 i; for(i=iFirst; rc==SQLITE_OK && i<=iLast; i++){ sqlite3_randomness(g.iSectorSize, zGarbage); rc = writeDbFile( pWrite->pFile, zGarbage, g.iSectorSize, i*g.iSectorSize ); } crash_free(zGarbage); }else{ rc = SQLITE_NOMEM; } ppPtr = &pWrite->pNext; break; } default: assert(!"Cannot happen"); } if( pWrite==pFinal ) break; } if( rc==SQLITE_OK && isCrash ){ exit(-1); } for(pWrite=g.pWriteList; pWrite && pWrite->pNext; pWrite=pWrite->pNext); g.pWriteListEnd = pWrite; return rc; } /* ** Add an entry to the end of the write-list. 
*/ static int writeListAppend( sqlite3_file *pFile, sqlite3_int64 iOffset, const u8 *zBuf, int nBuf ){ WriteBuffer *pNew; assert((zBuf && nBuf) || (!nBuf && !zBuf)); pNew = (WriteBuffer *)crash_malloc(sizeof(WriteBuffer) + nBuf); if( pNew==0 ){ fprintf(stderr, "out of memory in the crash simulator\n"); } memset(pNew, 0, sizeof(WriteBuffer)+nBuf); pNew->iOffset = iOffset; pNew->nBuf = nBuf; pNew->pFile = (CrashFile *)pFile; if( zBuf ){ pNew->zBuf = (u8 *)&pNew[1]; memcpy(pNew->zBuf, zBuf, nBuf); } if( g.pWriteList ){ assert(g.pWriteListEnd); g.pWriteListEnd->pNext = pNew; }else{ g.pWriteList = pNew; } g.pWriteListEnd = pNew; return SQLITE_OK; } /* ** Close a crash-file. */ static int cfClose(sqlite3_file *pFile){ CrashFile *pCrash = (CrashFile *)pFile; writeListSync(pCrash, 0); sqlite3OsClose(pCrash->pRealFile); return SQLITE_OK; } /* ** Read data from a crash-file. */ static int cfRead( sqlite3_file *pFile, void *zBuf, int iAmt, sqlite_int64 iOfst ){ CrashFile *pCrash = (CrashFile *)pFile; int nCopy = (int)MIN((i64)iAmt, (pCrash->iSize - iOfst)); if( nCopy>0 ){ memcpy(zBuf, &pCrash->zData[iOfst], nCopy); } /* Check the file-size to see if this is a short-read */ if( nCopy<iAmt ){ return SQLITE_IOERR_SHORT_READ; } return SQLITE_OK; } /* ** Write data to a crash-file. */ static int cfWrite( sqlite3_file *pFile, const void *zBuf, int iAmt, sqlite_int64 iOfst ){ CrashFile *pCrash = (CrashFile *)pFile; if( iAmt+iOfst>pCrash->iSize ){ pCrash->iSize = (int)(iAmt+iOfst); } while( pCrash->iSize>pCrash->nData ){ u8 *zNew; int nNew = (pCrash->nData*2) + 4096; zNew = crash_realloc(pCrash->zData, nNew); if( !zNew ){ return SQLITE_NOMEM; } memset(&zNew[pCrash->nData], 0, nNew-pCrash->nData); pCrash->nData = nNew; pCrash->zData = zNew; } memcpy(&pCrash->zData[iOfst], zBuf, iAmt); return writeListAppend(pFile, iOfst, zBuf, iAmt); } /* ** Truncate a crash-file. 
*/
static int cfTruncate(sqlite3_file *pFile, sqlite_int64 size){
  CrashFile *pCrash = (CrashFile *)pFile;
  assert(size>=0);
  if( pCrash->iSize>size ){
    pCrash->iSize = (int)size;
  }
  /* A write-list entry with a zero buffer encodes "truncate to iOffset". */
  return writeListAppend(pFile, size, 0, 0);
}

/*
** Sync a crash-file.
**
** A crash is simulated on the g.iCrash'th sync of the file whose name
** matches g.zCrashFile. A trailing '*' in g.zCrashFile makes the match
** a prefix match (the name is truncated to the pattern length below).
*/
static int cfSync(sqlite3_file *pFile, int flags){
  CrashFile *pCrash = (CrashFile *)pFile;
  int isCrash = 0;

  const char *zName = pCrash->zName;
  const char *zCrashFile = g.zCrashFile;
  int nName = (int)strlen(zName);
  int nCrashFile = (int)strlen(zCrashFile);

  if( nCrashFile>0 && zCrashFile[nCrashFile-1]=='*' ){
    nCrashFile--;
    if( nName>nCrashFile ) nName = nCrashFile;
  }

#ifdef TRACE_CRASHTEST
  printf("cfSync(): nName = %d, nCrashFile = %d, zName = %s, zCrashFile = %s\n",
         nName, nCrashFile, zName, zCrashFile);
#endif

  if( nName==nCrashFile && 0==memcmp(zName, zCrashFile, nName) ){
#ifdef TRACE_CRASHTEST
    printf("cfSync(): name matched, g.iCrash = %d\n", g.iCrash);
#endif
    /* Count down: only the iCrash'th matching sync becomes a crash. */
    if( (--g.iCrash)==0 ) isCrash = 1;
  }

  return writeListSync(pCrash, isCrash);
}

/*
** Return the current file-size of the crash-file (the logical size,
** including writes still buffered in the write-list).
*/
static int cfFileSize(sqlite3_file *pFile, sqlite_int64 *pSize){
  CrashFile *pCrash = (CrashFile *)pFile;
  *pSize = (i64)pCrash->iSize;
  return SQLITE_OK;
}

/*
** Calls related to file-locks are passed on to the real file handle.
*/ static int cfLock(sqlite3_file *pFile, int eLock){ return sqlite3OsLock(((CrashFile *)pFile)->pRealFile, eLock); } static int cfUnlock(sqlite3_file *pFile, int eLock){ return sqlite3OsUnlock(((CrashFile *)pFile)->pRealFile, eLock); } static int cfCheckReservedLock(sqlite3_file *pFile, int *pResOut){ return sqlite3OsCheckReservedLock(((CrashFile *)pFile)->pRealFile, pResOut); } static int cfFileControl(sqlite3_file *pFile, int op, void *pArg){ if( op==SQLITE_FCNTL_SIZE_HINT ){ CrashFile *pCrash = (CrashFile *)pFile; i64 nByte = *(i64 *)pArg; if( nByte>pCrash->iSize ){ if( SQLITE_OK==writeListAppend(pFile, nByte, 0, 0) ){ pCrash->iSize = (int)nByte; } } return SQLITE_OK; } return sqlite3OsFileControl(((CrashFile *)pFile)->pRealFile, op, pArg); } /* ** The xSectorSize() and xDeviceCharacteristics() functions return ** the global values configured by the [sqlite_crashparams] tcl * interface. */ static int cfSectorSize(sqlite3_file *pFile){ return g.iSectorSize; } static int cfDeviceCharacteristics(sqlite3_file *pFile){ return g.iDeviceCharacteristics; } /* ** Pass-throughs for WAL support. 
*/ static int cfShmLock(sqlite3_file *pFile, int ofst, int n, int flags){ return sqlite3OsShmLock(((CrashFile*)pFile)->pRealFile, ofst, n, flags); } static void cfShmBarrier(sqlite3_file *pFile){ sqlite3OsShmBarrier(((CrashFile*)pFile)->pRealFile); } static int cfShmUnmap(sqlite3_file *pFile, int delFlag){ return sqlite3OsShmUnmap(((CrashFile*)pFile)->pRealFile, delFlag); } static int cfShmMap( sqlite3_file *pFile, /* Handle open on database file */ int iRegion, /* Region to retrieve */ int sz, /* Size of regions */ int w, /* True to extend file if necessary */ void volatile **pp /* OUT: Mapped memory */ ){ return sqlite3OsShmMap(((CrashFile*)pFile)->pRealFile, iRegion, sz, w, pp); } static const sqlite3_io_methods CrashFileVtab = { 2, /* iVersion */ cfClose, /* xClose */ cfRead, /* xRead */ cfWrite, /* xWrite */ cfTruncate, /* xTruncate */ cfSync, /* xSync */ cfFileSize, /* xFileSize */ cfLock, /* xLock */ cfUnlock, /* xUnlock */ cfCheckReservedLock, /* xCheckReservedLock */ cfFileControl, /* xFileControl */ cfSectorSize, /* xSectorSize */ cfDeviceCharacteristics, /* xDeviceCharacteristics */ cfShmMap, /* xShmMap */ cfShmLock, /* xShmLock */ cfShmBarrier, /* xShmBarrier */ cfShmUnmap /* xShmUnmap */ }; /* ** Application data for the crash VFS */ struct crashAppData { sqlite3_vfs *pOrig; /* Wrapped vfs structure */ }; /* ** Open a crash-file file handle. ** ** The caller will have allocated pVfs->szOsFile bytes of space ** at pFile. This file uses this space for the CrashFile structure ** and allocates space for the "real" file structure using ** sqlite3_malloc(). The assumption here is (pVfs->szOsFile) is ** equal or greater than sizeof(CrashFile). 
*/ static int cfOpen( sqlite3_vfs *pCfVfs, const char *zName, sqlite3_file *pFile, int flags, int *pOutFlags ){ sqlite3_vfs *pVfs = (sqlite3_vfs *)pCfVfs->pAppData; int rc; CrashFile *pWrapper = (CrashFile *)pFile; sqlite3_file *pReal = (sqlite3_file*)&pWrapper[1]; memset(pWrapper, 0, sizeof(CrashFile)); rc = sqlite3OsOpen(pVfs, zName, pReal, flags, pOutFlags); if( rc==SQLITE_OK ){ i64 iSize; pWrapper->pMethod = &CrashFileVtab; pWrapper->zName = (char *)zName; pWrapper->pRealFile = pReal; rc = sqlite3OsFileSize(pReal, &iSize); pWrapper->iSize = (int)iSize; pWrapper->flags = flags; } if( rc==SQLITE_OK ){ pWrapper->nData = (int)(4096 + pWrapper->iSize); pWrapper->zData = crash_malloc(pWrapper->nData); if( pWrapper->zData ){ /* os_unix.c contains an assert() that fails if the caller attempts ** to read data from the 512-byte locking region of a file opened ** with the SQLITE_OPEN_MAIN_DB flag. This region of a database file ** never contains valid data anyhow. So avoid doing such a read here. ** ** UPDATE: It also contains an assert() verifying that each call ** to the xRead() method reads less than 128KB of data. 
*/ i64 iOff; memset(pWrapper->zData, 0, pWrapper->nData); for(iOff=0; iOff<pWrapper->iSize; iOff += 512){ int nRead = (int)(pWrapper->iSize - iOff); if( nRead>512 ) nRead = 512; rc = sqlite3OsRead(pReal, &pWrapper->zData[iOff], nRead, iOff); } }else{ rc = SQLITE_NOMEM; } } if( rc!=SQLITE_OK && pWrapper->pMethod ){ sqlite3OsClose(pFile); } return rc; } static int cfDelete(sqlite3_vfs *pCfVfs, const char *zPath, int dirSync){ sqlite3_vfs *pVfs = (sqlite3_vfs *)pCfVfs->pAppData; return pVfs->xDelete(pVfs, zPath, dirSync); } static int cfAccess( sqlite3_vfs *pCfVfs, const char *zPath, int flags, int *pResOut ){ sqlite3_vfs *pVfs = (sqlite3_vfs *)pCfVfs->pAppData; return pVfs->xAccess(pVfs, zPath, flags, pResOut); } static int cfFullPathname( sqlite3_vfs *pCfVfs, const char *zPath, int nPathOut, char *zPathOut ){ sqlite3_vfs *pVfs = (sqlite3_vfs *)pCfVfs->pAppData; return pVfs->xFullPathname(pVfs, zPath, nPathOut, zPathOut); } static void *cfDlOpen(sqlite3_vfs *pCfVfs, const char *zPath){ sqlite3_vfs *pVfs = (sqlite3_vfs *)pCfVfs->pAppData; return pVfs->xDlOpen(pVfs, zPath); } static void cfDlError(sqlite3_vfs *pCfVfs, int nByte, char *zErrMsg){ sqlite3_vfs *pVfs = (sqlite3_vfs *)pCfVfs->pAppData; pVfs->xDlError(pVfs, nByte, zErrMsg); } static void (*cfDlSym(sqlite3_vfs *pCfVfs, void *pH, const char *zSym))(void){ sqlite3_vfs *pVfs = (sqlite3_vfs *)pCfVfs->pAppData; return pVfs->xDlSym(pVfs, pH, zSym); } static void cfDlClose(sqlite3_vfs *pCfVfs, void *pHandle){ sqlite3_vfs *pVfs = (sqlite3_vfs *)pCfVfs->pAppData; pVfs->xDlClose(pVfs, pHandle); } static int cfRandomness(sqlite3_vfs *pCfVfs, int nByte, char *zBufOut){ sqlite3_vfs *pVfs = (sqlite3_vfs *)pCfVfs->pAppData; return pVfs->xRandomness(pVfs, nByte, zBufOut); } static int cfSleep(sqlite3_vfs *pCfVfs, int nMicro){ sqlite3_vfs *pVfs = (sqlite3_vfs *)pCfVfs->pAppData; return pVfs->xSleep(pVfs, nMicro); } static int cfCurrentTime(sqlite3_vfs *pCfVfs, double *pTimeOut){ sqlite3_vfs *pVfs = (sqlite3_vfs 
*)pCfVfs->pAppData; return pVfs->xCurrentTime(pVfs, pTimeOut); } static int cfGetLastError(sqlite3_vfs *pCfVfs, int n, char *z){ sqlite3_vfs *pVfs = (sqlite3_vfs *)pCfVfs->pAppData; return pVfs->xGetLastError(pVfs, n, z); } static int processDevSymArgs( Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[], int *piDeviceChar, int *piSectorSize ){ struct DeviceFlag { char *zName; int iValue; } aFlag[] = { { "atomic", SQLITE_IOCAP_ATOMIC }, { "atomic512", SQLITE_IOCAP_ATOMIC512 }, { "atomic1k", SQLITE_IOCAP_ATOMIC1K }, { "atomic2k", SQLITE_IOCAP_ATOMIC2K }, { "atomic4k", SQLITE_IOCAP_ATOMIC4K }, { "atomic8k", SQLITE_IOCAP_ATOMIC8K }, { "atomic16k", SQLITE_IOCAP_ATOMIC16K }, { "atomic32k", SQLITE_IOCAP_ATOMIC32K }, { "atomic64k", SQLITE_IOCAP_ATOMIC64K }, { "sequential", SQLITE_IOCAP_SEQUENTIAL }, { "safe_append", SQLITE_IOCAP_SAFE_APPEND }, { "powersafe_overwrite", SQLITE_IOCAP_POWERSAFE_OVERWRITE }, { "batch-atomic", SQLITE_IOCAP_BATCH_ATOMIC }, { 0, 0 } }; int i; int iDc = 0; int iSectorSize = 0; int setSectorsize = 0; int setDeviceChar = 0; for(i=0; i<objc; i+=2){ int nOpt; char *zOpt = Tcl_GetStringFromObj(objv[i], &nOpt); if( (nOpt>11 || nOpt<2 || strncmp("-sectorsize", zOpt, nOpt)) && (nOpt>16 || nOpt<2 || strncmp("-characteristics", zOpt, nOpt)) ){ Tcl_AppendResult(interp, "Bad option: \"", zOpt, "\" - must be \"-characteristics\" or \"-sectorsize\"", 0 ); return TCL_ERROR; } if( i==objc-1 ){ Tcl_AppendResult(interp, "Option requires an argument: \"", zOpt, "\"",0); return TCL_ERROR; } if( zOpt[1]=='s' ){ if( Tcl_GetIntFromObj(interp, objv[i+1], &iSectorSize) ){ return TCL_ERROR; } setSectorsize = 1; }else{ int j; Tcl_Obj **apObj; int nObj; if( Tcl_ListObjGetElements(interp, objv[i+1], &nObj, &apObj) ){ return TCL_ERROR; } for(j=0; j<nObj; j++){ int rc; int iChoice; Tcl_Obj *pFlag = Tcl_DuplicateObj(apObj[j]); Tcl_IncrRefCount(pFlag); Tcl_UtfToLower(Tcl_GetString(pFlag)); rc = Tcl_GetIndexFromObjStruct( interp, pFlag, aFlag, sizeof(aFlag[0]), "no such flag", 0, 
&iChoice ); Tcl_DecrRefCount(pFlag); if( rc ){ return TCL_ERROR; } iDc |= aFlag[iChoice].iValue; } setDeviceChar = 1; } } if( setDeviceChar ){ *piDeviceChar = iDc; } if( setSectorsize ){ *piSectorSize = iSectorSize; } return TCL_OK; } /* ** tclcmd: sqlite3_crash_now ** ** Simulate a crash immediately. This function does not return ** (writeListSync() calls exit(-1)). */ static int SQLITE_TCLAPI crashNowCmd( void * clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[] ){ if( objc!=1 ){ Tcl_WrongNumArgs(interp, 1, objv, ""); return TCL_ERROR; } writeListSync(0, 1); assert( 0 ); return TCL_OK; } /* ** tclcmd: sqlite_crash_enable ENABLE ?DEFAULT? ** ** Parameter ENABLE must be a boolean value. If true, then the "crash" ** vfs is added to the system. If false, it is removed. */ static int SQLITE_TCLAPI crashEnableCmd( void * clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[] ){ int isEnable; int isDefault = 0; static sqlite3_vfs crashVfs = { 2, /* iVersion */ 0, /* szOsFile */ 0, /* mxPathname */ 0, /* pNext */ "crash", /* zName */ 0, /* pAppData */ cfOpen, /* xOpen */ cfDelete, /* xDelete */ cfAccess, /* xAccess */ cfFullPathname, /* xFullPathname */ cfDlOpen, /* xDlOpen */ cfDlError, /* xDlError */ cfDlSym, /* xDlSym */ cfDlClose, /* xDlClose */ cfRandomness, /* xRandomness */ cfSleep, /* xSleep */ cfCurrentTime, /* xCurrentTime */ cfGetLastError, /* xGetLastError */ 0, /* xCurrentTimeInt64 */ }; if( objc!=2 && objc!=3 ){ Tcl_WrongNumArgs(interp, 1, objv, "ENABLE ?DEFAULT?"); return TCL_ERROR; } if( Tcl_GetBooleanFromObj(interp, objv[1], &isEnable) ){ return TCL_ERROR; } if( objc==3 && Tcl_GetBooleanFromObj(interp, objv[2], &isDefault) ){ return TCL_ERROR; } if( (isEnable && crashVfs.pAppData) || (!isEnable && !crashVfs.pAppData) ){ return TCL_OK; } if( crashVfs.pAppData==0 ){ sqlite3_vfs *pOriginalVfs = sqlite3_vfs_find(0); crashVfs.mxPathname = pOriginalVfs->mxPathname; crashVfs.pAppData = (void *)pOriginalVfs; crashVfs.szOsFile = 
sizeof(CrashFile) + pOriginalVfs->szOsFile; sqlite3_vfs_register(&crashVfs, isDefault); }else{ crashVfs.pAppData = 0; sqlite3_vfs_unregister(&crashVfs); } return TCL_OK; } /* ** tclcmd: sqlite_crashparams ?OPTIONS? DELAY CRASHFILE ** ** This procedure implements a TCL command that enables crash testing ** in testfixture. Once enabled, crash testing cannot be disabled. ** ** Available options are "-characteristics" and "-sectorsize". Both require ** an argument. For -sectorsize, this is the simulated sector size in ** bytes. For -characteristics, the argument must be a list of io-capability ** flags to simulate. Valid flags are "atomic", "atomic512", "atomic1K", ** "atomic2K", "atomic4K", "atomic8K", "atomic16K", "atomic32K", ** "atomic64K", "sequential" and "safe_append". ** ** Example: ** ** sqlite_crashparams -sect 1024 -char {atomic sequential} ./test.db 1 ** */ static int SQLITE_TCLAPI crashParamsObjCmd( void * clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[] ){ int iDelay; const char *zCrashFile; int nCrashFile, iDc, iSectorSize; iDc = -1; iSectorSize = -1; if( objc<3 ){ Tcl_WrongNumArgs(interp, 1, objv, "?OPTIONS? 
DELAY CRASHFILE"); goto error; } zCrashFile = Tcl_GetStringFromObj(objv[objc-1], &nCrashFile); if( nCrashFile>=sizeof(g.zCrashFile) ){ Tcl_AppendResult(interp, "Filename is too long: \"", zCrashFile, "\"", 0); goto error; } if( Tcl_GetIntFromObj(interp, objv[objc-2], &iDelay) ){ goto error; } if( processDevSymArgs(interp, objc-3, &objv[1], &iDc, &iSectorSize) ){ return TCL_ERROR; } if( iDc>=0 ){ g.iDeviceCharacteristics = iDc; } if( iSectorSize>=0 ){ g.iSectorSize = iSectorSize; } g.iCrash = iDelay; memcpy(g.zCrashFile, zCrashFile, nCrashFile+1); sqlite3CrashTestEnable = 1; return TCL_OK; error: return TCL_ERROR; } static int SQLITE_TCLAPI devSymObjCmd( void * clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[] ){ void devsym_register(int iDeviceChar, int iSectorSize); int iDc = -1; int iSectorSize = -1; if( processDevSymArgs(interp, objc-1, &objv[1], &iDc, &iSectorSize) ){ return TCL_ERROR; } devsym_register(iDc, iSectorSize); return TCL_OK; } /* ** tclcmd: sqlite3_crash_on_write N */ static int SQLITE_TCLAPI writeCrashObjCmd( void * clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[] ){ void devsym_crash_on_write(int); int nWrite = 0; if( objc!=2 ){ Tcl_WrongNumArgs(interp, 1, objv, "NWRITE"); return TCL_ERROR; } if( Tcl_GetIntFromObj(interp, objv[1], &nWrite) ){ return TCL_ERROR; } devsym_crash_on_write(nWrite); return TCL_OK; } /* ** tclcmd: unregister_devsim */ static int SQLITE_TCLAPI dsUnregisterObjCmd( void * clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[] ){ void devsym_unregister(void); if( objc!=1 ){ Tcl_WrongNumArgs(interp, 1, objv, ""); return TCL_ERROR; } devsym_unregister(); return TCL_OK; } /* ** tclcmd: register_jt_vfs ?-default? PARENT-VFS */ static int SQLITE_TCLAPI jtObjCmd( void * clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[] ){ int jt_register(char *, int); char *zParent = 0; if( objc!=2 && objc!=3 ){ Tcl_WrongNumArgs(interp, 1, objv, "?-default? 
PARENT-VFS"); return TCL_ERROR; } zParent = Tcl_GetString(objv[1]); if( objc==3 ){ if( strcmp(zParent, "-default") ){ Tcl_AppendResult(interp, "bad option \"", zParent, "\": must be -default", 0 ); return TCL_ERROR; } zParent = Tcl_GetString(objv[2]); } if( !(*zParent) ){ zParent = 0; } if( jt_register(zParent, objc==3) ){ Tcl_AppendResult(interp, "Error in jt_register", 0); return TCL_ERROR; } return TCL_OK; } /* ** tclcmd: unregister_jt_vfs */ static int SQLITE_TCLAPI jtUnregisterObjCmd( void * clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[] ){ void jt_unregister(void); if( objc!=1 ){ Tcl_WrongNumArgs(interp, 1, objv, ""); return TCL_ERROR; } jt_unregister(); return TCL_OK; } #endif /* SQLITE_OMIT_DISKIO */ /* ** This procedure registers the TCL procedures defined in this file. */ int Sqlitetest6_Init(Tcl_Interp *interp){ #ifndef SQLITE_OMIT_DISKIO Tcl_CreateObjCommand(interp, "sqlite3_crash_enable", crashEnableCmd, 0, 0); Tcl_CreateObjCommand(interp, "sqlite3_crashparams", crashParamsObjCmd, 0, 0); Tcl_CreateObjCommand(interp, "sqlite3_crash_now", crashNowCmd, 0, 0); Tcl_CreateObjCommand(interp, "sqlite3_simulate_device", devSymObjCmd, 0, 0); Tcl_CreateObjCommand(interp, "sqlite3_crash_on_write", writeCrashObjCmd,0,0); Tcl_CreateObjCommand(interp, "unregister_devsim", dsUnregisterObjCmd, 0, 0); Tcl_CreateObjCommand(interp, "register_jt_vfs", jtObjCmd, 0, 0); Tcl_CreateObjCommand(interp, "unregister_jt_vfs", jtUnregisterObjCmd, 0, 0); #endif return TCL_OK; } #endif /* SQLITE_TEST */
// Copyright 2013 Beego Authors // Copyright 2014 The Macaron Authors // // Licensed under the Apache License, Version 2.0 (the "License"): you may // not use this file except in compliance with the License. You may obtain // a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the // License for the specific language governing permissions and limitations // under the License. package cache import ( "crypto/md5" "encoding/hex" "fmt" "io/ioutil" "log" "os" "path/filepath" "sync" "time" "github.com/Unknwon/com" "gopkg.in/macaron.v1" ) // Item represents a cache item. type Item struct { Val interface{} Created int64 Expire int64 } func (item *Item) hasExpired() bool { return item.Expire > 0 && (time.Now().Unix()-item.Created) >= item.Expire } // FileCacher represents a file cache adapter implementation. type FileCacher struct { lock sync.Mutex rootPath string interval int // GC interval. } // NewFileCacher creates and returns a new file cacher. func NewFileCacher() *FileCacher { return &FileCacher{} } func (c *FileCacher) filepath(key string) string { m := md5.Sum([]byte(key)) hash := hex.EncodeToString(m[:]) return filepath.Join(c.rootPath, string(hash[0]), string(hash[1]), hash) } // Put puts value into cache with key and expire time. // If expired is 0, it will not be deleted by GC. 
func (c *FileCacher) Put(key string, val interface{}, expire int64) error { filename := c.filepath(key) item := &Item{val, time.Now().Unix(), expire} data, err := EncodeGob(item) if err != nil { return err } os.MkdirAll(filepath.Dir(filename), os.ModePerm) return ioutil.WriteFile(filename, data, os.ModePerm) } func (c *FileCacher) read(key string) (*Item, error) { filename := c.filepath(key) data, err := ioutil.ReadFile(filename) if err != nil { return nil, err } item := new(Item) return item, DecodeGob(data, item) } // Get gets cached value by given key. func (c *FileCacher) Get(key string) interface{} { item, err := c.read(key) if err != nil { return nil } if item.hasExpired() { os.Remove(c.filepath(key)) return nil } return item.Val } // Delete deletes cached value by given key. func (c *FileCacher) Delete(key string) error { return os.Remove(c.filepath(key)) } // Incr increases cached int-type value by given key as a counter. func (c *FileCacher) Incr(key string) error { item, err := c.read(key) if err != nil { return err } item.Val, err = Incr(item.Val) if err != nil { return err } return c.Put(key, item.Val, item.Expire) } // Decrease cached int value. func (c *FileCacher) Decr(key string) error { item, err := c.read(key) if err != nil { return err } item.Val, err = Decr(item.Val) if err != nil { return err } return c.Put(key, item.Val, item.Expire) } // IsExist returns true if cached value exists. func (c *FileCacher) IsExist(key string) bool { return com.IsExist(c.filepath(key)) } // Flush deletes all cached data. 
func (c *FileCacher) Flush() error { return os.RemoveAll(c.rootPath) } func (c *FileCacher) startGC() { c.lock.Lock() defer c.lock.Unlock() if c.interval < 1 { return } if err := filepath.Walk(c.rootPath, func(path string, fi os.FileInfo, err error) error { if err != nil { return fmt.Errorf("Walk: %v", err) } if fi.IsDir() { return nil } data, err := ioutil.ReadFile(path) if err != nil && !os.IsNotExist(err) { fmt.Errorf("ReadFile: %v", err) } item := new(Item) if err = DecodeGob(data, item); err != nil { return err } if item.hasExpired() { if err = os.Remove(path); err != nil && !os.IsNotExist(err) { return fmt.Errorf("Remove: %v", err) } } return nil }); err != nil { log.Printf("error garbage collecting cache files: %v", err) } time.AfterFunc(time.Duration(c.interval)*time.Second, func() { c.startGC() }) } // StartAndGC starts GC routine based on config string settings. func (c *FileCacher) StartAndGC(opt Options) error { c.lock.Lock() c.rootPath = opt.AdapterConfig c.interval = opt.Interval if !filepath.IsAbs(c.rootPath) { c.rootPath = filepath.Join(macaron.Root, c.rootPath) } c.lock.Unlock() if err := os.MkdirAll(c.rootPath, os.ModePerm); err != nil { return err } go c.startGC() return nil } func init() { Register("file", NewFileCacher()) }
/*
 * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.  Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

package jdk.nashorn.internal.parser;

import static jdk.nashorn.internal.parser.TokenKind.LITERAL;

import jdk.nashorn.internal.runtime.Source;

/**
 * Basic parse/lex unit.
 *
 * A token is packed into a single {@code long} descriptor: bits 0-7 hold
 * the {@link TokenType} ordinal, bits 8-31 the token length, and bits
 * 32-63 the start position (see {@link #toDesc}).
 */
public class Token {

    /** Not instantiable - all members are static. */
    private Token() {
    }

    /**
     * Create a compact form of token information.
     * @param type     Type of token.
     * @param position Start position of the token in the source.
     * @param length   Length of the token.
     * @return Token descriptor.
     */
    public static long toDesc(final TokenType type, final int position, final int length) {
        return ((long)position << 32) | ((long)length << 8) | type.ordinal();
    }

    /**
     * Extract token position from a token descriptor.
     * @param token Token descriptor.
     * @return Start position of the token in the source.
     */
    public static int descPosition(final long token) {
        return (int)(token >>> 32);
    }

    /**
     * Extract token length from a token descriptor.
     * @param token Token descriptor.
     * @return Length of the token.
     */
    public static int descLength(final long token) {
        return ((int)token) >>> 8;
    }

    /**
     * Extract token type from a token descriptor.
     * @param token Token descriptor.
     * @return Type of token.
     */
    public static TokenType descType(final long token) {
        return TokenType.getValues()[(int)(token & 0xff)];
    }

    /**
     * Change the token to use a new type.
     * @param token   The original token.
     * @param newType The new token type.
     * @return The recast token (same position and length, new type bits).
     */
    public static long recast(final long token, final TokenType newType) {
        return (token & ~0xFFL) | newType.ordinal();
    }

    /**
     * Return a string representation of a token.
     * @param source  Token source.
     * @param token   Token descriptor.
     * @param verbose True to include details.
     * @return String representation.
     */
    public static String toString(final Source source, final long token, final boolean verbose) {
        final TokenType type = Token.descType(token);

        // Literals render their source text when a Source is available;
        // everything else renders the token type's name.
        String result = (source != null && type.getKind() == LITERAL)
                ? source.getString(token)
                : type.getNameOrType();

        if (verbose) {
            result += " (" + Token.descPosition(token) + ", " + Token.descLength(token) + ")";
        }

        return result;
    }

    /**
     * String conversion of token.
     * @param source the source
     * @param token  the token
     * @return token as string
     */
    public static String toString(final Source source, final long token) {
        return Token.toString(source, token, false);
    }

    /**
     * String conversion of token - version without source given.
     * @param token the token
     * @return token as string
     */
    public static String toString(final long token) {
        return Token.toString(null, token, false);
    }

    /**
     * Static hash code computation function token.
     * @param token a token
     * @return hash code for token
     */
    public static int hashCode(final long token) {
        return (int)(token ^ (token >>> 32));
    }
}
// <auto-generated>
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for
// license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is
// regenerated.
// </auto-generated>

namespace Microsoft.Azure.ApplicationInsights.Query.Models
{
    using Microsoft.Rest;
    using Newtonsoft.Json;
    using System.Collections;
    using System.Collections.Generic;
    using System.Linq;

    /// <summary>
    /// A query response.
    /// </summary>
    /// <remarks>
    /// Contains the tables, columns &amp; rows resulting from a query.
    /// </remarks>
    public partial class QueryResults
    {
        /// <summary>
        /// Initializes a new instance of the QueryResults class.
        /// </summary>
        public QueryResults()
        {
            CustomInit();
        }

        /// <summary>
        /// Initializes a new instance of the QueryResults class.
        /// </summary>
        /// <param name="tables">The list of tables, columns and rows.</param>
        public QueryResults(IList<Table> tables)
        {
            Tables = tables;
            CustomInit();
        }

        /// <summary>
        /// An initialization method that performs custom operations like setting defaults
        /// </summary>
        partial void CustomInit();

        /// <summary>
        /// Gets or sets the list of tables, columns and rows.
        /// </summary>
        [JsonProperty(PropertyName = "tables")]
        public IList<Table> Tables { get; set; }

        /// <summary>
        /// Validate the object.
        /// </summary>
        /// <exception cref="ValidationException">
        /// Thrown if validation fails
        /// </exception>
        public virtual void Validate()
        {
            if (Tables == null)
            {
                throw new ValidationException(ValidationRules.CannotBeNull, "Tables");
            }
            // Tables is provably non-null past the guard above; the generated
            // "if (Tables != null)" re-check was redundant and has been removed.
            foreach (var element in Tables)
            {
                if (element != null)
                {
                    element.Validate();
                }
            }
        }
    }
}
require 'readline'

# Vocabulary offered for tab-completion, kept pre-sorted.
commands = %w[
  search download open help history quit url next
  clear prev past
].sort

# Completion callback: every command that starts with the typed prefix.
comp = proc { |prefix| commands.grep(/^#{Regexp.escape(prefix)}/) }

Readline.completion_append_character = " "
Readline.completion_proc = comp

# Read lines (recording history) until EOF or an explicit "quit".
loop do
  line = Readline.readline('> ', true)
  break if line.nil?

  p line
  break if line == 'quit'
end
<?php
/**
 * TextHelperTest file
 *
 * PHP 5
 *
 * CakePHP(tm) Tests <http://book.cakephp.org/2.0/en/development/testing.html>
 * Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
 *
 * Licensed under The MIT License
 * For full copyright and license information, please see the LICENSE.txt
 * Redistributions of files must retain the above copyright notice
 *
 * @copyright Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
 * @link http://book.cakephp.org/2.0/en/development/testing.html CakePHP(tm) Tests
 * @package Cake.Test.Case.View.Helper
 * @since CakePHP(tm) v 1.2.0.4206
 * @license MIT License (http://www.opensource.org/licenses/mit-license.php)
 */

App::uses('View', 'View');
App::uses('TextHelper', 'View/Helper');

/**
 * Helper subclass that lets the tests swap in and inspect the string engine.
 */
class TextHelperTestObject extends TextHelper {

	public function attach(StringMock $string) {
		$this->_engine = $string;
	}

	public function engine() {
		return $this->_engine;
	}

}

/**
 * StringMock class
 */
class StringMock {
}

/**
 * TextHelperTest class
 *
 * @package Cake.Test.Case.View.Helper
 */
class TextHelperTest extends CakeTestCase {

/**
 * setUp method
 *
 * @return void
 */
	public function setUp() {
		parent::setUp();
		$this->View = new View(null);
		$this->Text = new TextHelper($this->View);
	}

/**
 * tearDown method
 *
 * @return void
 */
	public function tearDown() {
		unset($this->View);
		parent::tearDown();
	}

/**
 * test String class methods are called correctly
 *
 * @return void
 */
	public function testTextHelperProxyMethodCalls() {
		$methods = array(
			'highlight', 'stripLinks', 'truncate', 'excerpt', 'toList',
		);
		$String = $this->getMock('StringMock', $methods);
		$Text = new TextHelperTestObject($this->View, array('engine' => 'StringMock'));
		$Text->attach($String);
		foreach ($methods as $method) {
			$String->expects($this->at(0))->method($method);
			$Text->{$method}('who', 'what', 'when', 'where', 'how');
		}
	}

/**
 * test engine override
 *
 * @return void
 */
	public function testEngineOverride() {
		App::build(array(
			'Utility' => array(CAKE . 'Test' . DS . 'test_app' . DS . 'Utility' . DS)
		), App::REGISTER);
		$Text = new TextHelperTestObject($this->View, array('engine' => 'TestAppEngine'));
		$this->assertInstanceOf('TestAppEngine', $Text->engine());

		App::build(array(
			'Plugin' => array(CAKE . 'Test' . DS . 'test_app' . DS . 'Plugin' . DS)
		));
		CakePlugin::load('TestPlugin');
		$Text = new TextHelperTestObject($this->View, array('engine' => 'TestPlugin.TestPluginEngine'));
		$this->assertInstanceOf('TestPluginEngine', $Text->engine());
		CakePlugin::unload('TestPlugin');
	}

/**
 * testAutoLink method
 *
 * @return void
 */
	public function testAutoLink() {
		$text = 'This is a test text';
		$expected = 'This is a test text';
		$result = $this->Text->autoLink($text);
		$this->assertEquals($expected, $result);

		$text = 'Text with a partial www.cakephp.org URL and test@cakephp.org email address';
		$result = $this->Text->autoLink($text);
		$expected = 'Text with a partial <a href="http://www.cakephp.org">www.cakephp.org</a> URL and <a href="mailto:test@cakephp\.org">test@cakephp\.org</a> email address';
		$this->assertRegExp('#^' . $expected . '$#', $result);

		$text = 'This is a test text with URL http://www.cakephp.org';
		$expected = 'This is a test text with URL <a href="http://www.cakephp.org">http://www.cakephp.org</a>';
		$result = $this->Text->autoLink($text);
		$this->assertEquals($expected, $result);

		$text = 'This is a test text with URL http://www.cakephp.org and some more text';
		$expected = 'This is a test text with URL <a href="http://www.cakephp.org">http://www.cakephp.org</a> and some more text';
		$result = $this->Text->autoLink($text);
		$this->assertEquals($expected, $result);

		$text = "This is a test text with URL http://www.cakephp.org\tand some more text";
		$expected = "This is a test text with URL <a href=\"http://www.cakephp.org\">http://www.cakephp.org</a>\tand some more text";
		$result = $this->Text->autoLink($text);
		$this->assertEquals($expected, $result);

		$text = 'This is a test text with URL http://www.cakephp.org(and some more text)';
		$expected = 'This is a test text with URL <a href="http://www.cakephp.org">http://www.cakephp.org</a>(and some more text)';
		$result = $this->Text->autoLink($text);
		$this->assertEquals($expected, $result);

		$text = 'This is a test text with URL http://www.cakephp.org';
		$expected = 'This is a test text with URL <a href="http://www.cakephp.org" class="link">http://www.cakephp.org</a>';
		$result = $this->Text->autoLink($text, array('class' => 'link'));
		$this->assertEquals($expected, $result);

		$text = 'This is a test text with URL http://www.cakephp.org';
		$expected = 'This is a test text with URL <a href="http://www.cakephp.org" class="link" id="MyLink">http://www.cakephp.org</a>';
		$result = $this->Text->autoLink($text, array('class' => 'link', 'id' => 'MyLink'));
		$this->assertEquals($expected, $result);
	}

/**
 * Test escaping for autoLink
 *
 * @return void
 */
	public function testAutoLinkEscape() {
		$text = 'This is a <b>test</b> text with URL http://www.cakephp.org';
		$expected = 'This is a &lt;b&gt;test&lt;/b&gt; text with URL <a href="http://www.cakephp.org">http://www.cakephp.org</a>';
		$result = $this->Text->autoLink($text);
		$this->assertEquals($expected, $result);

		$text = 'This is a <b>test</b> text with URL http://www.cakephp.org';
		$expected = 'This is a <b>test</b> text with URL <a href="http://www.cakephp.org">http://www.cakephp.org</a>';
		$result = $this->Text->autoLink($text, array('escape' => false));
		$this->assertEquals($expected, $result);
	}

/**
 * Data provider for autoLinking
 *
 * @return array
 */
	public static function autoLinkProvider() {
		return array(
			array(
				'This is a test text',
				'This is a test text',
			),
			array(
				'This is a test that includes (www.cakephp.org)',
				'This is a test that includes (<a href="http://www.cakephp.org">www.cakephp.org</a>)',
			),
			array(
				'This is a test that includes www.cakephp.org:8080',
				'This is a test that includes <a href="http://www.cakephp.org:8080">www.cakephp.org:8080</a>',
			),
			array(
				'This is a test that includes http://de.wikipedia.org/wiki/Kanton_(Schweiz)#fragment',
				'This is a test that includes <a href="http://de.wikipedia.org/wiki/Kanton_(Schweiz)#fragment">http://de.wikipedia.org/wiki/Kanton_(Schweiz)#fragment</a>',
			),
			array(
				'This is a test that includes www.wikipedia.org/wiki/Kanton_(Schweiz)#fragment',
				'This is a test that includes <a href="http://www.wikipedia.org/wiki/Kanton_(Schweiz)#fragment">www.wikipedia.org/wiki/Kanton_(Schweiz)#fragment</a>',
			),
			array(
				'This is a test that includes http://example.com/test.php?foo=bar text',
				'This is a test that includes <a href="http://example.com/test.php?foo=bar">http://example.com/test.php?foo=bar</a> text',
			),
			array(
				'This is a test that includes www.example.com/test.php?foo=bar text',
				'This is a test that includes <a href="http://www.example.com/test.php?foo=bar">www.example.com/test.php?foo=bar</a> text',
			),
			array(
				'Text with a partial www.cakephp.org URL',
				'Text with a partial <a href="http://www.cakephp.org">www.cakephp.org</a> URL',
			),
			array(
				'Text with a partial WWW.cakephp.org URL',
				'Text with a partial <a href="http://WWW.cakephp.org">WWW.cakephp.org</a> URL',
			),
			array(
				'Text with a partial WWW.cakephp.org &copy, URL',
				'Text with a partial <a href="http://WWW.cakephp.org">WWW.cakephp.org</a> &amp;copy, URL',
			),
			array(
				'Text with a url www.cot.ag/cuIb2Q and more',
				'Text with a url <a href="http://www.cot.ag/cuIb2Q">www.cot.ag/cuIb2Q</a> and more',
			),
			array(
				'Text with a url http://www.does--not--work.com and more',
				'Text with a url <a href="http://www.does--not--work.com">http://www.does--not--work.com</a> and more',
			),
			array(
				'Text with a url http://www.not--work.com and more',
				'Text with a url <a href="http://www.not--work.com">http://www.not--work.com</a> and more',
			),
		);
	}

/**
 * testAutoLinkUrls method
 *
 * @dataProvider autoLinkProvider
 * @return void
 */
	public function testAutoLinkUrls($text, $expected) {
		$result = $this->Text->autoLinkUrls($text);
		$this->assertEquals($expected, $result);
	}

/**
 * Test the options for autoLinkUrls
 *
 * @return void
 */
	public function testAutoLinkUrlsOptions() {
		$text = 'Text with a partial www.cakephp.org URL';
		$expected = 'Text with a partial <a href="http://www.cakephp.org" \s*class="link">www.cakephp.org</a> URL';
		$result = $this->Text->autoLinkUrls($text, array('class' => 'link'));
		$this->assertRegExp('#^' . $expected . '$#', $result);

		$text = 'Text with a partial WWW.cakephp.org &copy; URL';
		$expected = 'Text with a partial <a href="http://WWW.cakephp.org"\s*>WWW.cakephp.org</a> &copy; URL';
		$result = $this->Text->autoLinkUrls($text, array('escape' => false));
		$this->assertRegExp('#^' . $expected . '$#', $result);
	}

/**
 * Test autoLinkUrls with the escape option.
 *
 * @return void
 */
	public function testAutoLinkUrlsEscape() {
		$text = 'Text with a partial <a href="http://www.cakephp.org">link</a> link';
		$expected = 'Text with a partial <a href="http://www.cakephp.org">link</a> link';
		$result = $this->Text->autoLinkUrls($text, array('escape' => false));
		$this->assertEquals($expected, $result);

		$text = 'Text with a partial <iframe src="http://www.cakephp.org" /> link';
		$expected = 'Text with a partial <iframe src="http://www.cakephp.org" /> link';
		$result = $this->Text->autoLinkUrls($text, array('escape' => false));
		$this->assertEquals($expected, $result);

		$text = 'Text with a partial <iframe src="http://www.cakephp.org" /> link';
		$expected = 'Text with a partial &lt;iframe src=&quot;http://www.cakephp.org&quot; /&gt; link';
		$result = $this->Text->autoLinkUrls($text, array('escape' => true));
		$this->assertEquals($expected, $result);

		$text = 'Text with a url <a href="http://www.not-working-www.com">www.not-working-www.com</a> and more';
		$expected = 'Text with a url &lt;a href=&quot;http://www.not-working-www.com&quot;&gt;www.not-working-www.com&lt;/a&gt; and more';
		$result = $this->Text->autoLinkUrls($text);
		$this->assertEquals($expected, $result);

		$text = 'Text with a url www.not-working-www.com and more';
		$expected = 'Text with a url <a href="http://www.not-working-www.com">www.not-working-www.com</a> and more';
		$result = $this->Text->autoLinkUrls($text);
		$this->assertEquals($expected, $result);

		$text = 'Text with a url http://www.not-working-www.com and more';
		$expected = 'Text with a url <a href="http://www.not-working-www.com">http://www.not-working-www.com</a> and more';
		$result = $this->Text->autoLinkUrls($text);
		$this->assertEquals($expected, $result);

		$text = 'Text with a url http://www.www.not-working-www.com and more';
		$expected = 'Text with a url <a href="http://www.www.not-working-www.com">http://www.www.not-working-www.com</a> and more';
		$result = $this->Text->autoLinkUrls($text);
		$this->assertEquals($expected, $result);
	}

/**
 * testAutoLinkEmails method
 *
 * @return void
 */
	public function testAutoLinkEmails() {
		$text = 'This is a test text';
		$expected = 'This is a test text';
		$result = $this->Text->autoLinkUrls($text);
		$this->assertEquals($expected, $result);

		$text = 'Text with email@example.com address';
		$expected = 'Text with <a href="mailto:email@example.com"\s*>email@example.com</a> address';
		$result = $this->Text->autoLinkEmails($text);
		$this->assertRegExp('#^' . $expected . '$#', $result);

		$text = "Text with o'hare._-bob@example.com address";
		$expected = 'Text with <a href="mailto:o&#039;hare._-bob@example.com">o&#039;hare._-bob@example.com</a> address';
		$result = $this->Text->autoLinkEmails($text);
		$this->assertEquals($expected, $result);

		$text = 'Text with email@example.com address';
		$expected = 'Text with <a href="mailto:email@example.com" \s*class="link">email@example.com</a> address';
		$result = $this->Text->autoLinkEmails($text, array('class' => 'link'));
		$this->assertRegExp('#^' . $expected . '$#', $result);
	}

/**
 * test invalid email addresses.
 *
 * @return void
 */
	public function testAutoLinkEmailInvalid() {
		$result = $this->Text->autoLinkEmails('this is a myaddress@gmx-de test');
		$expected = 'this is a myaddress@gmx-de test';
		$this->assertEquals($expected, $result);
	}

}
// ------------------------------------ // #POSTCSS - LOAD OPTIONS - OPTIONS // ------------------------------------ 'use strict' /** * * @method options * * @param {Object} options PostCSS Config * * @return {Object} options PostCSS Options */ module.exports = function options (options) { if (options.parser) { options.parser = require(options.parser) } if (options.syntax) { options.syntax = require(options.syntax) } if (options.stringifier) { options.stringifier = require(options.stringifier) } if (options.plugins) { delete options.plugins } return options }
/*
 * My97 DatePicker 4.6
 * Skin name: default
 * (The original comments were GBK-encoded Chinese that had been mojibaked;
 * they are reproduced here in English.)
 */

/* Date picker container DIV */
.WdateDiv{ width:180px; background-color:#FFFFFF; border:#bbb 1px solid; padding:2px; }
.WdateDiv *{font-size:9pt;}

/****************************
 * Navigation icons (sprites from img.gif)
 ***************************/
.WdateDiv .NavImg { cursor:pointer; width:16px; height:16px; margin-top:1px; }
.WdateDiv .NavImgll { background:url(img.gif) no-repeat; }
.WdateDiv .NavImgl { background:url(img.gif) no-repeat -16px 0px; }
.WdateDiv .NavImgr { background:url(img.gif) no-repeat -32px 0px; }
.WdateDiv .NavImgrr { background:url(img.gif) no-repeat -48px 0px; }

/****************************
 * Year / month area
 ***************************/
/* Year/month title bar DIV */
.WdateDiv #dpTitle{ height:24px; margin-bottom:2px; padding:1px; }
/* Year/month INPUT */
.WdateDiv .yminput{ margin-top:2px; text-align:center; border:0px; height:16px; width:50px; cursor:pointer; }
/* Year/month INPUT when focused */
.WdateDiv .yminputfocus{ margin-top:2px; text-align:center; font-weight:bold; color:blue; border:#ccc 1px solid; height:16px; width:50px; }
/* Drop-down menu selection box DIV */
.WdateDiv .menuSel{ position:absolute; background-color:#FFFFFF; border:#ccc 1px solid; display:none; }
/* Menu item TD */
.WdateDiv .menu{ cursor:pointer; background-color:#fff; }
/* Menu item TD on mouseover */
.WdateDiv .menuOn{ cursor:pointer; background-color:#BEEBEE; }
/* Disabled menu item TD */
.WdateDiv .invalidMenu{ color:#aaa; }
/* Year menu offset DIV */
.WdateDiv .YMenu{ margin-top:16px; }
/* Month menu offset DIV (star-hack width targets old IE) */
.WdateDiv .MMenu{ margin-top:16px; *width:62px; }
/* Hour menu position DIV */
.WdateDiv .hhMenu{ margin-top:-90px; margin-left:26px; }
/* Minute menu position DIV */
.WdateDiv .mmMenu{ margin-top:-46px; margin-left:26px; }
/* Second menu position DIV */
.WdateDiv .ssMenu{ margin-top:-24px; margin-left:26px; }

/****************************
 * Week-number column
 ***************************/
.WdateDiv .Wweek { text-align:center; background:#DAF3F5; border-right:#BDEBEE 1px solid; }

/****************************
 * Weekday / day-cell area
 ***************************/
/* Weekday header row TR */
.WdateDiv .MTitle{ background-color:#BDEBEE; }
/* Day grid TABLE */
.WdateDiv .WdayTable{ line-height:20px; border:#c5d9e8 1px solid; }
/* Day cell TD */
.WdateDiv .Wday{ cursor:pointer; }
/* Day cell TD on mouseover */
.WdateDiv .WdayOn{ cursor:pointer; background-color:#C0EBEF; }
/* Weekend day cell TD */
.WdateDiv .Wwday{ cursor:pointer; color:#FF2F2F; }
/* Weekend day cell TD on mouseover */
.WdateDiv .WwdayOn{ cursor:pointer; color:#000; background-color:#C0EBEF; }
/* Today's cell */
.WdateDiv .Wtoday{ cursor:pointer; color:blue; }
/* Selected day cell */
.WdateDiv .Wselday{ background-color:#A9E4E9; }
/* Day belonging to an adjacent month */
.WdateDiv .WotherDay{ cursor:pointer; color:#6A6AFF; }
/* Adjacent-month day on mouseover */
.WdateDiv .WotherDayOn{ cursor:pointer; background-color:#C0EBEF; }
/* Day outside the allowed range (not selectable) */
.WdateDiv .WinvalidDay{ color:#aaa; }
/* Highlighted "special" day */
.WdateDiv .WspecialDay{ background-color:#66F4DF; }

/****************************
 * Time area
 ***************************/
/* Time bar DIV */
.WdateDiv #dpTime{ float:left; margin-top:3px; margin-right:30px; }
/* Time label SPAN */
.WdateDiv #dpTime #dpTimeStr{ margin-left:1px; }
/* Time INPUT fields */
.WdateDiv #dpTime input{ height:16px; width:18px; text-align:center; border:#ccc 1px solid; }
/* Hour INPUT */
.WdateDiv #dpTime .tB{ border-right:0px; }
/* Minute INPUT and the ':' separators */
.WdateDiv #dpTime .tE{ border-left:0; border-right:0; }
/* Second INPUT */
.WdateDiv #dpTime .tm{ width:7px; border-left:0; border-right:0; }
/* Time spinner up BUTTON */
.WdateDiv #dpTime #dpTimeUp{ height:10px; width:13px; border:0px; background:url(img.gif) no-repeat -32px -16px; }
/* Time spinner down BUTTON */
.WdateDiv #dpTime #dpTimeDown{ height:10px; width:13px; border:0px; background:url(img.gif) no-repeat -48px -16px; }

/****************************
 * Miscellaneous
 ***************************/
/* Quick-select button */
.WdateDiv #dpQS { float:left; margin-right:3px; margin-top:3px; background:url(img.gif) no-repeat 0px -16px; width:20px; height:20px; cursor:pointer; }
/* Bottom control bar */
.WdateDiv #dpControl { text-align:right; margin-top:3px; }
/* Bottom bar buttons */
.WdateDiv .dpButton{ height:20px; width:45px; border:#ccc 1px solid; padding:2px; }
// <auto-generated>
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for
// license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is
// regenerated.
// </auto-generated>

namespace Microsoft.Azure.EventGrid.Models
{
    using Newtonsoft.Json;
    using System.Collections;
    using System.Collections.Generic;
    using System.Linq;

    /// <summary>
    /// Job Output Progress Event Data.
    /// </summary>
    /// <remarks>
    /// Plain DTO deserialized from Event Grid payloads via the JsonProperty
    /// names below. NOTE(review): the properties have public setters despite
    /// the "Gets ..." summaries — that is the AutoRest codegen convention.
    /// </remarks>
    public partial class MediaJobOutputProgressEventData
    {
        /// <summary>
        /// Initializes a new instance of the MediaJobOutputProgressEventData
        /// class.
        /// </summary>
        public MediaJobOutputProgressEventData()
        {
            CustomInit();
        }

        /// <summary>
        /// Initializes a new instance of the MediaJobOutputProgressEventData
        /// class.
        /// </summary>
        /// <param name="label">Gets the Job output label.</param>
        /// <param name="progress">Gets the Job output progress.</param>
        /// <param name="jobCorrelationData">Gets the Job correlation
        /// data.</param>
        public MediaJobOutputProgressEventData(string label = default(string), long? progress = default(long?), IDictionary<string, string> jobCorrelationData = default(IDictionary<string, string>))
        {
            Label = label;
            Progress = progress;
            JobCorrelationData = jobCorrelationData;
            CustomInit();
        }

        /// <summary>
        /// An initialization method that performs custom operations like setting defaults
        /// </summary>
        partial void CustomInit();

        /// <summary>
        /// Gets the Job output label.
        /// </summary>
        [JsonProperty(PropertyName = "label")]
        public string Label { get; set; }

        /// <summary>
        /// Gets the Job output progress.
        /// </summary>
        [JsonProperty(PropertyName = "progress")]
        public long? Progress { get; set; }

        /// <summary>
        /// Gets the Job correlation data.
        /// </summary>
        [JsonProperty(PropertyName = "jobCorrelationData")]
        public IDictionary<string, string> JobCorrelationData { get; set; }
    }
}
# ActiveRecord model with no custom behavior yet; table name and columns are
# inferred by Rails conventions (presumably a `wild_boars` table — confirm
# against the schema).
class WildBoar < ActiveRecord::Base
end
// Copyright (c) ppy Pty Ltd <contact@ppy.sh>. Licensed under the MIT Licence.
// See the LICENCE file in the repository root for full licence text.

using System;
using osu.Framework.Graphics;
using osu.Framework.Graphics.Containers;
using osu.Framework.Graphics.Shapes;
using osu.Game.Graphics;
using osuTK;
using osuTK.Graphics;

namespace osu.Game.Tests.Visual.UserInterface
{
    /// <summary>
    /// Visual test showing several <see cref="DrawableDate"/>s at offsets
    /// around "now", each paired with a marker that flashes on refresh.
    /// </summary>
    public class TestSceneDrawableDate : OsuTestScene
    {
        public TestSceneDrawableDate()
        {
            // Offsets in seconds relative to now: three in the past,
            // one current, three in the future.
            var secondOffsets = new[] { -60.0, -55, -50, 0, 60, 65, 70 };

            var flow = new FillFlowContainer
            {
                Direction = FillDirection.Vertical,
                AutoSizeAxes = Axes.Both,
                Origin = Anchor.Centre,
                Anchor = Anchor.Centre,
            };

            foreach (var offset in secondOffsets)
                flow.Add(new PokeyDrawableDate(DateTimeOffset.Now.AddSeconds(offset)));

            Child = flow;
        }

        /// <summary>
        /// A <see cref="DrawableDate"/> with a small box beside it which
        /// fades in and out whenever the displayed date value changes.
        /// </summary>
        private class PokeyDrawableDate : CompositeDrawable
        {
            public PokeyDrawableDate(DateTimeOffset date)
            {
                const float box_size = 10;

                AutoSizeAxes = Axes.Both;

                var flash = new Box
                {
                    Colour = Color4.Yellow,
                    Size = new Vector2(box_size),
                    Anchor = Anchor.CentreLeft,
                    Origin = Anchor.CentreLeft,
                    Alpha = 0
                };

                var drawableDate = new DrawableDate(date)
                {
                    X = box_size + 2,
                };

                InternalChildren = new Drawable[] { flash, drawableDate };

                // Pulse the marker each time the date display refreshes.
                drawableDate.Current.ValueChanged += _ => flash.FadeOutFromOne(500);
            }
        }
    }
}
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"> <title>Flat UI - Free User Interface Kit</title> <meta name="viewport" content="width=device-width, initial-scale=1.0"> <!-- Loading Bootstrap --> <link href="bootstrap/css/bootstrap.css" rel="stylesheet"> <!-- Loading Flat UI --> <link href="css/flat-ui.css" rel="stylesheet"> <link href="css/demo.css" rel="stylesheet"> <link rel="shortcut icon" href="images/favicon.ico"> <!-- HTML5 shim, for IE6-8 support of HTML5 elements. All other JS at the end of file. --> <!--[if lt IE 9]> <script src="js/html5shiv.js"></script> <![endif]--> </head> <body> <div class="container"> <div class="demo-headline"> <h1 class="demo-logo"> <div class="logo"></div> Flat UI <small>Free User Interface Kit</small> </h1> </div> <!-- /demo-headline --> <h1 class="demo-section-title">Basic elements</h1> <h3 class="demo-panel-title">Buttons</h3> <div class="row demo-row"> <div class="col-md-3"> <a href="#fakelink" class="btn btn-block btn-lg btn-primary">Primary Button</a> </div> <div class="col-md-3"> <a href="#fakelink" class="btn btn-block btn-lg btn-warning">Warning Button</a> </div> <div class="col-md-3"> <a href="#fakelink" class="btn btn-block btn-lg btn-default">Default Button</a> </div> <div class="col-md-3"> <a href="#fakelink" class="btn btn-block btn-lg btn-danger">Danger Button</a> </div> </div> <!-- /row --> <div class="row demo-row"> <div class="col-md-3"> <a href="#fakelink" class="btn btn-block btn-lg btn-success">Success Button</a> </div> <div class="col-md-3"> <a href="#fakelink" class="btn btn-block btn-lg btn-inverse">Inverse Button</a> </div> <div class="col-md-3"> <a href="#fakelink" class="btn btn-block btn-lg btn-info">Info Button</a> </div> <div class="col-md-3"> <a href="#fakelink" class="btn btn-block btn-lg btn-default disabled">Disabled Button</a> </div> </div> <!-- /row --> <h3 class="demo-panel-title">Menu</h3> <div class="row demo-row"> <div class="col-md-9"> <div class="navbar navbar-inverse"> <div 
class="navbar-header"> <button type="button" class="btn btn-navbar" data-toggle="collapse" data-target=".navbar-collapse-01"></button> </div> <div class="navbar-collapse collapse navbar-collapse-01"> <ul class="nav navbar-nav navbar-left"> <li> <a href="#fakelink"> Menu Item <span class="navbar-unread">1</span> </a> </li> <li class="active"> <a href="#fakelink"> Messages <span class="navbar-unread">1</span> </a> <ul> <li><a href="#fakelink">Element One</a></li> <li> <a href="#fakelink">Sub menu</a> <ul> <li><a href="#fakelink">Element One</a></li> <li><a href="#fakelink">Element Two</a></li> <li><a href="#fakelink">Element Three</a></li> </ul> <!-- /Sub menu --> </li> <li><a href="#fakelink">Element Three</a></li> </ul> <!-- /Sub menu --> </li> <li> <a href="#fakelink"> About Us </a> </li> </ul> </div><!--/.nav --> </div> </div> <div class="col-md-3"> <select name="herolist" value="X-Men" class="select-block"> <option value="0">Choose hero</option> <option value="1">Spider Man</option> <option value="2">Wolverine</option> <option value="3">Captain America</option> <option value="X-Men" selected="selected">X-Men</option> <option value="Crocodile">Crocodile</option> </select> </div> </div> <!-- /row --> <h3 class="demo-panel-title">Input</h3> <div class="row"> <div class="col-md-3"> <div class="form-group"> <input type="text" value="" placeholder="Inactive" class="form-control" /> </div> </div> <div class="col-md-3"> <div class="form-group has-error"> <input type="text" value="Error" class="form-control" /> </div> </div> <div class="col-md-3"> <div class="form-group has-success"> <input type="text" value="Success" class="form-control" /> <span class="input-icon fui-check-inverted"></span> </div> </div> <div class="col-md-3"> <div class="form-group"> <input type="text" value="Disabled" disabled="disabled" class="form-control" /> </div> </div> </div> <!-- /row --> <div class="row"> <div class="col-md-5"> <h3 class="demo-panel-title">Progress bars &amp; Sliders</h3> 
<div class="progress"> <div class="progress-bar" style="width: 45%;"></div> </div> <br/> <div class="progress"> <div class="progress-bar" style="width: 40%;"></div> <div class="progress-bar progress-bar-warning" style="width: 10%;"></div> <div class="progress-bar progress-bar-danger" style="width: 10%;"></div> <div class="progress-bar progress-bar-success" style="width: 10%;"></div> <div class="progress-bar progress-bar-info" style="width: 10%;"></div> </div> <br/> <div id="slider" class="ui-slider"> <div class="ui-slider-segment"></div> <div class="ui-slider-segment"></div> <div class="ui-slider-segment"></div> </div> </div> <!-- /sliders --> <div class="col-md-6 col-md-offset-1"> <h3 class="demo-panel-title">Navigation</h3> <div class="row demo-navigation"> <div class="col-md-6"> <div class="btn-toolbar"> <div class="btn-group"> <a class="btn btn-primary" href="#fakelink"><span class="fui-time"></span></a> <a class="btn btn-primary" href="#fakelink"><span class="fui-photo"></span></a> <a class="btn btn-primary active" href="#fakelink"><span class="fui-heart"></span></a> <a class="btn btn-primary" href="#fakelink"><span class="fui-eye"></span></a> </div> </div> <!-- /toolbar --> </div> <div class="col-md-6 demo-pager"> <ul class="pager"> <li class="previous"> <a href="#fakelink"> <span class="fui-arrow-left"></span> <span>All messages</span> </a> </li> <li class="next"> <a href="#fakelink"> <span class="fui-arrow-right"></span> </a> </li> </ul> <!-- /pager --> </div> </div> <!-- /demo-navigation --> <div class="pagination"> <ul> <li class="previous"><a href="#fakelink" class="fui-arrow-left"></a></li> <li class="active"><a href="#fakelink">1</a></li> <li><a href="#fakelink">2</a></li> <li><a href="#fakelink">3</a></li> <li><a href="#fakelink">4</a></li> <li><a href="#fakelink">5</a></li> <li><a href="#fakelink">6</a></li> <li><a href="#fakelink">7</a></li> <li><a href="#fakelink">8</a></li> <li class="next"><a href="#fakelink" class="fui-arrow-right"></a></li> 
</ul> </div> <!-- /pagination --> </div> <!-- /navigation --> </div> <!-- /row --> <div class="row"> <div class="col-md-3"> <h3 class="demo-panel-title">Checkboxes</h3> <label class="checkbox" for="checkbox1"> <input type="checkbox" value="" id="checkbox1" data-toggle="checkbox"> Unchecked </label> <label class="checkbox" for="checkbox2"> <input type="checkbox" checked="checked" value="" id="checkbox2" data-toggle="checkbox" checked=""> Checked </label> <label class="checkbox" for="checkbox3"> <input type="checkbox" value="" id="checkbox3" data-toggle="checkbox" disabled=""> Disabled unchecked </label> <label class="checkbox" for="checkbox4"> <input type="checkbox" checked="checked" value="" id="checkbox4" data-toggle="checkbox" disabled="" checked=""> Disabled checked </label> </div> <!-- /checkboxes col-md-3 --> <div class="col-md-3"> <h3 class="demo-panel-title">Radio Buttons</h3> <label class="radio"> <input type="radio" name="optionsRadios" id="optionsRadios1" value="option1" data-toggle="radio"> Radio is off </label> <label class="radio"> <input type="radio" name="optionsRadios" id="optionsRadios2" value="option1" data-toggle="radio" checked=""> Radio is on </label> <label class="radio"> <input type="radio" name="optionsRadiosDisabled" id="optionsRadios3" value="option2" data-toggle="radio" disabled=""> Disabled radio is off </label> <label class="radio"> <input type="radio" name="optionsRadiosDisabled" id="optionsRadios4" value="option2" data-toggle="radio" checked="" disabled=""> Disabled radio is on </label> </div> <!-- /radios col-md-3 --> <div class="col-md-3"> <h3 class="demo-panel-title">Switches</h3> <table width="100%"> <tr> <td width="50%" class="pbm"> <input type="checkbox" checked="" data-toggle="switch" /> </td> <td class="pbm"> <input type="checkbox" data-toggle="switch" /> </td> </tr> <tr> <td class="pbm"> <div class="switch switch-square" data-on-label="<i class='fui-check'></i>" data-off-label="<i class='fui-cross'></i>"> <input 
type="checkbox" /> </div> </td> <td class="pbm"> <div class="switch switch-square" data-on-label="<i class='fui-check'></i>" data-off-label="<i class='fui-cross'></i>"> <input type="checkbox" checked /> </div> </td> </tr> <tr> <td> <input type="checkbox" disabled data-toggle="switch" /> </td> <td> <input type="checkbox" checked disabled data-toggle="switch" /> </td> </tr> </table> </div> <!-- /toggles col-md-3 --> <div class="col-md-3"> <h3 class="demo-panel-title">Tags</h3> <input name="tagsinput" id="tagsinput" class="tagsinput" value="Clean,Fresh,Modern,Unique" /> </div> </div> <!-- /row --> <div class="row"> <div class="col-md-3"> <h3 class="demo-panel-title">Share</h3> <div class="share mrl"> <ul> <li> <label class="share-label" for="share-toggle2">Facebook</label> <input type="checkbox" data-toggle="switch" /> </li> <li> <label class="share-label" for="share-toggle4">Twitter</label> <input type="checkbox" checked="" data-toggle="switch" /> </li> <li> <label class="share-label" for="share-toggle6">Pinterest</label> <input type="checkbox" data-toggle="switch" /> </li> </ul> <a href="#" class="btn btn-primary btn-block btn-large">Share</a> </div> <!-- /share --> </div> <div class="col-md-3"> <div class="demo-tooltips"> <h3 class="demo-panel-title">Tooltips</h3> <p align="center" data-toggle="tooltip" data-placement="bottom" title="Tooltip under the text."></p> <p align="center" data-toggle="tooltip" title="Here is the sample of talltooltip that contains three lines or more. 
More."></p> </div> </div> <!-- /col-md-3 with tooltips --> </div> <!-- /row --> <div class="demo-row typography-row"> <div class="demo-title"> <h3 class="demo-panel-title">Typography</h3> </div> <div class="demo-content"> <div class="demo-type-example"> <h1><span class="demo-heading-note">Header 1</span>Showers across the W</h1> </div> <div class="demo-type-example"> <h2><span class="demo-heading-note">Header 2</span>Give this quartet a few</h2> </div> <div class="demo-type-example"> <h3><span class="demo-heading-note">Header 3</span>The Vatican transitions to a</h3> </div> <div class="demo-type-example"> <h4><span class="demo-heading-note">Header 4</span>Great American Bites: Telluride's Oak, The</h4> </div> <div class="demo-type-example"> <h5><span class="demo-heading-note">Header 5</span>Author Diane Alberts loves her some good</h5> </div> <div class="demo-type-example"> <h6><span class="demo-heading-note">Header 6</span>With the success of young-adult book-to-movie</h6> </div> <div class="demo-type-example"> <span class="demo-text-note">Paragraph</span> <p>Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. <strong>Donec ullamcorper</strong> nulla non metus auctor fringilla. Duis mollis, est non commodo luctus, nisi erat porttitor ligula, eget lacinia odio sem nec elit.</p> </div> <div class="demo-type-example"> <span class="demo-text-note">Image</span> <img src="images/exaple-image.jpg" alt="exaple-image" class="img-rounded img-responsive"> <p class="img-comment"><strong>Note:</strong> gravida at eget metus. 
Duis mollis, est non commodo luctus, nisi erat porttitor ligula, eget lacinia odio sem nec elit.</p> </div> <div class="demo-type-example"> <span class="demo-text-note">Lead Text</span> <p class="lead">Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus.</p> </div> <div class="demo-type-example"> <span class="demo-text-note">Quote</span> <blockquote> <p>Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Donec ullamcorper nulla non metus auctor fringilla. Duis mollis, est non commodo luctus.</p> <small>Steve Jobs, CEO Apple</small> </blockquote> </div> <div class="demo-type-example"> <span class="demo-text-note">Small Font</span> <p><small>Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Donec ullamcorper nulla non metus auctor fringilla. Duis mollis, est non commodo luctus, nisi erat porttitor ligula, eget lacinia odio sem nec elit.</small></p> </div> </div><!-- /.demo-content-wide --> </div><!-- /.demo-row --> <h3 class="demo-panel-title">Color Swatches</h3> <div class="row demo-swatches-row"> <div class="swatches-col"> <div class="pallete-item"> <dl class="palette palette-turquoise"> <dt>#1abc9c</dt> <dd>Turquoise</dd> </dl> <dl class="palette palette-green-sea"> <dt>#16a085</dt> <dd>Green sea</dd> </dl> </div> <div class="pallete-item"> <dl class="palette palette-emerald"> <dt>#2ecc71</dt> <dd>Emerald</dd> </dl> <dl class="palette palette-nephritis"> <dt>#27ae60</dt> <dd>Nephritis</dd> </dl> </div> <div class="pallete-item"> <dl class="palette palette-peter-river"> <dt>#3498db</dt> <dd>Peter river</dd> </dl> <dl class="palette palette-belize-hole"> <dt>#2980b9</dt> <dd>Belize hole</dd> </dl> </div> <div class="pallete-item"> <dl class="palette palette-amethyst"> <dt>#9b59b6</dt> <dd>Amethyst</dd> </dl> <dl class="palette palette-wisteria"> <dt>#8e44ad</dt> <dd>Wisteria</dd> </dl> </div> <div class="pallete-item"> <dl class="palette palette-wet-asphalt"> 
<dt>#34495e</dt> <dd>Wet asphalt</dd> </dl> <dl class="palette palette-midnight-blue"> <dt>#2c3e50</dt> <dd>Midnight blue</dd> </dl> </div> <div class="pallete-item"> <dl class="palette palette-sun-flower"> <dt>#f1c40f</dt> <dd>Sun flower</dd> </dl> <dl class="palette palette-orange"> <dt>#f39c12</dt> <dd>Orange</dd> </dl> </div> <div class="pallete-item"> <dl class="palette palette-carrot"> <dt>#e67e22</dt> <dd>Carrot</dd> </dl> <dl class="palette palette-pumpkin"> <dt>#d35400</dt> <dd>Pumpkin</dd> </dl> </div> <div class="pallete-item"> <dl class="palette palette-alizarin"> <dt>#e74c3c</dt> <dd>Alizarin</dd> </dl> <dl class="palette palette-pomegranate"> <dt>#c0392b</dt> <dd>Pomegranate</dd> </dl> </div> <div class="pallete-item"> <dl class="palette palette-clouds"> <dt>#ecf0f1</dt> <dd>Clouds</dd> </dl> <dl class="palette palette-silver"> <dt>#bdc3c7</dt> <dd>Silver</dd> </dl> </div> <div class="pallete-item"> <dl class="palette palette-concrete"> <dt>#95a5a6</dt> <dd>Concrete</dd> </dl> <dl class="palette palette-asbestos"> <dt>#7f8c8d</dt> <dd>Asbestos</dd> </dl> </div> </div> <!-- /swatches items --> <div class="swatches-desc-col"> <h6 class="palette-headline">SWATCHES</h6> <p class="palette-paragraph"> Colors &mdash; is almost the most important part of the <strong>Flat UI</strong>. Better to use different shades of provided colors than new. 
</p> <p class="palette-paragraph"> For your convenience we also provide <strong>Swatches Preset</strong> <span>(flat&#8209;ui&#8209;swatches.aco in the Pack folder).</span> </p> <p class="palette-paragraph"> <strong>No gradients, no shadows.</strong> </p> </div> <!-- /swatches desc --> </div> <!-- /swatches row --> <h3 class="demo-panel-title">Icons <small>(14)</small></h3> <div class="demo-illustrations"> <div class="demo-content"> <div><img src="images/icons/svg/toilet-paper.svg" alt="Toilet-Paper"></div> <div><img src="images/icons/svg/gift-box.svg" alt="Gift-Box"></div> <div><img src="images/icons/svg/pencils.svg" alt="Pensils"></div> <div><img src="images/icons/svg/clipboard.svg" alt="Clipboard"></div> <div><img src="images/icons/svg/retina.svg" alt="Retina"></div> <div><img src="images/icons/svg/compas.svg" alt="Compas"></div> <div><img src="images/icons/svg/map.svg" alt="Map"></div> <div><img src="images/icons/svg/chat.svg" alt="Chat"></div> <div><img src="images/icons/svg/mail.svg" alt="Mail"></div> <div><img src="images/icons/svg/book.svg" alt="Book"></div> <div><img src="images/icons/svg/calendar.svg" alt="Calendar"></div> <div><img src="images/icons/svg/paper-bag.svg" alt="Pocket"></div> <div><img src="images/icons/svg/clocks.svg" alt="Watches"></div> <div><img src="images/icons/svg/loop.svg" alt="Infinity-Loop"></div> </div> </div> <h3 class="demo-panel-title">Glyphs <small>(30)</small></h3> <div class="demo-icons"> <div class="demo-content"> <span class="fui-arrow-right"></span> <span class="fui-arrow-left"></span> <span class="fui-cmd"></span> <span class="fui-check-inverted"></span> <span class="fui-heart"></span> <span class="fui-location"></span> <span class="fui-plus"></span> <span class="fui-check"></span> <span class="fui-cross"></span> <span class="fui-list"></span> <span class="fui-new"></span> <span class="fui-video"></span> <span class="fui-photo"></span> <span class="fui-volume"></span> <span class="fui-time"></span> <span 
class="fui-eye"></span> <span class="fui-chat"></span> <span class="fui-search"></span> <span class="fui-user"></span> <span class="fui-mail"></span> <span class="fui-lock"></span> <span class="fui-gear"></span> <span class="fui-radio-unchecked"></span> <span class="fui-radio-checked"></span> <span class="fui-checkbox-unchecked"></span> <span class="fui-checkbox-checked"></span> <span class="fui-calendar-solid"></span> <span class="fui-pause"></span> <span class="fui-play"></span> <span class="fui-check-inverted-2"></span> </div> </div> <!-- /icon font row --> <h1 class="demo-section-title mbl pbl">Samples</h1> <div class="row demo-samples"> <div class="col-md-4"> <div class="todo"> <div class="todo-search"> <input class="todo-search-field" type="search" value="" placeholder="Search" /> </div> <ul> <li class="todo-done"> <div class="todo-icon fui-user"></div> <div class="todo-content"> <h4 class="todo-name"> Meet <strong>Adrian</strong> at <strong>6pm</strong> </h4> Times Square </div> </li> <li> <div class="todo-icon fui-list"></div> <div class="todo-content"> <h4 class="todo-name"> Chat with <strong>V.Kudinov</strong> </h4> Skype conference an 9 am </div> </li> <li> <div class="todo-icon fui-eye"></div> <div class="todo-content"> <h4 class="todo-name"> Watch <strong>Iron Man</strong> </h4> 1998 Broadway </div> </li> <li> <div class="todo-icon fui-time"></div> <div class="todo-content"> <h4 class="todo-name"> Fix bug on a <strong>Website</strong> </h4> As soon as possible </div> </li> </ul> </div> </div> <!-- /todo list --> <div class="col-md-8 demo-video"> <!--[if !IE]> --> <video class="video-js" controls preload="auto" width="620" height="349" poster="images/video/poster.jpg" data-setup="{}"> <source src="http://iurevych.github.com/Flat-UI-videos/big_buck_bunny.mp4" type="video/mp4"> <source src="http://iurevych.github.com/Flat-UI-videos/big_buck_bunny.webm" type="video/webm"> </video> <!-- <![endif]--> <!--[if IE]> <video class="video-js" controls 
preload="auto" width="620" height="256" poster="http://video-js.zencoder.com/oceans-clip.jpg" data-setup="{}"> <source src="http://video-js.zencoder.com/oceans-clip.mp4" type='video/mp4'/> <source src="http://video-js.zencoder.com/oceans-clip.webm" type='video/webm'/> </video> <![endif]--> </div> <!-- /video --> </div> <div class="row demo-tiles"> <div class="col-md-3"> <div class="tile"> <img src="images/icons/svg/compas.svg" alt="Compas" class="tile-image big-illustration"> <h3 class="tile-title">Web Oriented</h3> <p>100% convertable to HTML/CSS layout.</p> <a class="btn btn-primary btn-large btn-block" href="http://designmodo.com/flat">Get Pro</a> </div> </div> <div class="col-md-3"> <div class="tile"> <img src="images/icons/svg/loop.svg" alt="Infinity-Loop" class="tile-image"> <h3 class="tile-title">Easy to Customize</h3> <p>Vector-based shapes and minimum of layer styles.</p> <a class="btn btn-primary btn-large btn-block" href="http://designmodo.com/flat">Get Pro</a> </div> </div> <div class="col-md-3"> <div class="tile"> <img src="images/icons/svg/pencils.svg" alt="Pensils" class="tile-image"> <h3 class="tile-title">Color Swatches</h3> <p>Easy to add or change elements. 
</p> <a class="btn btn-primary btn-large btn-block" href="http://designmodo.com/flat">Get Pro</a> </div> </div> <div class="col-md-3"> <div class="tile tile-hot"> <img src="images/icons/svg/chat.svg" alt="Chat" class="tile-image"> <h3 class="tile-title">Free for Share</h3> <p>Your likes, shares and comments helps us.</p> <a class="btn btn-primary btn-large btn-block" href="http://designmodo.com/flat">Get Pro</a> </div> </div> </div> <!-- /tiles --> <div class="login"> <div class="login-screen"> <div class="login-icon"> <img src="images/login/icon.png" alt="Welcome to Mail App" /> <h4>Welcome to <small>Mail App</small></h4> </div> <div class="login-form"> <div class="form-group"> <input type="text" class="form-control login-field" value="" placeholder="Enter your name" id="login-name" /> <label class="login-field-icon fui-user" for="login-name"></label> </div> <div class="form-group"> <input type="password" class="form-control login-field" value="" placeholder="Password" id="login-pass" /> <label class="login-field-icon fui-lock" for="login-pass"></label> </div> <a class="btn btn-primary btn-lg btn-block" href="#">Login</a> <a class="login-link" href="#">Lost your password?</a> </div> </div> </div> <div class="row"> <div class="col-md-9"> <div class="demo-browser"> <div class="demo-browser-side"> <div class="demo-browser-author"></div> <div class="demo-browser-action"> <a class="btn btn-danger btn-lg btn-block" href="http://twitter.com/monstercritic" target="_blank"> Follow </a> </div> <h5>@monstercritic</h5> <h6> Tourist. Designer. 
NYC <a href="http://shmidt.in" target="_blank">shmidt.in</a> </h6> </div> <div class="demo-browser-content"> <img src="images/demo/browser-pic-1.jpg" alt="" /> <img src="images/demo/browser-pic-2.jpg" alt="" /> <img src="images/demo/browser-pic-3.jpg" alt="" /> <img src="images/demo/browser-pic-4.jpg" alt="" /> <img src="images/demo/browser-pic-5.jpg" alt="" /> <img src="images/demo/browser-pic-6.jpg" alt="" /> </div> </div> </div> <div class="col-md-3"> <div class="demo-download"> <img src="images/demo/html-icon.png" src="Free PSD" /> </div> <a href="https://github.com/designmodo/Flat-UI/archive/master.zip" class="btn btn-primary btn-lg btn-block">Download</a> <p class="demo-download-text">Your likes, shares and comments make us happy!</p> </div> </div> <!-- /download area --> </div> <!-- /container --> <footer> <div class="container"> <div class="row"> <div class="col-md-7"> <h3 class="footer-title">Subscribe</h3> <p>Do you like this freebie? Want to get more stuff like this?<br/> Subscribe to designmodo news and updates to stay tuned on great designs.<br/> Go to: <a href="http://designmodo.com/flat-free" target="_blank">designmodo.com/flat-free</a> </p> <p class="pvl"> <a href="https://twitter.com/share" class="twitter-share-button" data-url="http://designmodo.com/flat-free/" data-text="Flat UI Free - PSD&amp;amp;HTML User Interface Kit" data-via="designmodo">Tweet</a> <script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src=p+'://platform.twitter.com/widgets.js';fjs.parentNode.insertBefore(js,fjs);}}(document, 'script', 'twitter-wjs');</script> <iframe src="http://ghbtns.com/github-btn.html?user=designmodo&repo=Flat-UI&type=watch&count=true" height="20" width="107" frameborder="0" scrolling="0" style="width:105px; height: 20px;" allowTransparency="true"></iframe> <iframe 
src="http://ghbtns.com/github-btn.html?user=designmodo&repo=Flat-UI&type=fork&count=true" height="20" width="107" frameborder="0" scrolling="0" style="width:105px; height: 20px;" allowTransparency="true"></iframe> <iframe src="http://ghbtns.com/github-btn.html?user=designmodo&type=follow&count=true" height="20" width="195" frameborder="0" scrolling="0" style="width:195px; height: 20px;" allowTransparency="true"></iframe> </p> <a class="footer-brand" href="http://designmodo.com" target="_blank"> <img src="images/footer/logo.png" alt="Designmodo.com" /> </a> </div> <!-- /col-md-7 --> <div class="col-md-5"> <div class="footer-banner"> <h3 class="footer-title">Get Flat UI Pro</h3> <ul> <li>Tons of Basic and Custom UI Elements</li> <li>A Lot of Useful Samples</li> <li>More Vector Icons and Glyphs</li> <li>Pro Color Swatches</li> <li>Bootstrap Based HTML/CSS/JS Layout</li> </ul> Go to: <a href="http://designmodo.com/flat" target="_blank">designmodo.com/flat</a> </div> </div> </div> </div> </footer> <!-- Load JS here for greater good =============================--> <script src="js/jquery-1.8.3.min.js"></script> <script src="js/jquery-ui-1.10.3.custom.min.js"></script> <script src="js/jquery.ui.touch-punch.min.js"></script> <script src="js/bootstrap.min.js"></script> <script src="js/bootstrap-select.js"></script> <script src="js/bootstrap-switch.js"></script> <script src="js/flatui-checkbox.js"></script> <script src="js/flatui-radio.js"></script> <script src="js/jquery.tagsinput.js"></script> <script src="js/jquery.placeholder.js"></script> <script src="js/jquery.stacktable.js"></script> <script src="http://vjs.zencdn.net/4.1/video.js"></script> <script src="js/application.js"></script> </body> </html>
# Migration that creates the `users` table with basic identity columns
# plus the standard created_at/updated_at timestamps.
class CreateUsers < ActiveRecord::Migration
  def change
    create_table :users do |t|
      # All three identity fields are free-form strings.
      %i[email first_name last_name].each { |column| t.string(column) }

      t.timestamps
    end
  end
end
try:
    from astropy.models import ParametricModel,Parameter,_convert_input,_convert_output
    import numpy as np

    class PowerLawModel(ParametricModel):
        """One-dimensional power-law model: f(x) = scale * x**(-alpha)."""

        param_names = ['scale', 'alpha']

        def __init__(self, scale, alpha, param_dim=1):
            """
            Parameters
            ----------
            scale : float or array
                Amplitude of the power law.
            alpha : float or array
                Power-law index (the model evaluates x**(-alpha)).
            param_dim : int
                Number of parameter sets held by the model.
            """
            self._scale = Parameter(name='scale', val=scale, mclass=self, param_dim=param_dim)
            self._alpha = Parameter(name='alpha', val=alpha, mclass=self, param_dim=param_dim)
            # BUG FIX: the original called
            #     super(ParametricModel, self).__init__(self, self.param_names, ...)
            # which (a) started the MRO lookup *after* ParametricModel, skipping
            # the parent class's __init__ entirely, and (b) passed `self` as a
            # duplicate positional argument. Delegate to the immediate parent.
            super(PowerLawModel, self).__init__(self.param_names, ndim=1,
                                                outdim=1, param_dim=param_dim)
            self.linear = False
            self.deriv = None

        def eval(self, xvals, params):
            """Evaluate scale * xvals**(-alpha); vectorized over xvals."""
            return params[0]*((xvals)**(-params[1]))

        def noderiv(self, params, xvals, yvals):
            """Analytic partial derivatives w.r.t. each parameter.

            Returns an array of shape (len(xvals), 2) with columns in
            ``param_names`` order. ``yvals`` is unused but kept for the
            fitter's expected derivative signature.
            """
            deriv_dict = {
                'scale': ((xvals)**(-params[1])),
                'alpha': params[0]*((xvals)**(-params[1]))*np.log(xvals)}
            derivval = [deriv_dict[par] for par in self.param_names]
            return np.array(derivval).T

        def __call__(self, x):
            """
            Transforms data using this model.

            Parameters
            --------------
            x : array, of minimum dimensions 1

            Notes
            -----
            See the module docstring for rules for model evaluation.
            """
            x, fmt = _convert_input(x, self.param_dim)
            result = self.eval(x, self.param_sets)
            return _convert_output(result, fmt)

except ImportError:
    # The legacy astropy.models API is unavailable; the model is simply
    # not defined in that case.
    pass
<?php

namespace Illuminate\Tests\Database;

use Mockery as m;
use PHPUnit\Framework\TestCase;
use Illuminate\Database\Eloquent\Model;
use Illuminate\Database\Eloquent\Builder;
use Illuminate\Database\Eloquent\Relations\HasOne;
use Illuminate\Database\Eloquent\Relations\Relation;

/**
 * Unit tests for base Eloquent Relation behaviour (setRelation, touch,
 * morph maps, macros) using Mockery doubles instead of a real database.
 */
class DatabaseEloquentRelationTest extends TestCase
{
    public function tearDown()
    {
        // Verify and release all Mockery expectations after each test.
        m::close();
    }

    public function testSetRelationFail()
    {
        // A non-relation value stored via setRelation() must not leak into
        // the model's array serialization.
        $parent = new EloquentRelationResetModelStub;
        $relation = new EloquentRelationResetModelStub;
        $parent->setRelation('test', $relation);
        $parent->setRelation('foo', 'bar');
        $this->assertArrayNotHasKey('foo', $parent->toArray());
    }

    public function testTouchMethodUpdatesRelatedTimestamps()
    {
        $builder = m::mock(Builder::class);
        $parent = m::mock(Model::class);
        $parent->shouldReceive('getAttribute')->with('id')->andReturn(1);
        $builder->shouldReceive('getModel')->andReturn($related = m::mock(\stdClass::class));
        $builder->shouldReceive('whereNotNull');
        $builder->shouldReceive('where');
        // touch() is expected to bypass global scopes for its update query.
        $builder->shouldReceive('withoutGlobalScopes')->andReturn($builder);
        $relation = new HasOne($builder, $parent, 'foreign_key', 'id');
        $related->shouldReceive('getTable')->andReturn('table');
        $related->shouldReceive('getUpdatedAtColumn')->andReturn('updated_at');
        $now = \Illuminate\Support\Carbon::now();
        $related->shouldReceive('freshTimestampString')->andReturn($now);
        // The single expected side effect: updated_at is set to "now".
        $builder->shouldReceive('update')->once()->with(['updated_at' => $now]);

        $relation->touch();
    }

    public function testSettingMorphMapWithNumericArrayUsesTheTableNames()
    {
        // A list (numeric keys) maps each model's table name to its class.
        Relation::morphMap([EloquentRelationResetModelStub::class]);

        $this->assertEquals([
            'reset' => 'Illuminate\Tests\Database\EloquentRelationResetModelStub',
        ], Relation::morphMap());

        // Reset the static morph map so later tests are unaffected.
        Relation::morphMap([], false);
    }

    public function testSettingMorphMapWithNumericKeys()
    {
        // Explicit numeric keys are preserved as-is (no table-name lookup).
        Relation::morphMap([1 => 'App\User']);

        $this->assertEquals([
            1 => 'App\User',
        ], Relation::morphMap());

        Relation::morphMap([], false);
    }

    public function testMacroable()
    {
        // Macros registered on the base Relation are callable on subclasses.
        Relation::macro('foo', function () {
            return 'foo';
        });

        $model = new EloquentRelationResetModelStub;
        $relation = new EloquentRelationStub($model->newQuery(), $model);

        $result = $relation->foo();

        $this->assertEquals('foo', $result);
    }
}

/**
 * Minimal concrete model used as both parent and related stub above.
 */
class EloquentRelationResetModelStub extends Model
{
    protected $table = 'reset';

    // Override method call which would normally go through __call()
    public function getQuery()
    {
        return $this->newQuery()->getQuery();
    }
}

/**
 * Bare Relation subclass with no-op implementations of the abstract API,
 * used to exercise behaviour defined on the base Relation class itself.
 */
class EloquentRelationStub extends Relation
{
    public function addConstraints()
    {
    }

    public function addEagerConstraints(array $models)
    {
    }

    public function initRelation(array $models, $relation)
    {
    }

    public function match(array $models, \Illuminate\Database\Eloquent\Collection $results, $relation)
    {
    }

    public function getResults()
    {
    }
}
#if !defined(CODE_GOOGLE_COM_P_V8_CONVERT_V8_CONVERT_HPP_INCLUDED)
#define CODE_GOOGLE_COM_P_V8_CONVERT_V8_CONVERT_HPP_INCLUDED 1
// Doxygen REFUSES to use this block as namespace docs: @namespace cvv8
/** @mainpage libv8-convert (cvv8)

   The cvv8 namespace (formerly v8::convert) houses APIs for handling
   the following:

   - Converting between v8 Value handles and "native types" using a
   generic interface. This allows us to write generic algorithms which
   convert between JS/C++ without having to know the exact types we're
   dealing with. The basic POD types and some STL types are supported
   out of the box and plugging in one's own types is normally quite
   simple.

   - Converting free- and member functions into v8::InvocationCallback
   functions. These generated functions convert the JavaScript-originated
   function arguments into native counterparts, forward the data to the
   original native function, and convert the return values back to
   something JS can use.

   Those two core features give us all we need in order to be able to
   bind near-arbitrary C/C++ functions with JavaScript (where calling
   conventions and type conversions allow us to do so). For cases where
   the "automatic" function-to-InvocationCallback conversions are not
   suitable, the type-conversion API can simplify the implementation of
   custom v8::InvocationCallback functions.

   All of the conversions are compile-time typesafe where possible and
   fail gracefully when such a determination can only be made at
   runtime.

   This code originated as the core-most component of the v8-juice
   library (http://code.google.com/p/v8-juice). After a couple of years
   I felt compelled to refactor it into a toolkit usable by arbitrary
   v8-using clients, doing a bit of cleanup along the way. The eventual
   intention is that this code will replace the v8::juice::convert code.
Author: Stephan Beal (http://wanderinghorse.net/home/stephan/) License: Dual MIT/Public Domain Project home page: http://code.google.com/p/v8-juice/wiki/V8Convert The most important functions and types, from a user's perspective, include: Converting types: - cvv8::CastToJS() - cvv8::CastFromJS() Implementing custom conversions: - cvv8::NativeToJS - cvv8::JSToNative Converting functions to v8::InvocationCallback: - cvv8::FunctionToInCa - cvv8::MethodToInCa - cvv8::ConstMethodToInCa - cvv8::ToInCa - cvv8::FunctorToInCa - cvv8::PredicatedInCa and cvv8::PredicatedInCaDispatcher Binding JS properties to native properties, functions, methods, or functors: - cvv8::FunctionToGetter, cvv8::FunctionToSetter - cvv8::MethodToGetter, cvv8::MethodToSetter - cvv8::ConstMethodToGetter, cvv8::ConstMethodToSetter - cvv8::FunctorToGetter, cvv8::FunctorToSetter Other utilities: - cvv8::CtorForwarder and cvv8::CtorArityDispatcher - cvv8::ClassCreator simplifies binding of C++ classes with v8. - cvv8::FunctionTo converts functions to ... - cvv8::MethodTo converts methods to ... - cvv8::FunctorTo converts functors to ... - cvv8::VarTo converts variables to ... - cvv8::CallForwarder forwards native arguments to JS functions. - The tmp and sl namespaces hold various template metaprogramming bits. - ... there's more ... Most of the code in this library are internal template specializations which take care of the dirty work. Typical clients won't typically need more than what's listed above. A core rule of this library is "if it ain't documented, don't use it." All public API members which are intended for client-side use are documented. Some one-line proxies whose purpose is either very obvious, exist only for template type resolution reasons, or are strictly internal are not necessarily documented. 
*/ namespace cvv8 { } #include "convert.hpp" #include "invocable.hpp" #include "arguments.hpp" #include "ClassCreator.hpp" #include "properties.hpp" #include "XTo.hpp" /** LICENSE This software's source code, including accompanying documentation and demonstration applications, are licensed under the following conditions... The author (Stephan G. Beal [http://wanderinghorse.net/home/stephan/]) explicitly disclaims copyright in all jurisdictions which recognize such a disclaimer. In such jurisdictions, this software is released into the Public Domain. In jurisdictions which do not recognize Public Domain property (e.g. Germany as of 2011), this software is Copyright (c) 2011 by Stephan G. Beal, and is released under the terms of the MIT License (see below). In jurisdictions which recognize Public Domain property, the user of this software may choose to accept it either as 1) Public Domain, 2) under the conditions of the MIT License (see below), or 3) under the terms of dual Public Domain/MIT License conditions described here, as they choose. The MIT License is about as close to Public Domain as a license can get, and is described in clear, concise terms at: http://en.wikipedia.org/wiki/MIT_License The full text of the MIT License follows: -- Copyright (c) 2011 Stephan G. Beal (http://wanderinghorse.net/home/stephan/) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. --END OF MIT LICENSE-- For purposes of the above license, the term "Software" includes documentation and demonstration source code which accompanies this software. ("Accompanies" = is contained in the Software's primary public source code repository.) */ #endif /* CODE_GOOGLE_COM_P_V8_CONVERT_V8_CONVERT_HPP_INCLUDED */
require 'yaml'

module VagrantPlugins
  module ProviderLibvirt
    module Action
      # Middleware step that asks the host to prune stale NFS exports.
      #
      # It gathers the ids of every server the libvirt connection knows
      # about, drops the id of the machine currently being acted upon,
      # and passes the remaining ids to the host's :nfs_prune capability
      # so exports belonging to other machines can be cleaned up.
      class PruneNFSExports
        def initialize(app, _env)
          @app = app
        end

        # env is the Vagrant action environment; only :host and :machine
        # are consulted here.
        def call(env)
          host = env[:host]
          if host
            machine = env[:machine]
            current_id = machine.id
            # All known server ids except the current machine's.
            other_ids = machine.provider.driver.connection.servers.all
                               .map(&:id)
                               .reject { |server_id| server_id == current_id }
            host.capability(:nfs_prune, machine.ui, other_ids)
          end
          @app.call(env)
        end
      end
    end
  end
end
# require_relative resolves against this file's own location, unlike the
# legacy `File.dirname(__FILE__) + '/...'` form, which builds a relative
# path that breaks when the process working directory differs from the
# test directory.
require_relative '../test_helper'

# Functional tests for BonesController.
class BonesControllerTest < ActionController::TestCase
  # Placeholder test; replace with real assertions for BonesController.
  def test_truth
    assert true
  end
end
<!DOCTYPE html>
<!-- Demo page for the TinyMCE "save" plugin. The bolt bootstrap script
     loads the module system, then boots tinymce.plugins.save.demo.Demo,
     which (presumably) attaches an editor to the .tinymce textarea
     below — confirm against the Demo module. -->
<html>
<head>
  <title>Plugin: save Demo Page</title>
  <script src="../../../config/bolt/bootstrap-demo.js"></script>
  <script>
    ephox.bolt.module.api.main('tinymce.plugins.save.demo.Demo');
  </script>
</head>
<body>
  <h2>Plugin: save Demo Page</h2>
  <div id="ephox-ui">
    <textarea name="" id="" cols="30" rows="10" class="tinymce"></textarea>
  </div>
</body>
</html>
/**************************************

Slider widget. Example config:

{
    x:0, y:0, width:433,
    min:1, max:25, step:1,
    message: "rules/turns"
}

Depends on globals defined elsewhere in the project: listen, unlisten,
publish, _add, _remove. NOTE(review): these are not defined in this
file — verify they are loaded before Slider is constructed.

**************************************/
function Slider(config){

	var self = this;
	self.id = config.id;

	// Create DOM: absolutely-positioned container sized from config.
	var dom = document.createElement("div");
	dom.className = "slider";
	dom.style.left = config.x+"px";
	dom.style.top = config.y+"px";
	dom.style.width = config.width+"px";
	self.dom = dom;

	// Background track
	var bg = document.createElement("div");
	bg.className = "slider_bg";
	dom.appendChild(bg);

	// Draggable knob
	var knob = document.createElement("div");
	knob.className = "slider_knob";
	dom.appendChild(knob);

	// Value state. param is the normalized 0..1 position; value is the
	// snapped user-facing number in [config.min, config.max].
	self.value = 0;

	// Map a 0..1 param to a value, snapped to config.step.
	var _paramToValue = function(param){
		var value = config.min + (config.max-config.min)*param;
		value = Math.round(value/config.step)*config.step;
		return value;
	};

	// Map a value back to its 0..1 param.
	var _valueToParam = function(value){
		var param = (value-config.min)/(config.max-config.min); // to (0-1)
		return param;
	};

	// Set by param (0..1).
	// NOTE(review): this positions the knob with self.value*config.width-15,
	// whereas setValue below uses param*(config.width-30) — the two
	// formulas disagree; confirm which is intended. Also, both assign a
	// bare number to style.left (no "px" unit), which standards-mode
	// browsers ignore — verify against how the knob is actually styled.
	self.setParam = function(param){
		// Bounds
		var value = config.min + (config.max-config.min)*param;
		value = Math.round(value/config.step)*config.step;
		self.value = value;
		// DOM
		knob.style.left = self.value*config.width-15;
	};

	// Set by value directly (used for incoming messages).
	self.setValue = function(value){
		// Set
		self.value = value;
		// DOM with param
		var param = _valueToParam(self.value);
		knob.style.left = param*(config.width-30);
	};
	// Keep the slider in sync with externally published values.
	if(config.message) listen(self, config.message, self.setValue);

	// Mouse events
	var _isDragging = false;
	var _offsetX = 0; // grab offset within the knob, so it doesn't jump

	// Convert a mouse position to a param, snap it to a value, and
	// publish/notify only when the value actually changed.
	var _mouseToParam = function(event){
		// Mouse to Param to Value
		// NOTE(review): the magic constants 8 and (config.width-30)
		// presumably account for knob width/padding — confirm against CSS.
		var param = (event.clientX - _offsetX - dom.getBoundingClientRect().left - 8)/(config.width-30);
		if(param<0) param=0;
		if(param>1) param=1;
		var value = _paramToValue(param);
		// Publish these changes! (only if ACTUALLY changed)
		if(self.value != value){
			if(config.message) publish(config.message, [value]);
			if(config.onchange) config.onchange(value);
		}
	};

	// Clicking the track jumps straight to that position.
	var _onDomMouseDown = function(event){
		if(config.onselect) config.onselect();
		_mouseToParam(event);
		_isDragging = true;
		_offsetX = 0;
	};

	// Grabbing the knob starts a drag, remembering where it was grabbed.
	var _onKnobMouseDown = function(event){
		_isDragging = true;
		if(config.onselect) config.onselect();
		_offsetX = event.clientX - knob.getBoundingClientRect().left;
	};
	var _onWindowMouseMove = function(event){
		if(_isDragging) _mouseToParam(event);
	};
	var _onWindowMouseUp = function(){
		_isDragging = false;
	};
	// Move/up are bound on window so drags survive leaving the slider.
	// NOTE(review): window listeners are never removed in self.remove() —
	// possible leak if sliders are created/destroyed repeatedly; confirm.
	dom.addEventListener("mousedown",_onDomMouseDown,false);
	knob.addEventListener("mousedown",_onKnobMouseDown,false);
	window.addEventListener("mousemove",_onWindowMouseMove,false);
	window.addEventListener("mouseup",_onWindowMouseUp,false);

	// FOR TOUCH: adapt touch events to the mouse handlers by exposing
	// the first changed touch's coordinates as a mouse-like event.
	var _fakeEventWrapper = function(event){
		var fake = {};
		fake.clientX = event.changedTouches[0].clientX;
		fake.clientY = event.changedTouches[0].clientY;
		return fake;
	};
	dom.addEventListener("touchstart",function(event){
		event = _fakeEventWrapper(event);
		_onDomMouseDown(event);
	},false);
	knob.addEventListener("touchstart",function(event){
		event = _fakeEventWrapper(event);
		_onKnobMouseDown(event);
	},false);
	window.addEventListener("touchmove",function(event){
		event = _fakeEventWrapper(event);
		_onWindowMouseMove(event);
	},false);
	window.addEventListener("touchend",_onWindowMouseUp,false);

	////////////////////////////////////////

	// Add this widget to the scene (project-level helper).
	self.add = function(){
		_add(self);
	};

	// Remove: unsubscribe from messages, then detach from the scene.
	self.remove = function(){
		unlisten(self);
		_remove(self);
	};

}
/* // This software is subject to the terms of the Eclipse Public License v1.0 // Agreement, available at the following URL: // http://www.eclipse.org/legal/epl-v10.html. // You must accept the terms of that agreement to use this software. // // Copyright (C) 2001-2005 Julian Hyde // Copyright (C) 2005-2015 Pentaho and others // All Rights Reserved. */ package mondrian.olap; import mondrian.mdx.*; import mondrian.olap.fun.FunUtil; import mondrian.olap.fun.Resolver; import mondrian.olap.type.Type; import mondrian.resource.MondrianResource; import mondrian.rolap.*; import mondrian.spi.UserDefinedFunction; import mondrian.util.*; import org.apache.commons.collections.keyvalue.AbstractMapEntry; import org.apache.commons.io.IOUtils; import org.apache.commons.vfs2.FileContent; import org.apache.commons.vfs2.FileObject; import org.apache.commons.vfs2.FileSystemException; import org.apache.commons.vfs2.FileSystemManager; import org.apache.commons.vfs2.VFS; import org.apache.commons.vfs2.provider.http.HttpFileObject; import org.apache.log4j.Logger; import org.eigenbase.xom.XOMUtil; import org.olap4j.impl.Olap4jUtil; import org.olap4j.mdx.*; import java.io.*; import java.lang.ref.Reference; import java.lang.reflect.*; import java.lang.reflect.Array; import java.math.BigDecimal; import java.net.MalformedURLException; import java.net.URL; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.sql.*; import java.sql.Connection; import java.util.*; import java.util.concurrent.*; import java.util.concurrent.atomic.AtomicInteger; import java.util.regex.Matcher; import java.util.regex.Pattern; /** * Utility functions used throughout mondrian. All methods are static. * * @author jhyde * @since 6 August, 2001 */ public class Util extends XOMUtil { public static final String nl = System.getProperty("line.separator"); private static final Logger LOGGER = Logger.getLogger(Util.class); /** * Placeholder which indicates a value NULL. 
*/ public static final Object nullValue = new Double(FunUtil.DoubleNull); /** * Placeholder which indicates an EMPTY value. */ public static final Object EmptyValue = new Double(FunUtil.DoubleEmpty); /** * Cumulative time spent accessing the database. */ private static long databaseMillis = 0; /** * Random number generator to provide seed for other random number * generators. */ private static final Random metaRandom = createRandom(MondrianProperties.instance().TestSeed.get()); /** Unique id for this JVM instance. Part of a key that ensures that if * two JVMs in the same cluster have a data-source with the same * identity-hash-code, they will be treated as different data-sources, * and therefore caches will not be incorrectly shared. */ public static final UUID JVM_INSTANCE_UUID = UUID.randomUUID(); /** * Whether this is an IBM JVM. */ public static final boolean IBM_JVM = System.getProperties().getProperty("java.vendor").equals( "IBM Corporation"); /** * What version of JDBC? * Returns:<ul> * <li>0x0401 in JDK 1.7 and higher</li> * <li>0x0400 in JDK 1.6</li> * <li>0x0300 otherwise</li> * </ul> */ public static final int JdbcVersion = System.getProperty("java.version").compareTo("1.7") >= 0 ? 0x0401 : System.getProperty("java.version").compareTo("1.6") >= 0 ? 0x0400 : 0x0300; /** * Whether the code base has re-engineered using retroweaver. * If this is the case, some functionality is not available, but a lot of * things are available via {@link mondrian.util.UtilCompatible}. * Retroweaver has some problems involving {@link java.util.EnumSet}. */ public static final boolean Retrowoven = Access.class.getSuperclass().getName().equals( "net.sourceforge.retroweaver.runtime.java.lang.Enum"); private static final UtilCompatible compatible; /** * Flag to control expensive debugging. (More expensive than merely * enabling assertions: as we know, a lot of people run with assertions * enabled.) 
*/ public static final boolean DEBUG = false; static { compatible = new UtilCompatibleJdk16(); } public static boolean isNull(Object o) { return o == null || o == nullValue; } /** * Returns whether a list is strictly sorted. * * @param list List * @return whether list is sorted */ public static <T> boolean isSorted(List<T> list) { T prev = null; for (T t : list) { if (prev != null && ((Comparable<T>) prev).compareTo(t) >= 0) { return false; } prev = t; } return true; } /** * Parses a string and returns a SHA-256 checksum of it. * * @param value The source string to parse. * @return A checksum of the source string. */ public static byte[] digestSha256(String value) { final MessageDigest algorithm; try { algorithm = MessageDigest.getInstance("SHA-256"); } catch (NoSuchAlgorithmException e) { throw new RuntimeException(e); } return algorithm.digest(value.getBytes()); } /** * Creates an MD5 hash of a String. * * @param value String to create one way hash upon. * @return MD5 hash. */ public static byte[] digestMd5(final String value) { final MessageDigest algorithm; try { algorithm = MessageDigest.getInstance("MD5"); } catch (NoSuchAlgorithmException e) { throw new RuntimeException(e); } return algorithm.digest(value.getBytes()); } /** * Creates an {@link ExecutorService} object backed by a thread pool. * @param maximumPoolSize Maximum number of concurrent * threads. * @param corePoolSize Minimum number of concurrent * threads to maintain in the pool, even if they are * idle. * @param keepAliveTime Time, in seconds, for which to * keep alive unused threads. * @param name The name of the threads. * @param rejectionPolicy The rejection policy to enforce. * @return An executor service preconfigured. */ public static ExecutorService getExecutorService( int maximumPoolSize, int corePoolSize, long keepAliveTime, final String name, RejectedExecutionHandler rejectionPolicy) { // We must create a factory where the threads // have the right name and are marked as daemon threads. 
final ThreadFactory factory = new ThreadFactory() { private final AtomicInteger counter = new AtomicInteger(0); public Thread newThread(Runnable r) { final Thread t = Executors.defaultThreadFactory().newThread(r); t.setDaemon(true); t.setName(name + '_' + counter.incrementAndGet()); return t; } }; // Ok, create the executor final ThreadPoolExecutor executor = new ThreadPoolExecutor( corePoolSize, maximumPoolSize > 0 ? maximumPoolSize : Integer.MAX_VALUE, keepAliveTime, TimeUnit.SECONDS, // we use a sync queue. any other type of queue // will prevent the tasks from running concurrently // because the executors API requires blocking queues. // Important to pass true here. This makes the // order of tasks deterministic. // TODO Write a non-blocking queue which implements // the blocking queue API so we can pass that to the // executor. new LinkedBlockingQueue<Runnable>(), factory); // Set the rejection policy if required. if (rejectionPolicy != null) { executor.setRejectedExecutionHandler( rejectionPolicy); } // Done return executor; } /** * Creates an {@link ScheduledExecutorService} object backed by a * thread pool with a fixed number of threads.. * @param maxNbThreads Maximum number of concurrent * threads. * @param name The name of the threads. * @return An scheduled executor service preconfigured. */ public static ScheduledExecutorService getScheduledExecutorService( final int maxNbThreads, final String name) { return Executors.newScheduledThreadPool( maxNbThreads, new ThreadFactory() { final AtomicInteger counter = new AtomicInteger(0); public Thread newThread(Runnable r) { final Thread thread = Executors.defaultThreadFactory().newThread(r); thread.setDaemon(true); thread.setName(name + '_' + counter.incrementAndGet()); return thread; } } ); } /** * Encodes string for MDX (escapes ] as ]] inside a name). 
* * @deprecated Will be removed in 4.0 */ public static String mdxEncodeString(String st) { StringBuilder retString = new StringBuilder(st.length() + 20); for (int i = 0; i < st.length(); i++) { char c = st.charAt(i); if ((c == ']') && ((i + 1) < st.length()) && (st.charAt(i + 1) != '.')) { retString.append(']'); // escaping character } retString.append(c); } return retString.toString(); } /** * Converts a string into a double-quoted string. */ public static String quoteForMdx(String val) { StringBuilder buf = new StringBuilder(val.length() + 20); quoteForMdx(buf, val); return buf.toString(); } /** * Appends a double-quoted string to a string builder. */ public static StringBuilder quoteForMdx(StringBuilder buf, String val) { buf.append("\""); String s0 = replace(val, "\"", "\"\""); buf.append(s0); buf.append("\""); return buf; } /** * Return string quoted in [...]. For example, "San Francisco" becomes * "[San Francisco]"; "a [bracketed] string" becomes * "[a [bracketed]] string]". */ public static String quoteMdxIdentifier(String id) { StringBuilder buf = new StringBuilder(id.length() + 20); quoteMdxIdentifier(id, buf); return buf.toString(); } public static void quoteMdxIdentifier(String id, StringBuilder buf) { buf.append('['); int start = buf.length(); buf.append(id); replace(buf, start, "]", "]]"); buf.append(']'); } /** * Return identifiers quoted in [...].[...]. For example, {"Store", "USA", * "California"} becomes "[Store].[USA].[California]". */ public static String quoteMdxIdentifier(List<Id.Segment> ids) { StringBuilder sb = new StringBuilder(64); quoteMdxIdentifier(ids, sb); return sb.toString(); } public static void quoteMdxIdentifier( List<Id.Segment> ids, StringBuilder sb) { for (int i = 0; i < ids.size(); i++) { if (i > 0) { sb.append('.'); } ids.get(i).toString(sb); } } /** * Quotes a string literal for Java or JavaScript. 
* * @param s Unquoted literal * @return Quoted string literal */ public static String quoteJavaString(String s) { return s == null ? "null" : "\"" + s.replaceAll("\\\\", "\\\\\\\\") .replaceAll("\\\"", "\\\\\"") + "\""; } /** * Returns true if two objects are equal, or are both null. * * @param s First object * @param t Second object * @return Whether objects are equal or both null */ public static boolean equals(Object s, Object t) { if (s == t) { return true; } if (s == null || t == null) { return false; } return s.equals(t); } /** * Returns true if two strings are equal, or are both null. * * <p>The result is not affected by * {@link MondrianProperties#CaseSensitive the case sensitive option}; if * you wish to compare names, use {@link #equalName(String, String)}. */ public static boolean equals(String s, String t) { return equals((Object) s, (Object) t); } /** * Returns whether two names are equal. * Takes into account the * {@link MondrianProperties#CaseSensitive case sensitive option}. * Names may be null. */ public static boolean equalName(String s, String t) { if (s == null) { return t == null; } boolean caseSensitive = MondrianProperties.instance().CaseSensitive.get(); return caseSensitive ? s.equals(t) : s.equalsIgnoreCase(t); } /** * Tests two strings for equality, optionally ignoring case. * * @param s First string * @param t Second string * @param matchCase Whether to perform case-sensitive match * @return Whether strings are equal */ public static boolean equal(String s, String t, boolean matchCase) { return matchCase ? s.equals(t) : s.equalsIgnoreCase(t); } /** * Compares two names. if case sensitive flag is false, * apply finer grain difference with case sensitive * Takes into account the {@link MondrianProperties#CaseSensitive case * sensitive option}. * Names must not be null. 
*/ public static int caseSensitiveCompareName(String s, String t) { boolean caseSensitive = MondrianProperties.instance().CaseSensitive.get(); if (caseSensitive) { return s.compareTo(t); } else { int v = s.compareToIgnoreCase(t); // if ignore case returns 0 compare in a case sensitive manner // this was introduced to solve an issue with Member.equals() // and Member.compareTo() not agreeing with each other return v == 0 ? s.compareTo(t) : v; } } /** * Compares two names. * Takes into account the {@link MondrianProperties#CaseSensitive case * sensitive option}. * Names must not be null. */ public static int compareName(String s, String t) { boolean caseSensitive = MondrianProperties.instance().CaseSensitive.get(); return caseSensitive ? s.compareTo(t) : s.compareToIgnoreCase(t); } /** * Generates a normalized form of a name, for use as a key into a map. * Returns the upper case name if * {@link MondrianProperties#CaseSensitive} is true, the name unchanged * otherwise. */ public static String normalizeName(String s) { return MondrianProperties.instance().CaseSensitive.get() ? s : s.toUpperCase(); } /** * Returns the result of ((Comparable) k1).compareTo(k2), with * special-casing for the fact that Boolean only became * comparable in JDK 1.5. * * @see Comparable#compareTo */ public static int compareKey(Object k1, Object k2) { if (k1 instanceof Boolean) { // Luckily, "F" comes before "T" in the alphabet. k1 = k1.toString(); k2 = k2.toString(); } return ((Comparable) k1).compareTo(k2); } /** * Compares integer values. * * @param i0 First integer * @param i1 Second integer * @return Comparison of integers */ public static int compare(int i0, int i1) { return i0 < i1 ? -1 : (i0 == i1 ? 0 : 1); } /** * Returns a string with every occurrence of a seek string replaced with * another. 
*/ public static String replace(String s, String find, String replace) { // let's be optimistic int found = s.indexOf(find); if (found == -1) { return s; } StringBuilder sb = new StringBuilder(s.length() + 20); int start = 0; char[] chars = s.toCharArray(); final int step = find.length(); if (step == 0) { // Special case where find is "". sb.append(s); replace(sb, 0, find, replace); } else { for (;;) { sb.append(chars, start, found - start); if (found == s.length()) { break; } sb.append(replace); start = found + step; found = s.indexOf(find, start); if (found == -1) { found = s.length(); } } } return sb.toString(); } /** * Replaces all occurrences of a string in a buffer with another. * * @param buf String buffer to act on * @param start Ordinal within <code>find</code> to start searching * @param find String to find * @param replace String to replace it with * @return The string buffer */ public static StringBuilder replace( StringBuilder buf, int start, String find, String replace) { // Search and replace from the end towards the start, to avoid O(n ^ 2) // copying if the string occurs very commonly. int findLength = find.length(); if (findLength == 0) { // Special case where the seek string is empty. for (int j = buf.length(); j >= 0; --j) { buf.insert(j, replace); } return buf; } int k = buf.length(); while (k > 0) { int i = buf.lastIndexOf(find, k); if (i < start) { break; } buf.replace(i, i + find.length(), replace); // Step back far enough to ensure that the beginning of the section // we just replaced does not cause a match. k = i - findLength; } return buf; } /** * Parses an MDX identifier such as <code>[Foo].[Bar].Baz.&Key&Key2</code> * and returns the result as a list of segments. 
* * @param s MDX identifier * @return List of segments */ public static List<Id.Segment> parseIdentifier(String s) { return convert( org.olap4j.impl.IdentifierParser.parseIdentifier(s)); } /** * Converts an array of name parts {"part1", "part2"} into a single string * "[part1].[part2]". If the names contain "]" they are escaped as "]]". */ public static String implode(List<Id.Segment> names) { StringBuilder sb = new StringBuilder(64); for (int i = 0; i < names.size(); i++) { if (i > 0) { sb.append("."); } // FIXME: should be: // names.get(i).toString(sb); // but that causes some tests to fail Id.Segment segment = names.get(i); switch (segment.getQuoting()) { case UNQUOTED: segment = new Id.NameSegment(((Id.NameSegment) segment).name); } segment.toString(sb); } return sb.toString(); } public static String makeFqName(String name) { return quoteMdxIdentifier(name); } public static String makeFqName(OlapElement parent, String name) { if (parent == null) { return Util.quoteMdxIdentifier(name); } else { StringBuilder buf = new StringBuilder(64); buf.append(parent.getUniqueName()); buf.append('.'); Util.quoteMdxIdentifier(name, buf); return buf.toString(); } } public static String makeFqName(String parentUniqueName, String name) { if (parentUniqueName == null) { return quoteMdxIdentifier(name); } else { StringBuilder buf = new StringBuilder(64); buf.append(parentUniqueName); buf.append('.'); Util.quoteMdxIdentifier(name, buf); return buf.toString(); } } public static OlapElement lookupCompound( SchemaReader schemaReader, OlapElement parent, List<Id.Segment> names, boolean failIfNotFound, int category) { return lookupCompound( schemaReader, parent, names, failIfNotFound, category, MatchType.EXACT); } /** * Resolves a name such as * '[Products]&#46;[Product Department]&#46;[Produce]' by resolving the * components ('Products', and so forth) one at a time. 
* * @param schemaReader Schema reader, supplies access-control context * @param parent Parent element to search in * @param names Exploded compound name, such as {"Products", * "Product Department", "Produce"} * @param failIfNotFound If the element is not found, determines whether * to return null or throw an error * @param category Type of returned element, a {@link Category} value; * {@link Category#Unknown} if it doesn't matter. * * @pre parent != null * @post !(failIfNotFound && return == null) * * @see #parseIdentifier(String) */ public static OlapElement lookupCompound( SchemaReader schemaReader, OlapElement parent, List<Id.Segment> names, boolean failIfNotFound, int category, MatchType matchType) { Util.assertPrecondition(parent != null, "parent != null"); if (LOGGER.isDebugEnabled()) { StringBuilder buf = new StringBuilder(64); buf.append("Util.lookupCompound: "); buf.append("parent.name="); buf.append(parent.getName()); buf.append(", category="); buf.append(Category.instance.getName(category)); buf.append(", names="); quoteMdxIdentifier(names, buf); LOGGER.debug(buf.toString()); } // First look up a member from the cache of calculated members // (cubes and queries both have them). switch (category) { case Category.Member: case Category.Unknown: Member member = schemaReader.getCalculatedMember(names); if (member != null) { return member; } } // Likewise named set. switch (category) { case Category.Set: case Category.Unknown: NamedSet namedSet = schemaReader.getNamedSet(names); if (namedSet != null) { return namedSet; } } // Now resolve the name one part at a time. 
for (int i = 0; i < names.size(); i++) { OlapElement child; Id.NameSegment name; if (names.get(i) instanceof Id.NameSegment) { name = (Id.NameSegment) names.get(i); child = schemaReader.getElementChild(parent, name, matchType); } else if (parent instanceof RolapLevel && names.get(i) instanceof Id.KeySegment && names.get(i).getKeyParts().size() == 1) { // The following code is for SsasCompatibleNaming=false. // Continues the very limited support for key segments in // mondrian-3.x. To be removed in mondrian-4, when // SsasCompatibleNaming=true is the only option. final Id.KeySegment keySegment = (Id.KeySegment) names.get(i); name = keySegment.getKeyParts().get(0); final List<Member> levelMembers = schemaReader.getLevelMembers( (Level) parent, false); child = null; for (Member member : levelMembers) { if (((RolapMember) member).getKey().toString().equals( name.getName())) { child = member; break; } } } else { name = null; child = schemaReader.getElementChild(parent, name, matchType); } // if we're doing a non-exact match, and we find a non-exact // match, then for an after match, return the first child // of each subsequent level; for a before match, return the // last child if (child instanceof Member && !matchType.isExact() && !Util.equalName(child.getName(), name.getName())) { Member bestChild = (Member) child; for (int j = i + 1; j < names.size(); j++) { List<Member> childrenList = schemaReader.getMemberChildren(bestChild); FunUtil.hierarchizeMemberList(childrenList, false); if (matchType == MatchType.AFTER) { bestChild = childrenList.get(0); } else { bestChild = childrenList.get(childrenList.size() - 1); } if (bestChild == null) { child = null; break; } } parent = bestChild; break; } if (child == null) { if (LOGGER.isDebugEnabled()) { LOGGER.debug( "Util.lookupCompound: " + "parent.name=" + parent.getName() + " has no child with name=" + name); } if (!failIfNotFound) { return null; } else if (category == Category.Member) { throw 
MondrianResource.instance().MemberNotFound.ex( quoteMdxIdentifier(names)); } else { throw MondrianResource.instance().MdxChildObjectNotFound .ex(name.toString(), parent.getQualifiedName()); } } parent = child; if (matchType == MatchType.EXACT_SCHEMA) { matchType = MatchType.EXACT; } } if (LOGGER.isDebugEnabled()) { LOGGER.debug( "Util.lookupCompound: " + "found child.name=" + parent.getName() + ", child.class=" + parent.getClass().getName()); } switch (category) { case Category.Dimension: if (parent instanceof Dimension) { return parent; } else if (parent instanceof Hierarchy) { return parent.getDimension(); } else if (failIfNotFound) { throw Util.newError( "Can not find dimension '" + implode(names) + "'"); } else { return null; } case Category.Hierarchy: if (parent instanceof Hierarchy) { return parent; } else if (parent instanceof Dimension) { return parent.getHierarchy(); } else if (failIfNotFound) { throw Util.newError( "Can not find hierarchy '" + implode(names) + "'"); } else { return null; } case Category.Level: if (parent instanceof Level) { return parent; } else if (failIfNotFound) { throw Util.newError( "Can not find level '" + implode(names) + "'"); } else { return null; } case Category.Member: if (parent instanceof Member) { return parent; } else if (failIfNotFound) { throw MondrianResource.instance().MdxCantFindMember.ex( implode(names)); } else { return null; } case Category.Unknown: assertPostcondition(parent != null, "return != null"); return parent; default: throw newInternal("Bad switch " + category); } } public static OlapElement lookup(Query q, List<Id.Segment> nameParts) { final Exp exp = lookup(q, nameParts, false); if (exp instanceof MemberExpr) { MemberExpr memberExpr = (MemberExpr) exp; return memberExpr.getMember(); } else if (exp instanceof LevelExpr) { LevelExpr levelExpr = (LevelExpr) exp; return levelExpr.getLevel(); } else if (exp instanceof HierarchyExpr) { HierarchyExpr hierarchyExpr = (HierarchyExpr) exp; return 
hierarchyExpr.getHierarchy(); } else if (exp instanceof DimensionExpr) { DimensionExpr dimensionExpr = (DimensionExpr) exp; return dimensionExpr.getDimension(); } else { throw Util.newInternal("Not an olap element: " + exp); } } /** * Converts an identifier into an expression by resolving its parts into * an OLAP object (dimension, hierarchy, level or member) within the * context of a query. * * <p>If <code>allowProp</code> is true, also allows property references * from valid members, for example * <code>[Measures].[Unit Sales].FORMATTED_VALUE</code>. * In this case, the result will be a {@link mondrian.mdx.ResolvedFunCall}. * * @param q Query expression belongs to * @param nameParts Parts of the identifier * @param allowProp Whether to allow property references * @return OLAP object or property reference */ public static Exp lookup( Query q, List<Id.Segment> nameParts, boolean allowProp) { return lookup(q, q.getSchemaReader(true), nameParts, allowProp); } /** * Converts an identifier into an expression by resolving its parts into * an OLAP object (dimension, hierarchy, level or member) within the * context of a query. * * <p>If <code>allowProp</code> is true, also allows property references * from valid members, for example * <code>[Measures].[Unit Sales].FORMATTED_VALUE</code>. * In this case, the result will be a {@link ResolvedFunCall}. * * @param q Query expression belongs to * @param schemaReader Schema reader * @param segments Parts of the identifier * @param allowProp Whether to allow property references * @return OLAP object or property reference */ public static Exp lookup( Query q, SchemaReader schemaReader, List<Id.Segment> segments, boolean allowProp) { // First, look for a calculated member defined in the query. final String fullName = quoteMdxIdentifier(segments); // Look for any kind of object (member, level, hierarchy, // dimension) in the cube. Use a schema reader without restrictions. 
final SchemaReader schemaReaderSansAc = schemaReader.withoutAccessControl().withLocus(); final Cube cube = q.getCube(); OlapElement olapElement = schemaReaderSansAc.lookupCompound( cube, segments, false, Category.Unknown); if (olapElement != null) { Role role = schemaReader.getRole(); if (!role.canAccess(olapElement)) { olapElement = null; } if (olapElement instanceof Member) { olapElement = schemaReader.substitute((Member) olapElement); } } if (olapElement == null) { if (allowProp && segments.size() > 1) { List<Id.Segment> segmentsButOne = segments.subList(0, segments.size() - 1); final Id.Segment lastSegment = last(segments); final String propertyName = lastSegment instanceof Id.NameSegment ? ((Id.NameSegment) lastSegment).getName() : null; final Member member = (Member) schemaReaderSansAc.lookupCompound( cube, segmentsButOne, false, Category.Member); if (member != null && propertyName != null && isValidProperty(propertyName, member.getLevel())) { return new UnresolvedFunCall( propertyName, Syntax.Property, new Exp[] { createExpr(member)}); } final Level level = (Level) schemaReaderSansAc.lookupCompound( cube, segmentsButOne, false, Category.Level); if (level != null && propertyName != null && isValidProperty(propertyName, level)) { return new UnresolvedFunCall( propertyName, Syntax.Property, new Exp[] { createExpr(level)}); } } // if we're in the middle of loading the schema, the property has // been set to ignore invalid members, and the member is // non-existent, return the null member corresponding to the // hierarchy of the element we're looking for; locate the // hierarchy by incrementally truncating the name of the element if (q.ignoreInvalidMembers()) { int nameLen = segments.size() - 1; olapElement = null; while (nameLen > 0 && olapElement == null) { List<Id.Segment> partialName = segments.subList(0, nameLen); olapElement = schemaReaderSansAc.lookupCompound( cube, partialName, false, Category.Unknown); nameLen--; } if (olapElement != null) { olapElement 
= olapElement.getHierarchy().getNullMember(); } else { throw MondrianResource.instance().MdxChildObjectNotFound.ex( fullName, cube.getQualifiedName()); } } else { throw MondrianResource.instance().MdxChildObjectNotFound.ex( fullName, cube.getQualifiedName()); } } // keep track of any measure members referenced; these will be used // later to determine if cross joins on virtual cubes can be // processed natively q.addMeasuresMembers(olapElement); return createExpr(olapElement); } /** * Looks up a cube in a schema reader. * * @param cubeName Cube name * @param fail Whether to fail if not found. * @return Cube, or null if not found */ static Cube lookupCube( SchemaReader schemaReader, String cubeName, boolean fail) { for (Cube cube : schemaReader.getCubes()) { if (Util.compareName(cube.getName(), cubeName) == 0) { return cube; } } if (fail) { throw MondrianResource.instance().MdxCubeNotFound.ex(cubeName); } return null; } /** * Converts an olap element (dimension, hierarchy, level or member) into * an expression representing a usage of that element in an MDX statement. 
*/ public static Exp createExpr(OlapElement element) { if (element instanceof Member) { Member member = (Member) element; return new MemberExpr(member); } else if (element instanceof Level) { Level level = (Level) element; return new LevelExpr(level); } else if (element instanceof Hierarchy) { Hierarchy hierarchy = (Hierarchy) element; return new HierarchyExpr(hierarchy); } else if (element instanceof Dimension) { Dimension dimension = (Dimension) element; return new DimensionExpr(dimension); } else if (element instanceof NamedSet) { NamedSet namedSet = (NamedSet) element; return new NamedSetExpr(namedSet); } else { throw Util.newInternal("Unexpected element type: " + element); } } public static Member lookupHierarchyRootMember( SchemaReader reader, Hierarchy hierarchy, Id.NameSegment memberName) { return lookupHierarchyRootMember( reader, hierarchy, memberName, MatchType.EXACT); } /** * Finds a root member of a hierarchy with a given name. * * @param hierarchy Hierarchy * @param memberName Name of root member * @return Member, or null if not found */ public static Member lookupHierarchyRootMember( SchemaReader reader, Hierarchy hierarchy, Id.NameSegment memberName, MatchType matchType) { // Lookup member at first level. // // Don't use access control. Suppose we cannot see the 'nation' level, // we still want to be able to resolve '[Customer].[USA].[CA]'. 
List<Member> rootMembers = reader.getHierarchyRootMembers(hierarchy); // if doing an inexact search on a non-all hierarchy, create // a member corresponding to the name we're searching for so // we can use it in a hierarchical search Member searchMember = null; if (!matchType.isExact() && !hierarchy.hasAll() && !rootMembers.isEmpty()) { searchMember = hierarchy.createMember( null, rootMembers.get(0).getLevel(), memberName.name, null); } int bestMatch = -1; int k = -1; for (Member rootMember : rootMembers) { ++k; int rc; // when searching on the ALL hierarchy, match must be exact if (matchType.isExact() || hierarchy.hasAll()) { rc = rootMember.getName().compareToIgnoreCase(memberName.name); } else { rc = FunUtil.compareSiblingMembers( rootMember, searchMember); } if (rc == 0) { return rootMember; } if (!hierarchy.hasAll()) { if (matchType == MatchType.BEFORE) { if (rc < 0 && (bestMatch == -1 || FunUtil.compareSiblingMembers( rootMember, rootMembers.get(bestMatch)) > 0)) { bestMatch = k; } } else if (matchType == MatchType.AFTER) { if (rc > 0 && (bestMatch == -1 || FunUtil.compareSiblingMembers( rootMember, rootMembers.get(bestMatch)) < 0)) { bestMatch = k; } } } } if (matchType == MatchType.EXACT_SCHEMA) { return null; } if (matchType != MatchType.EXACT && bestMatch != -1) { return rootMembers.get(bestMatch); } // If the first level is 'all', lookup member at second level. For // example, they could say '[USA]' instead of '[(All // Customers)].[USA]'. return (rootMembers.size() > 0 && rootMembers.get(0).isAll()) ? reader.lookupMemberChildByName( rootMembers.get(0), memberName, matchType) : null; } /** * Finds a named level in this hierarchy. Returns null if there is no * such level. 
*/ public static Level lookupHierarchyLevel(Hierarchy hierarchy, String s) { final Level[] levels = hierarchy.getLevels(); for (Level level : levels) { if (level.getName().equalsIgnoreCase(s)) { return level; } } return null; } /** * Finds the zero based ordinal of a Member among its siblings. */ public static int getMemberOrdinalInParent( SchemaReader reader, Member member) { Member parent = member.getParentMember(); List<Member> siblings = (parent == null) ? reader.getHierarchyRootMembers(member.getHierarchy()) : reader.getMemberChildren(parent); for (int i = 0; i < siblings.size(); i++) { if (siblings.get(i).equals(member)) { return i; } } throw Util.newInternal( "could not find member " + member + " amongst its siblings"); } /** * returns the first descendant on the level underneath parent. * If parent = [Time].[1997] and level = [Time].[Month], then * the member [Time].[1997].[Q1].[1] will be returned */ public static Member getFirstDescendantOnLevel( SchemaReader reader, Member parent, Level level) { Member m = parent; while (m.getLevel() != level) { List<Member> children = reader.getMemberChildren(m); m = children.get(0); } return m; } /** * Returns whether a string is null or empty. */ public static boolean isEmpty(String s) { return (s == null) || (s.length() == 0); } /** * Encloses a value in single-quotes, to make a SQL string value. Examples: * <code>singleQuoteForSql(null)</code> yields <code>NULL</code>; * <code>singleQuoteForSql("don't")</code> yields <code>'don''t'</code>. */ public static String singleQuoteString(String val) { StringBuilder buf = new StringBuilder(64); singleQuoteString(val, buf); return buf.toString(); } /** * Encloses a value in single-quotes, to make a SQL string value. Examples: * <code>singleQuoteForSql(null)</code> yields <code>NULL</code>; * <code>singleQuoteForSql("don't")</code> yields <code>'don''t'</code>. 
*/ public static void singleQuoteString(String val, StringBuilder buf) { buf.append('\''); String s0 = replace(val, "'", "''"); buf.append(s0); buf.append('\''); } /** * Creates a random number generator. * * @param seed Seed for random number generator. * If 0, generate a seed from the system clock and print the value * chosen. (This is effectively non-deterministic.) * If -1, generate a seed from an internal random number generator. * (This is deterministic, but ensures that different tests have * different seeds.) * * @return A random number generator. */ public static Random createRandom(long seed) { if (seed == 0) { seed = new Random().nextLong(); System.out.println("random: seed=" + seed); } else if (seed == -1 && metaRandom != null) { seed = metaRandom.nextLong(); } return new Random(seed); } /** * Returns whether a property is valid for a member of a given level. * It is valid if the property is defined at the level or at * an ancestor level, or if the property is a standard property such as * "FORMATTED_VALUE". * * @param propertyName Property name * @param level Level * @return Whether property is valid */ public static boolean isValidProperty( String propertyName, Level level) { return lookupProperty(level, propertyName) != null; } /** * Finds a member property called <code>propertyName</code> at, or above, * <code>level</code>. */ public static Property lookupProperty( Level level, String propertyName) { do { Property[] properties = level.getProperties(); for (Property property : properties) { if (property.getName().equals(propertyName)) { return property; } } level = level.getParentLevel(); } while (level != null); // Now try a standard property. 
boolean caseSensitive = MondrianProperties.instance().CaseSensitive.get(); final Property property = Property.lookup(propertyName, caseSensitive); if (property != null && property.isMemberProperty() && property.isStandard()) { return property; } return null; } /** * Insert a call to this method if you want to flag a piece of * undesirable code. * * @deprecated */ public static <T> T deprecated(T reason) { throw new UnsupportedOperationException(reason.toString()); } /** * Insert a call to this method if you want to flag a piece of * undesirable code. * * @deprecated */ public static <T> T deprecated(T reason, boolean fail) { if (fail) { throw new UnsupportedOperationException(reason.toString()); } else { return reason; } } public static List<Member> addLevelCalculatedMembers( SchemaReader reader, Level level, List<Member> members) { List<Member> calcMembers = reader.getCalculatedMembers(level.getHierarchy()); List<Member> calcMembersInThisLevel = new ArrayList<Member>(); for (Member calcMember : calcMembers) { if (calcMember.getLevel().equals(level)) { calcMembersInThisLevel.add(calcMember); } } if (!calcMembersInThisLevel.isEmpty()) { List<Member> newMemberList = new ConcatenableList<Member>(); newMemberList.addAll(members); newMemberList.addAll(calcMembersInThisLevel); return newMemberList; } return members; } /** * Returns an exception which indicates that a particular piece of * functionality should work, but a developer has not implemented it yet. */ public static RuntimeException needToImplement(Object o) { throw new UnsupportedOperationException("need to implement " + o); } /** * Returns an exception indicating that we didn't expect to find this value * here. 
*/ public static <T extends Enum<T>> RuntimeException badValue( Enum<T> anEnum) { return Util.newInternal( "Was not expecting value '" + anEnum + "' for enumeration '" + anEnum.getDeclaringClass().getName() + "' in this context"); } /** * Converts a list of SQL-style patterns into a Java regular expression. * * <p>For example, {"Foo_", "Bar%BAZ"} becomes "Foo.|Bar.*BAZ". * * @param wildcards List of SQL-style wildcard expressions * @return Regular expression */ public static String wildcardToRegexp(List<String> wildcards) { StringBuilder buf = new StringBuilder(); for (String value : wildcards) { if (buf.length() > 0) { buf.append('|'); } int i = 0; while (true) { int percent = value.indexOf('%', i); int underscore = value.indexOf('_', i); if (percent == -1 && underscore == -1) { if (i < value.length()) { buf.append(quotePattern(value.substring(i))); } break; } if (underscore >= 0 && (underscore < percent || percent < 0)) { if (i < underscore) { buf.append( quotePattern(value.substring(i, underscore))); } buf.append('.'); i = underscore + 1; } else if (percent >= 0 && (percent < underscore || underscore < 0)) { if (i < percent) { buf.append( quotePattern(value.substring(i, percent))); } buf.append(".*"); i = percent + 1; } else { throw new IllegalArgumentException(); } } } return buf.toString(); } /** * Converts a camel-case name to an upper-case name with underscores. * * <p>For example, <code>camelToUpper("FooBar")</code> returns "FOO_BAR". * * @param s Camel-case string * @return Upper-case string */ public static String camelToUpper(String s) { StringBuilder buf = new StringBuilder(s.length() + 10); int prevUpper = -1; for (int i = 0; i < s.length(); ++i) { char c = s.charAt(i); if (Character.isUpperCase(c)) { if (i > prevUpper + 1) { buf.append('_'); } prevUpper = i; } else { c = Character.toUpperCase(c); } buf.append(c); } return buf.toString(); } /** * Parses a comma-separated list. * * <p>If a value contains a comma, escape it with a second comma. 
 For
 * example, <code>parseCommaList("x,y,,z")</code> returns
 * <code>{"x", "y,z"}</code>.
 *
 * @param nameCommaList List of names separated by commas
 * @return List of names
 */
public static List<String> parseCommaList(String nameCommaList) {
    if (nameCommaList.equals("")) {
        return Collections.emptyList();
    }
    if (nameCommaList.endsWith(",")) {
        // Special treatment for list ending in ",", because split ignores
        // entries after separator.
        // Append a sentinel, parse recursively, then strip the sentinel
        // from the last entry (or drop it if it became its own entry).
        final String zzz = "zzz";
        final List<String> list = parseCommaList(nameCommaList + zzz);
        String last = list.get(list.size() - 1);
        if (last.equals(zzz)) {
            list.remove(list.size() - 1);
        } else {
            list.set(
                list.size() - 1,
                last.substring(0, last.length() - zzz.length()));
        }
        return list;
    }
    List<String> names = new ArrayList<String>();
    final String[] strings = nameCommaList.split(",");
    for (String string : strings) {
        final int count = names.size();
        if (count > 0 && names.get(count - 1).equals("")) {
            // The previous split entry was empty, meaning the source held
            // ",," — an escaped comma. Re-join this entry to its
            // predecessor with a literal "," between them.
            if (count == 1) {
                if (string.equals("")) {
                    names.add("");
                } else {
                    names.set(
                        0,
                        "," + string);
                }
            } else {
                names.set(
                    count - 2,
                    names.get(count - 2) + "," + string);
                names.remove(count - 1);
            }
        } else {
            names.add(string);
        }
    }
    return names;
}

/**
 * Returns an annotation of a particular class on a method. Returns the
 * default value if the annotation is not present, or in JDK 1.4.
 *
 * @param method Method containing annotation
 * @param annotationClassName Name of annotation class to find
 * @param defaultValue Value to return if annotation is not present
 * @return value of annotation
 */
public static <T> T getAnnotation(
    Method method, String annotationClassName, T defaultValue)
{
    // Delegates to the JDK-version-specific shim.
    return compatible.getAnnotation(
        method, annotationClassName, defaultValue);
}

/**
 * Closes and cancels a {@link Statement} using the correct methods
 * available on the current Java runtime.
 * <p>If errors are encountered while canceling a statement,
 * the message is logged in {@link Util}.
 * @param stmt The statement to cancel.
*/ public static void cancelStatement(Statement stmt) { compatible.cancelStatement(stmt); } public static MemoryInfo getMemoryInfo() { return compatible.getMemoryInfo(); } /** * Converts a list of a string. * * For example, * <code>commaList("foo", Arrays.asList({"a", "b"}))</code> * returns "foo(a, b)". * * @param s Prefix * @param list List * @return String representation of string */ public static <T> String commaList( String s, List<T> list) { final StringBuilder buf = new StringBuilder(s); buf.append("("); int k = -1; for (T t : list) { if (++k > 0) { buf.append(", "); } buf.append(t); } buf.append(")"); return buf.toString(); } /** * Makes a name distinct from other names which have already been used * and shorter than a length limit, adds it to the list, and returns it. * * @param name Suggested name, may not be unique * @param maxLength Maximum length of generated name * @param nameList Collection of names already used * * @return Unique name */ public static String uniquify( String name, int maxLength, Collection<String> nameList) { assert name != null; if (name.length() > maxLength) { name = name.substring(0, maxLength); } if (nameList.contains(name)) { String aliasBase = name; int j = 0; while (true) { name = aliasBase + j; if (name.length() > maxLength) { aliasBase = aliasBase.substring(0, aliasBase.length() - 1); continue; } if (!nameList.contains(name)) { break; } j++; } } nameList.add(name); return name; } /** * Returns whether a collection contains precisely one distinct element. * Returns false if the collection is empty, or if it contains elements * that are not the same as each other. 
* * @param collection Collection * @return boolean true if all values are same */ public static <T> boolean areOccurencesEqual( Collection<T> collection) { Iterator<T> it = collection.iterator(); if (!it.hasNext()) { // Collection is empty return false; } T first = it.next(); while (it.hasNext()) { T t = it.next(); if (!t.equals(first)) { return false; } } return true; } /** * Creates a memory-, CPU- and cache-efficient immutable list. * * @param t Array of members of list * @param <T> Element type * @return List containing the given members */ public static <T> List<T> flatList(T... t) { return _flatList(t, false); } /** * Creates a memory-, CPU- and cache-efficient immutable list, * always copying the contents. * * @param t Array of members of list * @param <T> Element type * @return List containing the given members */ public static <T> List<T> flatListCopy(T... t) { return _flatList(t, true); } /** * Creates a memory-, CPU- and cache-efficient immutable list, optionally * copying the list. * * @param copy Whether to always copy the list * @param t Array of members of list * @return List containing the given members */ private static <T> List<T> _flatList(T[] t, boolean copy) { switch (t.length) { case 0: return Collections.emptyList(); case 1: return Collections.singletonList(t[0]); case 2: return new Flat2List<T>(t[0], t[1]); case 3: return new Flat3List<T>(t[0], t[1], t[2]); default: // REVIEW: AbstractList contains a modCount field; we could // write our own implementation and reduce creation overhead a // bit. if (copy) { return Arrays.asList(t.clone()); } else { return Arrays.asList(t); } } } /** * Creates a memory-, CPU- and cache-efficient immutable list from an * existing list. The list is always copied. 
* * @param t Array of members of list * @param <T> Element type * @return List containing the given members */ public static <T> List<T> flatList(List<T> t) { switch (t.size()) { case 0: return Collections.emptyList(); case 1: return Collections.singletonList(t.get(0)); case 2: return new Flat2List<T>(t.get(0), t.get(1)); case 3: return new Flat3List<T>(t.get(0), t.get(1), t.get(2)); default: // REVIEW: AbstractList contains a modCount field; we could // write our own implementation and reduce creation overhead a // bit. //noinspection unchecked return (List<T>) Arrays.asList(t.toArray()); } } /** * Parses a locale string. * * <p>The inverse operation of {@link java.util.Locale#toString()}. * * @param localeString Locale string, e.g. "en" or "en_US" * @return Java locale object */ public static Locale parseLocale(String localeString) { String[] strings = localeString.split("_"); switch (strings.length) { case 1: return new Locale(strings[0]); case 2: return new Locale(strings[0], strings[1]); case 3: return new Locale(strings[0], strings[1], strings[2]); default: throw newInternal( "bad locale string '" + localeString + "'"); } } private static final Map<String, String> TIME_UNITS = Olap4jUtil.mapOf( "ns", "NANOSECONDS", "us", "MICROSECONDS", "ms", "MILLISECONDS", "s", "SECONDS", "m", "MINUTES", "h", "HOURS", "d", "DAYS"); /** * Parses an interval. * * <p>For example, "30s" becomes (30, {@link TimeUnit#SECONDS}); * "2us" becomes (2, {@link TimeUnit#MICROSECONDS}).</p> * * <p>Units m (minutes), h (hours) and d (days) are only available * in JDK 1.6 or later, because the corresponding constants are missing * from {@link TimeUnit} in JDK 1.5.</p> * * @param s String to parse * @param unit Default time unit; may be null * * @return Pair of value and time unit. 
Neither pair or its components are * null * * @throws NumberFormatException if unit is not present and there is no * default, or if number is not valid */ public static Pair<Long, TimeUnit> parseInterval( String s, TimeUnit unit) throws NumberFormatException { final String original = s; for (Map.Entry<String, String> entry : TIME_UNITS.entrySet()) { final String abbrev = entry.getKey(); if (s.endsWith(abbrev)) { final String full = entry.getValue(); try { unit = TimeUnit.valueOf(full); s = s.substring(0, s.length() - abbrev.length()); break; } catch (IllegalArgumentException e) { // ignore - MINUTES, HOURS, DAYS are not defined in JDK1.5 } } } if (unit == null) { throw new NumberFormatException( "Invalid time interval '" + original + "'. Does not contain a " + "time unit. (Suffix may be ns (nanoseconds), " + "us (microseconds), ms (milliseconds), s (seconds), " + "h (hours), d (days). For example, '20s' means 20 seconds.)"); } try { return Pair.of(new BigDecimal(s).longValue(), unit); } catch (NumberFormatException e) { throw new NumberFormatException( "Invalid time interval '" + original + "'"); } } /** * Converts a list of olap4j-style segments to a list of mondrian-style * segments. * * @param olap4jSegmentList List of olap4j segments * @return List of mondrian segments */ public static List<Id.Segment> convert( List<IdentifierSegment> olap4jSegmentList) { final List<Id.Segment> list = new ArrayList<Id.Segment>(); for (IdentifierSegment olap4jSegment : olap4jSegmentList) { list.add(convert(olap4jSegment)); } return list; } /** * Converts an olap4j-style segment to a mondrian-style segment. 
* * @param olap4jSegment olap4j segment * @return mondrian segment */ public static Id.Segment convert(IdentifierSegment olap4jSegment) { if (olap4jSegment instanceof NameSegment) { return convert((NameSegment) olap4jSegment); } else { return convert((KeySegment) olap4jSegment); } } private static Id.KeySegment convert(final KeySegment keySegment) { return new Id.KeySegment( new AbstractList<Id.NameSegment>() { public Id.NameSegment get(int index) { return convert(keySegment.getKeyParts().get(index)); } public int size() { return keySegment.getKeyParts().size(); } }); } private static Id.NameSegment convert(NameSegment nameSegment) { return new Id.NameSegment( nameSegment.getName(), convert(nameSegment.getQuoting())); } private static Id.Quoting convert(Quoting quoting) { switch (quoting) { case QUOTED: return Id.Quoting.QUOTED; case UNQUOTED: return Id.Quoting.UNQUOTED; case KEY: return Id.Quoting.KEY; default: throw Util.unexpected(quoting); } } /** * Applies a collection of filters to an iterable. * * @param iterable Iterable * @param conds Zero or more conditions * @param <T> * @return Iterable that returns only members of underlying iterable for * for which all conditions evaluate to true */ public static <T> Iterable<T> filter( final Iterable<T> iterable, final Functor1<Boolean, T>... 
conds) { final Functor1<Boolean, T>[] conds2 = optimizeConditions(conds); if (conds2.length == 0) { return iterable; } return new Iterable<T>() { public Iterator<T> iterator() { return new Iterator<T>() { final Iterator<T> iterator = iterable.iterator(); T next; boolean hasNext = moveToNext(); private boolean moveToNext() { outer: while (iterator.hasNext()) { next = iterator.next(); for (Functor1<Boolean, T> cond : conds2) { if (!cond.apply(next)) { continue outer; } } return true; } return false; } public boolean hasNext() { return hasNext; } public T next() { T t = next; hasNext = moveToNext(); return t; } public void remove() { throw new UnsupportedOperationException(); } }; } }; } private static <T> Functor1<Boolean, T>[] optimizeConditions( Functor1<Boolean, T>[] conds) { final List<Functor1<Boolean, T>> functor1List = new ArrayList<Functor1<Boolean, T>>(Arrays.asList(conds)); for (Iterator<Functor1<Boolean, T>> funcIter = functor1List.iterator(); funcIter.hasNext();) { Functor1<Boolean, T> booleanTFunctor1 = funcIter.next(); if (booleanTFunctor1 == trueFunctor()) { funcIter.remove(); } } if (functor1List.size() < conds.length) { //noinspection unchecked return functor1List.toArray(new Functor1[functor1List.size()]); } else { return conds; } } /** * Sorts a collection of {@link Comparable} objects and returns a list. * * @param collection Collection * @param <T> Element type * @return Sorted list */ public static <T extends Comparable> List<T> sort( Collection<T> collection) { Object[] a = collection.toArray(new Object[collection.size()]); Arrays.sort(a); return cast(Arrays.asList(a)); } /** * Sorts a collection of objects using a {@link java.util.Comparator} and returns a * list. 
* * @param collection Collection * @param comparator Comparator * @param <T> Element type * @return Sorted list */ public static <T> List<T> sort( Collection<T> collection, Comparator<T> comparator) { Object[] a = collection.toArray(new Object[collection.size()]); //noinspection unchecked Arrays.sort(a, (Comparator<? super Object>) comparator); return cast(Arrays.asList(a)); } public static List<IdentifierSegment> toOlap4j( final List<Id.Segment> segments) { return new AbstractList<IdentifierSegment>() { public IdentifierSegment get(int index) { return toOlap4j(segments.get(index)); } public int size() { return segments.size(); } }; } public static IdentifierSegment toOlap4j(Id.Segment segment) { switch (segment.quoting) { case KEY: return toOlap4j((Id.KeySegment) segment); default: return toOlap4j((Id.NameSegment) segment); } } private static KeySegment toOlap4j(final Id.KeySegment keySegment) { return new KeySegment( new AbstractList<NameSegment>() { public NameSegment get(int index) { return toOlap4j(keySegment.subSegmentList.get(index)); } public int size() { return keySegment.subSegmentList.size(); } }); } private static NameSegment toOlap4j(Id.NameSegment nameSegment) { return new NameSegment( null, nameSegment.name, toOlap4j(nameSegment.quoting)); } public static Quoting toOlap4j(Id.Quoting quoting) { return Quoting.valueOf(quoting.name()); } // TODO: move to IdentifierSegment public static boolean matches(IdentifierSegment segment, String name) { switch (segment.getQuoting()) { case KEY: return false; // FIXME case QUOTED: return equalName(segment.getName(), name); case UNQUOTED: return segment.getName().equalsIgnoreCase(name); default: throw unexpected(segment.getQuoting()); } } public static boolean matches( Member member, List<Id.Segment> nameParts) { if (Util.equalName(Util.implode(nameParts), member.getUniqueName())) { // exact match return true; } Id.Segment segment = nameParts.get(nameParts.size() - 1); while (member.getParentMember() != null) { if 
(!segment.matches(member.getName())) { return false; } member = member.getParentMember(); nameParts = nameParts.subList(0, nameParts.size() - 1); segment = nameParts.get(nameParts.size() - 1); } if (segment.matches(member.getName())) { return Util.equalName( member.getHierarchy().getUniqueName(), Util.implode(nameParts.subList(0, nameParts.size() - 1))); } else if (member.isAll()) { return Util.equalName( member.getHierarchy().getUniqueName(), Util.implode(nameParts)); } else { return false; } } public static RuntimeException newElementNotFoundException( int category, IdentifierNode identifierNode) { String type; switch (category) { case Category.Member: return MondrianResource.instance().MemberNotFound.ex( identifierNode.toString()); case Category.Unknown: type = "Element"; break; default: type = Category.instance().getDescription(category); } return newError(type + " '" + identifierNode + "' not found"); } /** * Calls {@link java.util.concurrent.Future#get()} and converts any * throwable into a non-checked exception. 
* * @param future Future * @param message Message to qualify wrapped exception * @param <T> Result type * @return Result */ public static <T> T safeGet(Future<T> future, String message) { try { return future.get(); } catch (InterruptedException e) { throw newError(e, message); } catch (ExecutionException e) { final Throwable cause = e.getCause(); if (cause instanceof RuntimeException) { throw (RuntimeException) cause; } else if (cause instanceof Error) { throw (Error) cause; } else { throw newError(cause, message); } } } public static <T> Set<T> newIdentityHashSetFake() { final HashMap<T, Boolean> map = new HashMap<T, Boolean>(); return new Set<T>() { public int size() { return map.size(); } public boolean isEmpty() { return map.isEmpty(); } public boolean contains(Object o) { return map.containsKey(o); } public Iterator<T> iterator() { return map.keySet().iterator(); } public Object[] toArray() { return map.keySet().toArray(); } public <T> T[] toArray(T[] a) { return map.keySet().toArray(a); } public boolean add(T t) { return map.put(t, Boolean.TRUE) == null; } public boolean remove(Object o) { return map.remove(o) == Boolean.TRUE; } public boolean containsAll(Collection<?> c) { return map.keySet().containsAll(c); } public boolean addAll(Collection<? extends T> c) { throw new UnsupportedOperationException(); } public boolean retainAll(Collection<?> c) { throw new UnsupportedOperationException(); } public boolean removeAll(Collection<?> c) { throw new UnsupportedOperationException(); } public void clear() { map.clear(); } }; } /** * Equivalent to {@link Timer#Timer(String, boolean)}. * (Introduced in JDK 1.5.) * * @param name the name of the associated thread * @param isDaemon true if the associated thread should run as a daemon * @return timer */ public static Timer newTimer(String name, boolean isDaemon) { return compatible.newTimer(name, isDaemon); } /** * As Arrays#binarySearch(Object[], int, int, Object), but * available pre-JDK 1.6. 
*/
public static <T extends Comparable<T>> int binarySearch(
    T[] ts, int start, int end, T t)
{
    // Delegate to the JDK-compatibility layer.
    return compatible.binarySearch(ts, start, end, t);
}

/**
 * Returns the intersection of two sorted sets. Does not modify either set.
 *
 * <p>Optimized for the case that both sets are {@link ArraySortedSet}.</p>
 *
 * @param set1 First set
 * @param set2 Second set
 * @return Intersection of the sets
 */
public static <E extends Comparable> SortedSet<E> intersect(
    SortedSet<E> set1,
    SortedSet<E> set2)
{
    if (set1.isEmpty()) {
        return set1;
    }
    if (set2.isEmpty()) {
        return set2;
    }
    if (!(set1 instanceof ArraySortedSet)
        || !(set2 instanceof ArraySortedSet))
    {
        // Generic fallback: copy one set and retain the other.
        final TreeSet<E> set = new TreeSet<E>(set1);
        set.retainAll(set2);
        return set;
    }
    // Merge-style linear intersection of the two sorted sequences.
    final Comparable<?>[] result =
        new Comparable[Math.min(set1.size(), set2.size())];
    final Iterator<E> it1 = set1.iterator();
    final Iterator<E> it2 = set2.iterator();
    int i = 0;
    E e1 = it1.next();
    E e2 = it2.next();
    for (;;) {
        final int compare = e1.compareTo(e2);
        if (compare == 0) {
            result[i++] = e1;
            if (!it1.hasNext() || !it2.hasNext()) {
                break;
            }
            e1 = it1.next();
            e2 = it2.next();
        } else if (compare > 0) {
            // FIX: was 'compare == 1'. Comparable.compareTo only fixes the
            // SIGN of its result, not its magnitude, so any positive value
            // other than 1 fell into the final branch and advanced the
            // wrong iterator, yielding an incorrect intersection.
            if (!it2.hasNext()) {
                break;
            }
            e2 = it2.next();
        } else {
            if (!it1.hasNext()) {
                break;
            }
            e1 = it1.next();
        }
    }
    return new ArraySortedSet(result, 0, i);
}

/**
 * Compares two integers using the same algorithm as
 * {@link Integer#compareTo(Integer)}.
 *
 * @param i0 First integer
 * @param i1 Second integer
 * @return Comparison (-1, 0 or 1)
 */
public static int compareIntegers(int i0, int i1) {
    return (i0 < i1 ? -1 : (i0 == i1 ? 0 : 1));
}

/**
 * Returns the last item in a list.
 *
 * @param list List
 * @param <T> Element type
 * @return Last item in the list
 * @throws IndexOutOfBoundsException if list is empty
 */
public static <T> T last(List<T> list) {
    return list.get(list.size() - 1);
}

/**
 * Returns the sole item in a list.
*
 * <p>If the list has 0 or more than one element, throws.</p>
 *
 * @param list List
 * @param <T> Element type
 * @return Sole item in the list
 * @throws IndexOutOfBoundsException if list is empty or has more than 1 elt
 */
public static <T> T only(List<T> list) {
    if (list.size() != 1) {
        throw new IndexOutOfBoundsException(
            "list " + list + " has " + list.size()
            + " elements, expected 1");
    }
    return list.get(0);
}

/**
 * Closes a JDBC result set, statement, and connection, ignoring any errors.
 * If any of them are null, that's fine.
 *
 * <p>If any of them throws a {@link SQLException}, returns the first
 * such exception, but always executes all closes.</p>
 *
 * @param resultSet Result set
 * @param statement Statement
 * @param connection Connection
 * @return First exception encountered while closing, or null on success
 */
public static SQLException close(
    ResultSet resultSet,
    Statement statement,
    Connection connection)
{
    SQLException firstException = null;
    if (resultSet != null) {
        try {
            // If no statement was supplied, also close the one that
            // produced the result set.
            if (statement == null) {
                statement = resultSet.getStatement();
            }
            resultSet.close();
        } catch (Throwable t) {
            firstException = new SQLException();
            firstException.initCause(t);
        }
    }
    if (statement != null) {
        try {
            statement.close();
        } catch (Throwable t) {
            // Remember only the first failure; keep closing regardless.
            if (firstException == null) {
                firstException = new SQLException();
                firstException.initCause(t);
            }
        }
    }
    if (connection != null) {
        try {
            connection.close();
        } catch (Throwable t) {
            if (firstException == null) {
                firstException = new SQLException();
                firstException.initCause(t);
            }
        }
    }
    return firstException;
}

/**
 * Creates a bitset with bits from {@code fromIndex} (inclusive) to
 * specified {@code toIndex} (exclusive) set to {@code true}.
 *
 * <p>For example, {@code bitSetBetween(0, 3)} returns a bit set with bits
 * {0, 1, 2} set.
 *
 * @param fromIndex Index of the first bit to be set.
 * @param toIndex Index after the last bit to be set.
* @return Bit set
 */
public static BitSet bitSetBetween(int fromIndex, int toIndex) {
    final BitSet bitSet = new BitSet();
    if (toIndex > fromIndex) {
        // Avoid http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6222207
        // "BitSet internal invariants may be violated"
        bitSet.set(fromIndex, toIndex);
    }
    return bitSet;
}

/** Placeholder value for a cell whose evaluation produced an error. */
public static class ErrorCellValue {
    public String toString() {
        return "#ERR";
    }
}

/**
 * Creates a typed array of a given size via reflection.
 *
 * @param clazz Element class
 * @param size Array length
 * @param <T> Element type
 * @return New array of the given element type and length
 */
@SuppressWarnings({"unchecked"})
public static <T> T[] genericArray(Class<T> clazz, int size) {
    return (T[]) Array.newInstance(clazz, size);
}

/**
 * Throws an internal error if condition is not true. It would be called
 * <code>assert</code>, but that is a keyword as of JDK 1.4.
 */
public static void assertTrue(boolean b) {
    if (!b) {
        throw newInternal("assert failed");
    }
}

/**
 * Throws an internal error with the given message if condition is not
 * true. It would be called <code>assert</code>, but that is a keyword as
 * of JDK 1.4.
 */
public static void assertTrue(boolean b, String message) {
    if (!b) {
        throw newInternal("assert failed: " + message);
    }
}

/**
 * Creates an internal error with a given message.
 */
public static RuntimeException newInternal(String message) {
    return MondrianResource.instance().Internal.ex(message);
}

/**
 * Creates an internal error with a given message and cause.
 */
public static RuntimeException newInternal(Throwable e, String message) {
    return MondrianResource.instance().Internal.ex(message, e);
}

/**
 * Creates a non-internal error. Currently implemented in terms of
 * internal errors, but later we will create resourced messages.
 */
public static RuntimeException newError(String message) {
    return newInternal(message);
}

/**
 * Creates a non-internal error. Currently implemented in terms of
 * internal errors, but later we will create resourced messages.
 */
public static RuntimeException newError(Throwable e, String message) {
    return newInternal(e, message);
}

/**
 * Returns an exception indicating that we didn't expect to find this value
 * here.
* * @param value Value */ public static RuntimeException unexpected(Enum value) { return Util.newInternal( "Was not expecting value '" + value + "' for enumeration '" + value.getClass().getName() + "' in this context"); } /** * Checks that a precondition (declared using the javadoc <code>@pre</code> * tag) is satisfied. * * @param b The value of executing the condition */ public static void assertPrecondition(boolean b) { assertTrue(b); } /** * Checks that a precondition (declared using the javadoc <code>@pre</code> * tag) is satisfied. For example, * * <blockquote><pre>void f(String s) { * Util.assertPrecondition(s != null, "s != null"); * ... * }</pre></blockquote> * * @param b The value of executing the condition * @param condition The text of the condition */ public static void assertPrecondition(boolean b, String condition) { assertTrue(b, condition); } /** * Checks that a postcondition (declared using the javadoc * <code>@post</code> tag) is satisfied. * * @param b The value of executing the condition */ public static void assertPostcondition(boolean b) { assertTrue(b); } /** * Checks that a postcondition (declared using the javadoc * <code>@post</code> tag) is satisfied. * * @param b The value of executing the condition */ public static void assertPostcondition(boolean b, String condition) { assertTrue(b, condition); } /** * Converts an error into an array of strings, the most recent error first. * * @param e the error; may be null. Errors are chained according to their * {@link Throwable#getCause cause}. */ public static String[] convertStackToString(Throwable e) { List<String> list = new ArrayList<String>(); while (e != null) { String sMsg = getErrorMessage(e); list.add(sMsg); e = e.getCause(); } return list.toArray(new String[list.size()]); } /** * Constructs the message associated with an arbitrary Java error, making * up one based on the stack trace if there is none. 
As
 * {@link #getErrorMessage(Throwable,boolean)}, but does not print the
 * class name if the exception is derived from {@link java.sql.SQLException}
 * or is exactly a {@link java.lang.Exception}.
 *
 * @param err the error
 * @return Message describing the error
 */
public static String getErrorMessage(Throwable err) {
    // Suppress the class-name prefix for plain SQL exceptions and bare
    // java.lang.Exception, whose messages stand on their own.
    boolean prependClassName =
        !(err instanceof java.sql.SQLException
          || err.getClass() == java.lang.Exception.class);
    return getErrorMessage(err, prependClassName);
}

/**
 * Constructs the message associated with an arbitrary Java error, making
 * up one based on the stack trace if there is none.
 *
 * @param err the error
 * @param prependClassName should the error be preceded by the
 *     class name of the Java exception?  defaults to false, unless the
 *     error is derived from {@link java.sql.SQLException} or is exactly a
 *     {@link java.lang.Exception}
 * @return Message describing the error
 */
public static String getErrorMessage(
    Throwable err,
    boolean prependClassName)
{
    String errMsg = err.getMessage();
    if ((errMsg == null) || (err instanceof RuntimeException)) {
        // No message, or a runtime error: fall back to the full stack
        // trace so no diagnostic information is lost.
        StringWriter sw = new StringWriter();
        PrintWriter pw = new PrintWriter(sw);
        err.printStackTrace(pw);
        return sw.toString();
    } else {
        return (prependClassName)
            ? err.getClass().getName() + ": " + errMsg
            : errMsg;
    }
}

/**
 * If one of the causes of an exception is of a particular class, returns
 * that cause. Otherwise returns null.
 *
 * @param e Exception
 * @param clazz Desired class
 * @param <T> Class
 * @return Cause of given class, or null
 */
public static <T extends Throwable> T getMatchingCause(
    Throwable e, Class<T> clazz)
{
    for (;;) {
        if (clazz.isInstance(e)) {
            return clazz.cast(e);
        }
        final Throwable cause = e.getCause();
        // Stop at the root cause (or a self-referential cause) to avoid
        // looping forever.
        if (cause == null || cause == e) {
            return null;
        }
        e = cause;
    }
}

/**
 * Converts an expression to a string.
 */
public static String unparse(Exp exp) {
    StringWriter sw = new StringWriter();
    PrintWriter pw = new PrintWriter(sw);
    exp.unparse(pw);
    return sw.toString();
}

/**
 * Converts a query to a string.
*/
public static String unparse(Query query) {
    StringWriter sw = new StringWriter();
    PrintWriter pw = new QueryPrintWriter(sw);
    query.unparse(pw);
    return sw.toString();
}

/**
 * Creates a file-protocol URL for the given file.
 */
public static URL toURL(File file) throws MalformedURLException {
    String path = file.getAbsolutePath();
    // This is a bunch of weird code that is required to
    // make a valid URL on the Windows platform, due
    // to inconsistencies in what getAbsolutePath returns.
    String fs = System.getProperty("file.separator");
    if (fs.length() == 1) {
        char sep = fs.charAt(0);
        if (sep != '/') {
            path = path.replace(sep, '/');
        }
        if (path.charAt(0) != '/') {
            path = '/' + path;
        }
    }
    path = "file://" + path;
    return new URL(path);
}

/**
 * <code>PropertyList</code> is an order-preserving list of key-value
 * pairs. Lookup is case-insensitive, but the case of keys is preserved.
 */
public static class PropertyList
    implements Iterable<Pair<String, String>>, Serializable
{
    // Backing store of (key, value) pairs, in insertion order.
    // NOTE(review): this initializer is redundant — both constructors
    // reassign the field.
    List<Pair<String, String>> list =
        new ArrayList<Pair<String, String>>();

    public PropertyList() {
        this.list = new ArrayList<Pair<String, String>>();
    }

    private PropertyList(List<Pair<String, String>> list) {
        this.list = list;
    }

    @SuppressWarnings({"CloneDoesntCallSuperClone"})
    @Override
    public PropertyList clone() {
        // Copies the list; the Pair objects themselves are shared.
        return new PropertyList(new ArrayList<Pair<String, String>>(list));
    }

    public String get(String key) {
        return get(key, null);
    }

    // Returns the value of the first pair whose key matches
    // (case-insensitively), or defaultValue if absent.
    public String get(String key, String defaultValue) {
        for (int i = 0, n = list.size(); i < n; i++) {
            Pair<String, String> pair = list.get(i);
            if (pair.left.equalsIgnoreCase(key)) {
                return pair.right;
            }
        }
        return defaultValue;
    }

    // Sets the value of an existing key (returning the previous value),
    // or appends a new pair and returns null.
    public String put(String key, String value) {
        for (int i = 0, n = list.size(); i < n; i++) {
            Pair<String, String> pair = list.get(i);
            if (pair.left.equalsIgnoreCase(key)) {
                String old = pair.right;
                if (key.equalsIgnoreCase("Provider")) {
                    // Unlike all other properties, later values of
                    // "Provider" do not supersede
                } else {
                    pair.right = value;
                }
return old;
            }
        }
        list.add(new Pair<String, String>(key, value));
        return null;
    }

    // Removes every pair whose key matches (case-insensitively); returns
    // whether any pair was removed.
    public boolean remove(String key) {
        boolean found = false;
        for (int i = 0; i < list.size(); i++) {
            Pair<String, String> pair = list.get(i);
            if (pair.getKey().equalsIgnoreCase(key)) {
                list.remove(i);
                found = true;
                // Step back so the element shifted into slot i is also
                // examined.
                --i;
            }
        }
        return found;
    }

    // Renders the list in connect-string form, e.g. "a=1; b='x;y'".
    public String toString() {
        StringBuilder sb = new StringBuilder(64);
        for (int i = 0, n = list.size(); i < n; i++) {
            Pair<String, String> pair = list.get(i);
            if (i > 0) {
                sb.append("; ");
            }
            sb.append(pair.left);
            sb.append('=');
            final String right = pair.right;
            if (right == null) {
                sb.append("'null'");
            } else {
                // Quote a property value if is has a semi colon in it
                // 'xxx;yyy'. Escape any single-quotes by doubling them.
                final int needsQuote = right.indexOf(';');
                if (needsQuote >= 0) {
                    // REVIEW: This logic leaves off the leading/trailing
                    // quote if the property value already has a
                    // leading/trailing quote. Doesn't seem right to me.
                    if (right.charAt(0) != '\'') {
                        sb.append("'");
                    }
                    sb.append(replace(right, "'", "''"));
                    if (right.charAt(right.length() - 1) != '\'') {
                        sb.append("'");
                    }
                } else {
                    sb.append(right);
                }
            }
        }
        return sb.toString();
    }

    public Iterator<Pair<String, String>> iterator() {
        return list.iterator();
    }
}

/**
 * Converts an OLE DB connect string into a {@link PropertyList}.
 *
 * <p> For example, <code>"Provider=MSOLAP; DataSource=LOCALHOST;"</code>
 * becomes the set of (key, value) pairs <code>{("Provider","MSOLAP"),
 * ("DataSource", "LOCALHOST")}</code>. Another example is
 * <code>Provider='sqloledb';Data Source='MySqlServer';Initial
 * Catalog='Pubs';Integrated Security='SSPI';</code>.
 *
 * <p> This method implements as much as possible of the <a
 * href="http://msdn.microsoft.com/library/en-us/oledb/htm/oledbconnectionstringsyntax.asp"
 * target="_blank">OLE DB connect string syntax
 * specification</a>. To find what it <em>actually</em> does, take
 * a look at the <code>mondrian.olap.UtilTestCase</code> test case.
*/ public static PropertyList parseConnectString(String s) { return new ConnectStringParser(s).parse(); } private static class ConnectStringParser { private final String s; private final int n; private int i; private final StringBuilder nameBuf; private final StringBuilder valueBuf; private ConnectStringParser(String s) { this.s = s; this.i = 0; this.n = s.length(); this.nameBuf = new StringBuilder(64); this.valueBuf = new StringBuilder(64); } PropertyList parse() { PropertyList list = new PropertyList(); while (i < n) { parsePair(list); } return list; } /** * Reads "name=value;" or "name=value<EOF>". */ void parsePair(PropertyList list) { String name = parseName(); if (name == null) { return; } String value; if (i >= n) { value = ""; } else if (s.charAt(i) == ';') { i++; value = ""; } else { value = parseValue(); } list.put(name, value); } /** * Reads "name=". Name can contain equals sign if equals sign is * doubled. Returns null if there is no name to read. */ String parseName() { nameBuf.setLength(0); while (true) { char c = s.charAt(i); switch (c) { case '=': i++; if (i < n && (c = s.charAt(i)) == '=') { // doubled equals sign; take one of them, and carry on i++; nameBuf.append(c); break; } String name = nameBuf.toString(); name = name.trim(); return name; case ' ': if (nameBuf.length() == 0) { // ignore preceding spaces i++; if (i >= n) { // there is no name, e.g. 
trailing spaces after // semicolon, 'x=1; y=2; ' return null; } break; } else { // fall through } default: nameBuf.append(c); i++; if (i >= n) { return nameBuf.toString().trim(); } } } } /** * Reads "value;" or "value<EOF>" */ String parseValue() { char c; // skip over leading white space while ((c = s.charAt(i)) == ' ') { i++; if (i >= n) { return ""; } } if (c == '"' || c == '\'') { String value = parseQuoted(c); // skip over trailing white space while (i < n && (c = s.charAt(i)) == ' ') { i++; } if (i >= n) { return value; } else if (s.charAt(i) == ';') { i++; return value; } else { throw new RuntimeException( "quoted value ended too soon, at position " + i + " in '" + s + "'"); } } else { String value; int semi = s.indexOf(';', i); if (semi >= 0) { value = s.substring(i, semi); i = semi + 1; } else { value = s.substring(i); i = n; } return value.trim(); } } /** * Reads a string quoted by a given character. Occurrences of the * quoting character must be doubled. For example, * <code>parseQuoted('"')</code> reads <code>"a ""new"" string"</code> * and returns <code>a "new" string</code>. */ String parseQuoted(char q) { char c = s.charAt(i++); Util.assertTrue(c == q); valueBuf.setLength(0); while (i < n) { c = s.charAt(i); if (c == q) { i++; if (i < n) { c = s.charAt(i); if (c == q) { valueBuf.append(c); i++; continue; } } return valueBuf.toString(); } else { valueBuf.append(c); i++; } } throw new RuntimeException( "Connect string '" + s + "' contains unterminated quoted value '" + valueBuf.toString() + "'"); } } /** * Combines two integers into a hash code. */ public static int hash(int i, int j) { return (i << 4) ^ j; } /** * Computes a hash code from an existing hash code and an object (which * may be null). */ public static int hash(int h, Object o) { int k = (o == null) ? 0 : o.hashCode(); return ((h << 4) | h) ^ k; } /** * Computes a hash code from an existing hash code and an array of objects * (which may be null). 
*/
public static int hashArray(int h, Object [] a) {
    // The hashcode for a null array and an empty array should be different
    // than h, so use magic numbers.
    if (a == null) {
        return hash(h, 19690429);
    }
    if (a.length == 0) {
        return hash(h, 19690721);
    }
    for (Object anA : a) {
        h = hash(h, anA);
    }
    return h;
}

/**
 * Concatenates one or more arrays.
 *
 * <p>Resulting array has same element type as first array. Each arrays may
 * be empty, but must not be null.
 *
 * @param a0 First array
 * @param as Zero or more subsequent arrays
 * @return Array containing all elements
 */
public static <T> T[] appendArrays(
    T[] a0,
    T[]... as)
{
    // First pass: compute the total length.
    int n = a0.length;
    for (T[] a : as) {
        n += a.length;
    }
    // Copy a0 into an array of the full size, then append the others.
    T[] copy = Util.copyOf(a0, n);
    n = a0.length;
    for (T[] a : as) {
        System.arraycopy(a, 0, copy, n, a.length);
        n += a.length;
    }
    return copy;
}

/**
 * Adds an object to the end of an array. The resulting array is of the
 * same type (e.g. <code>String[]</code>) as the input array.
 *
 * @param a Array
 * @param o Element
 * @return New array containing original array plus element
 *
 * @see #appendArrays
 */
public static <T> T[] append(T[] a, T o) {
    T[] a2 = Util.copyOf(a, a.length + 1);
    a2[a.length] = o;
    return a2;
}

/**
 * Like <code>{@link java.util.Arrays}.copyOf(double[], int)</code>, but
 * exists prior to JDK 1.6.
 *
 * @param original the array to be copied
 * @param newLength the length of the copy to be returned
 * @return a copy of the original array, truncated or padded with zeros
 *     to obtain the specified length
 */
public static double[] copyOf(double[] original, int newLength) {
    double[] copy = new double[newLength];
    System.arraycopy(
        original, 0, copy, 0, Math.min(original.length, newLength));
    return copy;
}

/**
 * Like <code>{@link java.util.Arrays}.copyOf(int[], int)</code>, but
 * exists prior to JDK 1.6.
* * @param original the array to be copied * @param newLength the length of the copy to be returned * @return a copy of the original array, truncated or padded with zeros * to obtain the specified length */ public static int[] copyOf(int[] original, int newLength) { int[] copy = new int[newLength]; System.arraycopy( original, 0, copy, 0, Math.min(original.length, newLength)); return copy; } /** * Like <code>{@link java.util.Arrays}.copyOf(long[], int)</code>, but * exists prior to JDK 1.6. * * @param original the array to be copied * @param newLength the length of the copy to be returned * @return a copy of the original array, truncated or padded with zeros * to obtain the specified length */ public static long[] copyOf(long[] original, int newLength) { long[] copy = new long[newLength]; System.arraycopy( original, 0, copy, 0, Math.min(original.length, newLength)); return copy; } /** * Like <code>{@link java.util.Arrays}.copyOf(Object[], int)</code>, but * exists prior to JDK 1.6. * * @param original the array to be copied * @param newLength the length of the copy to be returned * @return a copy of the original array, truncated or padded with zeros * to obtain the specified length */ public static <T> T[] copyOf(T[] original, int newLength) { //noinspection unchecked return (T[]) copyOf(original, newLength, original.getClass()); } /** * Copies the specified array. * * @param original the array to be copied * @param newLength the length of the copy to be returned * @param newType the class of the copy to be returned * @return a copy of the original array, truncated or padded with nulls * to obtain the specified length */ public static <T, U> T[] copyOf( U[] original, int newLength, Class<? extends T[]> newType) { @SuppressWarnings({"unchecked", "RedundantCast"}) T[] copy = ((Object)newType == (Object)Object[].class) ? 
(T[]) new Object[newLength] : (T[]) Array.newInstance(newType.getComponentType(), newLength); //noinspection SuspiciousSystemArraycopy System.arraycopy( original, 0, copy, 0, Math.min(original.length, newLength)); return copy; } /** * Returns the cumulative amount of time spent accessing the database. * * @deprecated Use {@link mondrian.server.monitor.Monitor#getServer()} and * {@link mondrian.server.monitor.ServerInfo#sqlStatementExecuteNanos}; * will be removed in 4.0. */ public static long dbTimeMillis() { return databaseMillis; } /** * Adds to the cumulative amount of time spent accessing the database. * * @deprecated Will be removed in 4.0. */ public static void addDatabaseTime(long millis) { databaseMillis += millis; } /** * Returns the system time less the time spent accessing the database. * Use this method to figure out how long an operation took: call this * method before an operation and after an operation, and the difference * is the amount of non-database time spent. * * @deprecated Will be removed in 4.0. */ public static long nonDbTimeMillis() { final long systemMillis = System.currentTimeMillis(); return systemMillis - databaseMillis; } /** * Creates a very simple implementation of {@link Validator}. (Only * useful for resolving trivial expressions.) */ public static Validator createSimpleValidator(final FunTable funTable) { return new Validator() { public Query getQuery() { return null; } public SchemaReader getSchemaReader() { throw new UnsupportedOperationException(); } public Exp validate(Exp exp, boolean scalar) { return exp; } public void validate(ParameterExpr parameterExpr) { } public void validate(MemberProperty memberProperty) { } public void validate(QueryAxis axis) { } public void validate(Formula formula) { } public FunDef getDef(Exp[] args, String name, Syntax syntax) { // Very simple resolution. Assumes that there is precisely // one resolver (i.e. no overloading) and no argument // conversions are necessary. 
List<Resolver> resolvers = funTable.getResolvers(name, syntax); final Resolver resolver = resolvers.get(0); final List<Resolver.Conversion> conversionList = new ArrayList<Resolver.Conversion>(); final FunDef def = resolver.resolve(args, this, conversionList); assert conversionList.isEmpty(); return def; } public boolean alwaysResolveFunDef() { return false; } public boolean canConvert( int ordinal, Exp fromExp, int to, List<Resolver.Conversion> conversions) { return true; } public boolean requiresExpression() { return false; } public FunTable getFunTable() { return funTable; } public Parameter createOrLookupParam( boolean definition, String name, Type type, Exp defaultExp, String description) { return null; } }; } /** * Reads a Reader until it returns EOF and returns the contents as a String. * * @param rdr Reader to Read. * @param bufferSize size of buffer to allocate for reading. * @return content of Reader as String * @throws IOException on I/O error */ public static String readFully(final Reader rdr, final int bufferSize) throws IOException { if (bufferSize <= 0) { throw new IllegalArgumentException( "Buffer size must be greater than 0"); } final char[] buffer = new char[bufferSize]; final StringBuilder buf = new StringBuilder(bufferSize); int len; while ((len = rdr.read(buffer)) != -1) { buf.append(buffer, 0, len); } return buf.toString(); } /** * Reads an input stream until it returns EOF and returns the contents as an * array of bytes. * * @param in Input stream * @param bufferSize size of buffer to allocate for reading. 
* @return content of stream as an array of bytes * @throws IOException on I/O error */ public static byte[] readFully(final InputStream in, final int bufferSize) throws IOException { if (bufferSize <= 0) { throw new IllegalArgumentException( "Buffer size must be greater than 0"); } final byte[] buffer = new byte[bufferSize]; final ByteArrayOutputStream baos = new ByteArrayOutputStream(bufferSize); int len; while ((len = in.read(buffer)) != -1) { baos.write(buffer, 0, len); } return baos.toByteArray(); } /** * Returns the contents of a URL, substituting tokens. * * <p>Replaces the tokens "${key}" if the map is not null and "key" occurs * in the key-value map. * * <p>If the URL string starts with "inline:" the contents are the * rest of the URL. * * @param urlStr URL string * @param map Key/value map * @return Contents of URL with tokens substituted * @throws IOException on I/O error */ public static String readURL(final String urlStr, Map<String, String> map) throws IOException { if (urlStr.startsWith("inline:")) { String content = urlStr.substring("inline:".length()); if (map != null) { content = Util.replaceProperties(content, map); } return content; } else { final URL url = new URL(urlStr); return readURL(url, map); } } /** * Returns the contents of a URL. * * @param url URL * @return Contents of URL * @throws IOException on I/O error */ public static String readURL(final URL url) throws IOException { return readURL(url, null); } /** * Returns the contents of a URL, substituting tokens. * * <p>Replaces the tokens "${key}" if the map is not null and "key" occurs * in the key-value map. 
* * @param url URL * @param map Key/value map * @return Contents of URL with tokens substituted * @throws IOException on I/O error */ public static String readURL( final URL url, Map<String, String> map) throws IOException { final Reader r = new BufferedReader(new InputStreamReader(url.openStream())); final int BUF_SIZE = 8096; try { String xmlCatalog = readFully(r, BUF_SIZE); xmlCatalog = Util.replaceProperties(xmlCatalog, map); return xmlCatalog; } finally { r.close(); } } /** * Gets content via Apache VFS. File must exist and have content * * @param url String * @return Apache VFS FileContent for further processing * @throws FileSystemException on error */ public static InputStream readVirtualFile(String url) throws FileSystemException { // Treat catalogUrl as an Apache VFS (Virtual File System) URL. // VFS handles all of the usual protocols (http:, file:) // and then some. FileSystemManager fsManager = VFS.getManager(); if (fsManager == null) { throw newError("Cannot get virtual file system manager"); } // Workaround VFS bug. if (url.startsWith("file://localhost")) { url = url.substring("file://localhost".length()); } if (url.startsWith("file:")) { url = url.substring("file:".length()); } // work around for VFS bug not closing http sockets // (Mondrian-585) if (url.startsWith("http")) { try { return new URL(url).openStream(); } catch (IOException e) { throw newError( "Could not read URL: " + url); } } File userDir = new File("").getAbsoluteFile(); FileObject file = fsManager.resolveFile(userDir, url); FileContent fileContent = null; try { // Because of VFS caching, make sure we refresh to get the latest // file content. This refresh may possibly solve the following // workaround for defect MONDRIAN-508, but cannot be tested, so we // will leave the work around for now. file.refresh(); // Workaround to defect MONDRIAN-508. For HttpFileObjects, verifies // the URL of the file retrieved matches the URL passed in. 
A VFS // cache bug can cause it to treat URLs with different parameters // as the same file (e.g. http://blah.com?param=A, // http://blah.com?param=B) if (file instanceof HttpFileObject && !file.getName().getURI().equals(url)) { fsManager.getFilesCache().removeFile( file.getFileSystem(), file.getName()); file = fsManager.resolveFile(userDir, url); } if (!file.isReadable()) { throw newError( "Virtual file is not readable: " + url); } fileContent = file.getContent(); } finally { file.close(); } if (fileContent == null) { throw newError( "Cannot get virtual file content: " + url); } return fileContent.getInputStream(); } public static String readVirtualFileAsString( String catalogUrl) throws IOException { InputStream in = readVirtualFile(catalogUrl); try { return IOUtils.toString(in); } finally { IOUtils.closeQuietly(in); } } /** * Converts a {@link Properties} object to a string-to-string {@link Map}. * * @param properties Properties * @return String-to-string map */ public static Map<String, String> toMap(final Properties properties) { return new AbstractMap<String, String>() { @SuppressWarnings({"unchecked"}) public Set<Entry<String, String>> entrySet() { return (Set) properties.entrySet(); } }; } /** * Replaces tokens in a string. * * <p>Replaces the tokens "${key}" if "key" occurs in the key-value map. * Otherwise "${key}" is left in the string unchanged. * * @param text Source string * @param env Map of key-value pairs * @return String with tokens substituted */ public static String replaceProperties( String text, Map<String, String> env) { // As of JDK 1.5, cannot use StringBuilder - appendReplacement requires // the antediluvian StringBuffer. 
StringBuffer buf = new StringBuffer(text.length() + 200); Pattern pattern = Pattern.compile("\\$\\{([^${}]+)\\}"); Matcher matcher = pattern.matcher(text); while (matcher.find()) { String varName = matcher.group(1); String varValue = env.get(varName); if (varValue != null) { matcher.appendReplacement(buf, varValue); } else { matcher.appendReplacement(buf, "\\${$1}"); } } matcher.appendTail(buf); return buf.toString(); } public static String printMemory() { return printMemory(null); } public static String printMemory(String msg) { final Runtime rt = Runtime.getRuntime(); final long freeMemory = rt.freeMemory(); final long totalMemory = rt.totalMemory(); final StringBuilder buf = new StringBuilder(64); buf.append("FREE_MEMORY:"); if (msg != null) { buf.append(msg); buf.append(':'); } buf.append(' '); buf.append(freeMemory / 1024); buf.append("kb "); long hundredths = (freeMemory * 10000) / totalMemory; buf.append(hundredths / 100); hundredths %= 100; if (hundredths >= 10) { buf.append('.'); } else { buf.append(".0"); } buf.append(hundredths); buf.append('%'); return buf.toString(); } /** * Casts a Set to a Set with a different element type. * * @param set Set * @return Set of desired type */ @SuppressWarnings({"unchecked"}) public static <T> Set<T> cast(Set<?> set) { return (Set<T>) set; } /** * Casts a List to a List with a different element type. * * @param list List * @return List of desired type */ @SuppressWarnings({"unchecked"}) public static <T> List<T> cast(List<?> list) { return (List<T>) list; } /** * Returns whether it is safe to cast a collection to a collection with a * given element type. 
* * @param collection Collection * @param clazz Target element type * @param <T> Element type * @return Whether all not-null elements of the collection are instances of * element type */ public static <T> boolean canCast( Collection<?> collection, Class<T> clazz) { for (Object o : collection) { if (o != null && !clazz.isInstance(o)) { return false; } } return true; } /** * Casts a collection to iterable. * * Under JDK 1.4, {@link Collection} objects do not implement * {@link Iterable}, so this method inserts a casting wrapper. (Since * Iterable does not exist under JDK 1.4, they will have been compiled * under JDK 1.5 or later, then retrowoven to 1.4 class format. References * to Iterable will have been replaced with references to * <code>com.rc.retroweaver.runtime.Retroweaver_</code>. * * <p>Under later JDKs this method is trivial. This method can be deleted * when we discontinue support for JDK 1.4. * * @param iterable Object which ought to be iterable * @param <T> Element type * @return Object cast to Iterable */ public static <T> Iterable<T> castToIterable( final Object iterable) { if (Util.Retrowoven && !(iterable instanceof Iterable)) { return new Iterable<T>() { public Iterator<T> iterator() { return ((Collection<T>) iterable).iterator(); } }; } return (Iterable<T>) iterable; } /** * Looks up an enumeration by name, returning null if null or not valid. * * @param clazz Enumerated type * @param name Name of constant */ public static <E extends Enum<E>> E lookup(Class<E> clazz, String name) { return lookup(clazz, name, null); } /** * Looks up an enumeration by name, returning a given default value if null * or not valid. 
* * @param clazz Enumerated type * @param name Name of constant * @param defaultValue Default value if constant is not found * @return Value, or null if name is null or value does not exist */ public static <E extends Enum<E>> E lookup( Class<E> clazz, String name, E defaultValue) { if (name == null) { return defaultValue; } try { return Enum.valueOf(clazz, name); } catch (IllegalArgumentException e) { return defaultValue; } } /** * Make a BigDecimal from a double. On JDK 1.5 or later, the BigDecimal * precision reflects the precision of the double while with JDK 1.4 * this is not the case. * * @param d the input double * @return the BigDecimal */ public static BigDecimal makeBigDecimalFromDouble(double d) { return compatible.makeBigDecimalFromDouble(d); } /** * Returns a literal pattern String for the specified String. * * <p>Specification as for {@link Pattern#quote(String)}, which was * introduced in JDK 1.5. * * @param s The string to be literalized * @return A literal string replacement */ public static String quotePattern(String s) { return compatible.quotePattern(s); } /** * Generates a unique id. * * <p>From JDK 1.5 onwards, uses a {@code UUID}. * * @return A unique id */ public static String generateUuidString() { return compatible.generateUuidString(); } /** * Compiles a script to yield a Java interface. * * <p>Only valid JDK 1.6 and higher; fails on JDK 1.5 and earlier.</p> * * @param iface Interface script should implement * @param script Script code * @param engineName Name of engine (e.g. "JavaScript") * @param <T> Interface * @return Object that implements given interface */ public static <T> T compileScript( Class<T> iface, String script, String engineName) { return compatible.compileScript(iface, script, engineName); } /** * Removes a thread local from the current thread. 
* * <p>From JDK 1.5 onwards, calls {@link ThreadLocal#remove()}; before * that, no-ops.</p> * * @param threadLocal Thread local * @param <T> Type */ public static <T> void threadLocalRemove(ThreadLocal<T> threadLocal) { compatible.threadLocalRemove(threadLocal); } /** * Creates a hash set that, like {@link java.util.IdentityHashMap}, * compares keys using identity. * * @param <T> Element type * @return Set */ public static <T> Set<T> newIdentityHashSet() { return compatible.newIdentityHashSet(); } /** * Creates a new udf instance from the given udf class. * * @param udfClass the class to create new instance for * @param functionName Function name, or null * @return an instance of UserDefinedFunction */ public static UserDefinedFunction createUdf( Class<? extends UserDefinedFunction> udfClass, String functionName) { // Instantiate class with default constructor. UserDefinedFunction udf; String className = udfClass.getName(); String functionNameOrEmpty = functionName == null ? "" : functionName; // Find a constructor. Constructor<?> constructor; Object[] args = {}; // 0. Check that class is public and top-level or static. if (!Modifier.isPublic(udfClass.getModifiers()) || (udfClass.getEnclosingClass() != null && !Modifier.isStatic(udfClass.getModifiers()))) { throw MondrianResource.instance().UdfClassMustBePublicAndStatic.ex( functionName, className); } // 1. Look for a constructor "public Udf(String name)". try { constructor = udfClass.getConstructor(String.class); if (Modifier.isPublic(constructor.getModifiers())) { args = new Object[] {functionName}; } else { constructor = null; } } catch (NoSuchMethodException e) { constructor = null; } // 2. Otherwise, look for a constructor "public Udf()". if (constructor == null) { try { constructor = udfClass.getConstructor(); if (Modifier.isPublic(constructor.getModifiers())) { args = new Object[] {}; } else { constructor = null; } } catch (NoSuchMethodException e) { constructor = null; } } // 3. 
Else, no constructor suitable. if (constructor == null) { throw MondrianResource.instance().UdfClassWrongIface.ex( functionNameOrEmpty, className, UserDefinedFunction.class.getName()); } // Instantiate class. try { udf = (UserDefinedFunction) constructor.newInstance(args); } catch (InstantiationException e) { throw MondrianResource.instance().UdfClassWrongIface.ex( functionNameOrEmpty, className, UserDefinedFunction.class.getName()); } catch (IllegalAccessException e) { throw MondrianResource.instance().UdfClassWrongIface.ex( functionName, className, UserDefinedFunction.class.getName()); } catch (ClassCastException e) { throw MondrianResource.instance().UdfClassWrongIface.ex( functionNameOrEmpty, className, UserDefinedFunction.class.getName()); } catch (InvocationTargetException e) { throw MondrianResource.instance().UdfClassWrongIface.ex( functionName, className, UserDefinedFunction.class.getName()); } return udf; } /** * Check the resultSize against the result limit setting. Throws * LimitExceededDuringCrossjoin exception if limit exceeded. * * When it is called from RolapNativeSet.checkCrossJoin(), it is only * possible to check the known input size, because the final CJ result * will come from the DB(and will be checked against the limit when * fetching from the JDBC result set, in SqlTupleReader.prepareTuples()) * * @param resultSize Result limit * @throws ResourceLimitExceededException */ public static void checkCJResultLimit(long resultSize) { int resultLimit = MondrianProperties.instance().ResultLimit.get(); // Throw an exeption, if the size of the crossjoin exceeds the result // limit. if (resultLimit > 0 && resultLimit < resultSize) { throw MondrianResource.instance().LimitExceededDuringCrossjoin.ex( resultSize, resultLimit); } // Throw an exception if the crossjoin exceeds a reasonable limit. // (Yes, 4 billion is a reasonable limit.) 
if (resultSize > Integer.MAX_VALUE) { throw MondrianResource.instance().LimitExceededDuringCrossjoin.ex( resultSize, Integer.MAX_VALUE); } } /** * Converts an olap4j connect string into a legacy mondrian connect string. * * <p>For example, * "jdbc:mondrian:Datasource=jdbc/SampleData;Catalog=foodmart/FoodMart.xml;" * becomes * "Provider=Mondrian; * Datasource=jdbc/SampleData;Catalog=foodmart/FoodMart.xml;" * * <p>This method is intended to allow legacy applications (such as JPivot * and Mondrian's XMLA server) to continue to create connections using * Mondrian's legacy connection API even when they are handed an olap4j * connect string. * * @param url olap4j connect string * @return mondrian connect string, or null if cannot be converted */ public static String convertOlap4jConnectStringToNativeMondrian( String url) { if (url.startsWith("jdbc:mondrian:")) { return "Provider=Mondrian; " + url.substring("jdbc:mondrian:".length()); } return null; } /** * Checks if a String is whitespace, empty ("") or null.</p> * * <pre> * StringUtils.isBlank(null) = true * StringUtils.isBlank("") = true * StringUtils.isBlank(" ") = true * StringUtils.isBlank("bob") = false * StringUtils.isBlank(" bob ") = false * </pre> * * <p>(Copied from commons-lang.) * * @param str the String to check, may be null * @return <code>true</code> if the String is null, empty or whitespace */ public static boolean isBlank(String str) { final int strLen; if (str == null || (strLen = str.length()) == 0) { return true; } for (int i = 0; i < strLen; i++) { if (!Character.isWhitespace(str.charAt(i))) { return false; } } return true; } /** * Returns a role which has access to everything. * @param schema A schema to bind this role to. * @return A role with root access to the schema. */ public static Role createRootRole(Schema schema) { RoleImpl role = new RoleImpl(); role.grant(schema, Access.ALL); role.makeImmutable(); return role; } /** * Tries to find the cube from which a dimension is taken. 
* It considers private dimensions, shared dimensions and virtual * dimensions. If it can't determine with certitude the origin * of the dimension, it returns null. */ public static Cube getDimensionCube(Dimension dimension) { final Cube[] cubes = dimension.getSchema().getCubes(); for (Cube cube : cubes) { for (Dimension dimension1 : cube.getDimensions()) { // If the dimensions have the same identity, // we found an access rule. if (dimension == dimension1) { return cube; } // If the passed dimension argument is of class // RolapCubeDimension, we must validate the cube // assignment and make sure the cubes are the same. // If not, skip to the next grant. if (dimension instanceof RolapCubeDimension && dimension.equals(dimension1) && !((RolapCubeDimension)dimension1) .getCube() .equals(cube)) { continue; } // Last thing is to allow for equality correspondences // to work with virtual cubes. if (cube instanceof RolapCube && ((RolapCube)cube).isVirtual() && dimension.equals(dimension1)) { return cube; } } } return null; } /** * Similar to {@link ClassLoader#getResource(String)}, except the lookup * is in reverse order.<br> * i.e. returns the resource from the supplied classLoader or the * one closest to it in the hierarchy, instead of the closest to the root * class loader * @param classLoader The class loader to fetch from * @param name The resource name * @return A URL object for reading the resource, or null if the resource * could not be found or the invoker doesn't have adequate privileges to get * the resource. * @see ClassLoader#getResource(String) * @see ClassLoader#getResources(String) */ public static URL getClosestResource(ClassLoader classLoader, String name) { URL resource = null; try { // The last resource will be from the nearest ClassLoader. 
Enumeration<URL> resourceCandidates = classLoader.getResources(name); while (resourceCandidates.hasMoreElements()) { resource = resourceCandidates.nextElement(); } } catch (IOException ioe) { // ignore exception - it's OK if file is not found // just keep getResource contract and return null Util.discard(ioe); } return resource; } public static abstract class AbstractFlatList<T> implements List<T>, RandomAccess { protected final List<T> asArrayList() { //noinspection unchecked return Arrays.asList((T[]) toArray()); } public Iterator<T> iterator() { return asArrayList().iterator(); } public ListIterator<T> listIterator() { return asArrayList().listIterator(); } public boolean isEmpty() { return false; } public boolean add(Object t) { throw new UnsupportedOperationException(); } public boolean addAll(Collection<? extends T> c) { throw new UnsupportedOperationException(); } public boolean addAll(int index, Collection<? extends T> c) { throw new UnsupportedOperationException(); } public boolean removeAll(Collection<?> c) { throw new UnsupportedOperationException(); } public boolean retainAll(Collection<?> c) { throw new UnsupportedOperationException(); } public void clear() { throw new UnsupportedOperationException(); } public T set(int index, Object element) { throw new UnsupportedOperationException(); } public void add(int index, Object element) { throw new UnsupportedOperationException(); } public T remove(int index) { throw new UnsupportedOperationException(); } public ListIterator<T> listIterator(int index) { return asArrayList().listIterator(index); } public List<T> subList(int fromIndex, int toIndex) { return asArrayList().subList(fromIndex, toIndex); } public boolean contains(Object o) { return indexOf(o) >= 0; } public boolean containsAll(Collection<?> c) { Iterator<?> e = c.iterator(); while (e.hasNext()) { if (!contains(e.next())) { return false; } } return true; } public boolean remove(Object o) { throw new UnsupportedOperationException(); } } /** * List 
that stores its two elements in the two members of the class. * Unlike {@link java.util.ArrayList} or * {@link java.util.Arrays#asList(Object[])} there is * no array, only one piece of memory allocated, therefore is very compact * and cache and CPU efficient. * * <p>The list is read-only, cannot be modified or resized, and neither * of the elements can be null. * * <p>The list is created via {@link Util#flatList(Object[])}. * * @see mondrian.olap.Util.Flat3List * @param <T> */ protected static class Flat2List<T> extends AbstractFlatList<T> { private final T t0; private final T t1; Flat2List(T t0, T t1) { this.t0 = t0; this.t1 = t1; assert t0 != null; assert t1 != null; } public String toString() { return "[" + t0 + ", " + t1 + "]"; } public T get(int index) { switch (index) { case 0: return t0; case 1: return t1; default: throw new IndexOutOfBoundsException("index " + index); } } public int size() { return 2; } public boolean equals(Object o) { if (o instanceof Flat2List) { Flat2List that = (Flat2List) o; return Util.equals(this.t0, that.t0) && Util.equals(this.t1, that.t1); } return Arrays.asList(t0, t1).equals(o); } public int hashCode() { int h = 1; h = h * 31 + t0.hashCode(); h = h * 31 + t1.hashCode(); return h; } public int indexOf(Object o) { if (t0.equals(o)) { return 0; } if (t1.equals(o)) { return 1; } return -1; } public int lastIndexOf(Object o) { if (t1.equals(o)) { return 1; } if (t0.equals(o)) { return 0; } return -1; } @SuppressWarnings({"unchecked"}) public <T2> T2[] toArray(T2[] a) { a[0] = (T2) t0; a[1] = (T2) t1; return a; } public Object[] toArray() { return new Object[] {t0, t1}; } } /** * List that stores its three elements in the three members of the class. * Unlike {@link java.util.ArrayList} or * {@link java.util.Arrays#asList(Object[])} there is * no array, only one piece of memory allocated, therefore is very compact * and cache and CPU efficient. 
* * <p>The list is read-only, cannot be modified or resized, and none * of the elements can be null. * * <p>The list is created via {@link Util#flatList(Object[])}. * * @see mondrian.olap.Util.Flat2List * @param <T> */ protected static class Flat3List<T> extends AbstractFlatList<T> { private final T t0; private final T t1; private final T t2; Flat3List(T t0, T t1, T t2) { this.t0 = t0; this.t1 = t1; this.t2 = t2; assert t0 != null; assert t1 != null; assert t2 != null; } public String toString() { return "[" + t0 + ", " + t1 + ", " + t2 + "]"; } public T get(int index) { switch (index) { case 0: return t0; case 1: return t1; case 2: return t2; default: throw new IndexOutOfBoundsException("index " + index); } } public int size() { return 3; } public boolean equals(Object o) { if (o instanceof Flat3List) { Flat3List that = (Flat3List) o; return Util.equals(this.t0, that.t0) && Util.equals(this.t1, that.t1) && Util.equals(this.t2, that.t2); } return o.equals(this); } public int hashCode() { int h = 1; h = h * 31 + t0.hashCode(); h = h * 31 + t1.hashCode(); h = h * 31 + t2.hashCode(); return h; } public int indexOf(Object o) { if (t0.equals(o)) { return 0; } if (t1.equals(o)) { return 1; } if (t2.equals(o)) { return 2; } return -1; } public int lastIndexOf(Object o) { if (t2.equals(o)) { return 2; } if (t1.equals(o)) { return 1; } if (t0.equals(o)) { return 0; } return -1; } @SuppressWarnings({"unchecked"}) public <T2> T2[] toArray(T2[] a) { a[0] = (T2) t0; a[1] = (T2) t1; a[2] = (T2) t2; return a; } public Object[] toArray() { return new Object[] {t0, t1, t2}; } } /** * Garbage-collecting iterator. Iterates over a collection of references, * and if any of the references has been garbage-collected, removes it from * the collection. * * @param <T> Element type */ public static class GcIterator<T> implements Iterator<T> { private final Iterator<? extends Reference<T>> iterator; private boolean hasNext; private T next; public GcIterator(Iterator<? 
extends Reference<T>> iterator) { this.iterator = iterator; this.hasNext = true; moveToNext(); } /** * Creates an iterator over a collection of references. * * @param referenceIterable Collection of references * @param <T2> element type * @return iterable over collection */ public static <T2> Iterable<T2> over( final Iterable<? extends Reference<T2>> referenceIterable) { return new Iterable<T2>() { public Iterator<T2> iterator() { return new GcIterator<T2>(referenceIterable.iterator()); } }; } private void moveToNext() { while (iterator.hasNext()) { final Reference<T> ref = iterator.next(); next = ref.get(); if (next != null) { return; } iterator.remove(); } hasNext = false; } public boolean hasNext() { return hasNext; } public T next() { final T next1 = next; moveToNext(); return next1; } public void remove() { throw new UnsupportedOperationException(); } } public static interface Functor1<RT, PT> { RT apply(PT param); } public static <T> Functor1<T, T> identityFunctor() { //noinspection unchecked return IDENTITY_FUNCTOR; } private static final Functor1 IDENTITY_FUNCTOR = new Functor1<Object, Object>() { public Object apply(Object param) { return param; } }; public static <PT> Functor1<Boolean, PT> trueFunctor() { //noinspection unchecked return TRUE_FUNCTOR; } public static <PT> Functor1<Boolean, PT> falseFunctor() { //noinspection unchecked return FALSE_FUNCTOR; } private static final Functor1 TRUE_FUNCTOR = new Functor1<Boolean, Object>() { public Boolean apply(Object param) { return true; } }; private static final Functor1 FALSE_FUNCTOR = new Functor1<Boolean, Object>() { public Boolean apply(Object param) { return false; } }; /** * Information about memory usage. * * @see mondrian.olap.Util#getMemoryInfo() */ public interface MemoryInfo { Usage get(); public interface Usage { long getUsed(); long getCommitted(); long getMax(); } } /** * A {@link Comparator} implementation which can deal * correctly with {@link RolapUtil#sqlNullValue}. 
*/ public static class SqlNullSafeComparator implements Comparator<Comparable> { public static final SqlNullSafeComparator instance = new SqlNullSafeComparator(); private SqlNullSafeComparator() { } public int compare(Comparable o1, Comparable o2) { if (o1 == RolapUtil.sqlNullValue) { return -1; } if (o2 == RolapUtil.sqlNullValue) { return 1; } return o1.compareTo(o2); } } /** * This class implements the Knuth-Morris-Pratt algorithm * to search within a byte array for a token byte array. */ public static class ByteMatcher { private final int[] matcher; public final byte[] key; public ByteMatcher(byte[] key) { this.key = key; this.matcher = compile(key); } /** * Matches the pre-compiled byte array token against a * byte array variable and returns the index of the key * within the array. * @param a An array of bytes to search for. * @return -1 if not found, or the index (0 based) of the match. */ public int match(byte[] a) { int j = 0; for (int i = 0; i < a.length; i++) { while (j > 0 && key[j] != a[i]) { j = matcher[j - 1]; } if (a[i] == key[j]) { j++; } if (key.length == j) { return i - key.length + 1; } } return -1; } private int[] compile(byte[] key) { int[] matcher = new int[key.length]; int j = 0; for (int i = 1; i < key.length; i++) { while (j > 0 && key[j] != key[i]) { j = matcher[j - 1]; } if (key[i] == key[j]) { j++; } matcher[i] = j; } return matcher; } } /** * Transforms a list into a map for which all the keys return * a null value associated to it. * * <p>The list passed as an argument will be used to back * the map returned and as many methods are overridden as * possible to make sure that we don't iterate over the backing * list when creating it and when performing operations like * .size(), entrySet() and contains(). * * <p>The returned map is to be considered immutable. It will * throw an {@link UnsupportedOperationException} if attempts to * modify it are made. 
*/ public static <K, V> Map<K, V> toNullValuesMap(List<K> list) { return new NullValuesMap<K, V>(list); } private static class NullValuesMap<K, V> extends AbstractMap<K, V> { private final List<K> list; private NullValuesMap(List<K> list) { super(); this.list = Collections.unmodifiableList(list); } public Set<Entry<K, V>> entrySet() { return new AbstractSet<Entry<K, V>>() { public Iterator<Entry<K, V>> iterator() { return new Iterator<Entry<K, V>>() { private int pt = -1; public void remove() { throw new UnsupportedOperationException(); } @SuppressWarnings("unchecked") public Entry<K, V> next() { return new AbstractMapEntry( list.get(++pt), null) {}; } public boolean hasNext() { return pt < list.size(); } }; } public int size() { return list.size(); } public boolean contains(Object o) { if (o instanceof Entry) { if (list.contains(((Entry) o).getKey())) { return true; } } return false; } }; } public Set<K> keySet() { return new AbstractSet<K>() { public Iterator<K> iterator() { return new Iterator<K>() { private int pt = -1; public void remove() { throw new UnsupportedOperationException(); } public K next() { return list.get(++pt); } public boolean hasNext() { return pt < list.size(); } }; } public int size() { return list.size(); } public boolean contains(Object o) { return list.contains(o); } }; } public Collection<V> values() { return new AbstractList<V>() { public V get(int index) { return null; } public int size() { return list.size(); } public boolean contains(Object o) { if (o == null && size() > 0) { return true; } else { return false; } } }; } public V get(Object key) { return null; } public boolean containsKey(Object key) { return list.contains(key); } public boolean containsValue(Object o) { if (o == null && size() > 0) { return true; } else { return false; } } } } // End Util.java
package org.dolphinemu.dolphinemu.viewholders;

import android.view.View;
import android.widget.ImageView;

import androidx.leanback.widget.ImageCardView;
import androidx.leanback.widget.Presenter;

import org.dolphinemu.dolphinemu.model.GameFile;

/**
 * A simple class that stores references to views so that the GameAdapter doesn't need to
 * keep calling findViewById(), which is expensive.
 */
public final class TvGameViewHolder extends Presenter.ViewHolder
{
  public ImageCardView cardParent;   // the itemView itself, cast once here
  public ImageView imageScreenshot;  // main image view owned by cardParent
  public GameFile gameFile;          // not set here; presumably bound later by the presenter/adapter — TODO confirm

  public TvGameViewHolder(View itemView)
  {
    super(itemView);

    // Tag the view with its holder so the holder can be recovered from the View alone.
    itemView.setTag(this);

    cardParent = (ImageCardView) itemView;
    imageScreenshot = cardParent.getMainImageView();
  }
}
/**
 * # Hooks & Filters
 *
 * This file contains all of the form functions of the main _inbound object.
 * Filters and actions are described below
 *
 * Forked from https://github.com/carldanley/WP-JS-Hooks/blob/master/src/event-manager.js
 *
 * @author David Wells <david@inboundnow.com>
 * @version 0.0.1
 */
var _inboundHooks = (function (_inbound) {

	/**
	 * # EventManager
	 *
	 * Actions and filters List
	 * addAction( 'namespace.identifier', callback, priority )
	 * addFilter( 'namespace.identifier', callback, priority )
	 * removeAction( 'namespace.identifier' )
	 * removeFilter( 'namespace.identifier' )
	 * doAction( 'namespace.identifier', arg1, arg2, moreArgs, finalArg )
	 * applyFilters( 'namespace.identifier', content )
	 *
	 * Handles managing all events for whatever you plug it into. Priorities
	 * for hooks are based on lowest to highest in that, lowest priority
	 * hooks are fired first.
	 */
	var EventManager = function() {

		/**
		 * Maintain a reference to the object scope so our public methods
		 * never get confusing.
		 */
		var MethodsAvailable = {
			removeFilter : removeFilter,
			applyFilters : applyFilters,
			addFilter    : addFilter,
			removeAction : removeAction,
			doAction     : doAction,
			addAction    : addAction
		};

		/**
		 * Contains the hooks that get registered with this EventManager.
		 * The storage utilizes a "flat" object literal such that looking up
		 * the hook utilizes the native object literal hash.
		 */
		var STORAGE = {
			actions : {},
			filters : {}
		};

		/**
		 * Adds an action to the event manager.
		 *
		 * @param action Must contain namespace.identifier
		 * @param callback Must be a valid callback function before this action is added
		 * @param [priority=10] Used to control when the function is executed in relation to other callbacks bound to the same hook
		 * @param [context] Supply a value to be used for this
		 */
		function addAction( action, callback, priority, context ) {
			if( typeof action === 'string' && typeof callback === 'function' ) {
				priority = parseInt( ( priority || 10 ), 10 );
				_addHook( 'actions', action, callback, priority, context );
			}

			return MethodsAvailable;
		}

		/**
		 * Performs an action if it exists. You can pass as many arguments as
		 * you want to this function; the only rule is that the first argument
		 * must always be the action.
		 */
		function doAction( /* action, arg1, arg2, ... */ ) {
			var args = Array.prototype.slice.call( arguments );
			var action = args.shift();

			if( typeof action === 'string' ) {
				_runHook( 'actions', action, args );
			}

			return MethodsAvailable;
		}

		/**
		 * Removes the specified action if it contains a namespace.identifier
		 * & exists.
		 *
		 * @param action The action to remove
		 * @param [callback] Callback function to remove
		 */
		function removeAction( action, callback ) {
			if( typeof action === 'string' ) {
				_removeHook( 'actions', action, callback );
			}

			return MethodsAvailable;
		}

		/**
		 * Adds a filter to the event manager.
		 *
		 * @param filter Must contain namespace.identifier
		 * @param callback Must be a valid callback function before this action is added
		 * @param [priority=10] Used to control when the function is executed in relation to other callbacks bound to the same hook
		 * @param [context] Supply a value to be used for this
		 */
		function addFilter( filter, callback, priority, context ) {
			if( typeof filter === 'string' && typeof callback === 'function' ) {
				priority = parseInt( ( priority || 10 ), 10 );
				// FIX: the original call dropped `context`, so filter
				// callbacks were always invoked with an undefined `this`
				// even when a context was supplied (addAction forwards it).
				_addHook( 'filters', filter, callback, priority, context );
			}

			return MethodsAvailable;
		}

		/**
		 * Performs a filter if it exists. You should only ever pass 1
		 * argument to be filtered. The only rule is that the first argument
		 * must always be the filter.
		 */
		function applyFilters( /* filter, filtered arg, arg2, ... */ ) {
			var args = Array.prototype.slice.call( arguments );
			var filter = args.shift();

			if( typeof filter === 'string' ) {
				return _runHook( 'filters', filter, args );
			}

			return MethodsAvailable;
		}

		/**
		 * Removes the specified filter if it contains a namespace.identifier
		 * & exists.
		 *
		 * @param filter The action to remove
		 * @param [callback] Callback function to remove
		 */
		function removeFilter( filter, callback ) {
			if( typeof filter === 'string') {
				_removeHook( 'filters', filter, callback );
			}

			return MethodsAvailable;
		}

		/**
		 * Removes the specified hook by resetting the value of it.
		 *
		 * @param type Type of hook, either 'actions' or 'filters'
		 * @param hook The hook (namespace.identifier) to remove
		 * @param [callback] Only remove handlers with this callback
		 * @param [context] Only remove handlers with this callback AND context
		 * @private
		 */
		function _removeHook( type, hook, callback, context ) {
			if ( !STORAGE[ type ][ hook ] ) {
				return;
			}
			if ( !callback ) {
				// No callback given: drop every handler for this hook.
				STORAGE[ type ][ hook ] = [];
			} else {
				var handlers = STORAGE[ type ][ hook ];
				var i;
				// Iterate backwards so splicing doesn't skip elements.
				if ( !context ) {
					for ( i = handlers.length; i--; ) {
						if ( handlers[i].callback === callback ) {
							handlers.splice( i, 1 );
						}
					}
				} else {
					for ( i = handlers.length; i--; ) {
						var handler = handlers[i];
						if ( handler.callback === callback && handler.context === context) {
							handlers.splice( i, 1 );
						}
					}
				}
			}
		}

		/**
		 * Adds the hook to the appropriate storage container
		 *
		 * @param type 'actions' or 'filters'
		 * @param hook The hook (namespace.identifier) to add to our event manager
		 * @param callback The function that will be called when the hook is executed.
		 * @param priority The priority of this hook. Must be an integer.
		 * @param [context] A value to be used for this
		 * @private
		 */
		function _addHook( type, hook, callback, priority, context ) {
			var hookObject = {
				callback : callback,
				priority : priority,
				context : context
			};

			// Utilize 'prop itself' : http://jsperf.com/hasownproperty-vs-in-vs-undefined/19
			var hooks = STORAGE[ type ][ hook ];
			if( hooks ) {
				hooks.push( hookObject );
				hooks = _hookInsertSort( hooks );
			}
			else {
				hooks = [ hookObject ];
			}

			STORAGE[ type ][ hook ] = hooks;
		}

		/**
		 * Use an insert sort for keeping our hooks organized based on
		 * priority. This function is ridiculously faster than bubble sort,
		 * etc: http://jsperf.com/javascript-sort
		 *
		 * @param hooks The custom array containing all of the appropriate hooks to perform an insert sort on.
		 * @private
		 */
		function _hookInsertSort( hooks ) {
			var tmpHook, j, prevHook;
			for( var i = 1, len = hooks.length; i < len; i++ ) {
				tmpHook = hooks[ i ];
				j = i;
				while( ( prevHook = hooks[ j - 1 ] ) && prevHook.priority > tmpHook.priority ) {
					hooks[ j ] = hooks[ j - 1 ];
					--j;
				}
				hooks[ j ] = tmpHook;
			}

			return hooks;
		}

		/**
		 * Runs the specified hook. If it is an action, the value is not
		 * modified but if it is a filter, it is.
		 *
		 * @param type 'actions' or 'filters'
		 * @param hook The hook ( namespace.identifier ) to be ran.
		 * @param args Arguments to pass to the action/filter. If it's a filter, args is actually a single parameter.
		 * @private
		 */
		function _runHook( type, hook, args ) {
			var handlers = STORAGE[ type ][ hook ];
			if ( !handlers ) {
				// For a filter, the unfiltered value passes straight through.
				return (type === 'filters') ? args[0] : false;
			}

			var i = 0, len = handlers.length;
			if ( type === 'filters' ) {
				// Each callback receives the previous callback's return value.
				for ( ; i < len; i++ ) {
					args[ 0 ] = handlers[ i ].callback.apply( handlers[ i ].context, args );
				}
			} else {
				for ( ; i < len; i++ ) {
					handlers[ i ].callback.apply( handlers[ i ].context, args );
				}
			}

			return ( type === 'filters' ) ? args[ 0 ] : true;
		}

		// return all of the publicly available methods
		return MethodsAvailable;
	};

	_inbound.hooks = new EventManager();

	/**
	 * Event Hooks and Filters public methods
	 */

	/*
	 * add_action
	 *
	 * This function uses _inbound.hooks to mimics WP add_action
	 *
	 * ```js
	 * function Inbound_Add_Action_Example(data) {
	 *   // Do stuff here.
	 * };
	 * // Add action to the hook
	 * _inbound.add_action( 'name_of_action', Inbound_Add_Action_Example, 10 );
	 * ```
	 */
	_inbound.add_action = function() {
		// allow multiple action parameters such as 'ready append'
		var actions = arguments[0].split(' ');

		// FIX: the original `for( k in actions )` used an undeclared `k`,
		// leaking it as an implicit global (and throwing in strict mode).
		for ( var i = 0; i < actions.length; i++ ) {
			// prefix action
			arguments[0] = 'inbound.' + actions[ i ];
			_inbound.hooks.addAction.apply(this, arguments);
		}

		return this;
	};

	/*
	 * remove_action
	 *
	 * This function uses _inbound.hooks to mimics WP remove_action
	 *
	 * ```js
	 * // Add remove action 'name_of_action'
	 * _inbound.remove_action( 'name_of_action');
	 * ```
	 */
	_inbound.remove_action = function() {
		// prefix action
		arguments[0] = 'inbound.' + arguments[0];
		_inbound.hooks.removeAction.apply(this, arguments);
		return this;
	};

	/*
	 * do_action
	 *
	 * This function uses _inbound.hooks to mimics WP do_action
	 * This is used if you want to allow for third party JS plugins to act on your functions
	 */
	_inbound.do_action = function() {
		// prefix action
		arguments[0] = 'inbound.' + arguments[0];
		_inbound.hooks.doAction.apply(this, arguments);
		return this;
	};

	/*
	 * add_filter
	 *
	 * This function uses _inbound.hooks to mimics WP add_filter
	 *
	 * ```js
	 * _inbound.add_filter( 'urlParamFilter', URL_Param_Filter, 10 );
	 * function URL_Param_Filter(urlParams) {
	 *
	 *   var params = urlParams || {};
	 *   // check for item in object
	 *   if(params.utm_source !== "undefined"){
	 *     //alert('url param "utm_source" is here');
	 *   }
	 *
	 *   // delete item from object
	 *   delete params.utm_source;
	 *
	 *   return params;
	 *
	 * }
	 * ```
	 */
	_inbound.add_filter = function() {
		// prefix action
		arguments[0] = 'inbound.' + arguments[0];
		_inbound.hooks.addFilter.apply(this, arguments);
		return this;
	};

	/*
	 * remove_filter
	 *
	 * This function uses _inbound.hooks to mimics WP remove_filter
	 *
	 * ```js
	 * // Add remove filter 'urlParamFilter'
	 * _inbound.remove_action( 'urlParamFilter');
	 * ```
	 */
	_inbound.remove_filter = function() {
		// prefix action
		arguments[0] = 'inbound.' + arguments[0];
		_inbound.hooks.removeFilter.apply(this, arguments);
		return this;
	};

	/*
	 * apply_filters
	 *
	 * This function uses _inbound.hooks to mimics WP apply_filters
	 */
	_inbound.apply_filters = function() {
		//console.log('Filter:' + arguments[0] + " ran on ->", arguments[1]);
		// prefix action
		arguments[0] = 'inbound.' + arguments[0];
		return _inbound.hooks.applyFilters.apply(this, arguments);
	};

	return _inbound;

})(_inbound || {});
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
	<title>CKFinder käyttöohje</title>
	<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
	<link href="other/help.css" type="text/css" rel="stylesheet" />
	<script type="text/javascript" src="other/help.js"></script>
</head>
<body>
	<h1>
		Päivitä-nappi
	</h1>
	<p>
		<strong>Jaetussa ohjelmistoympäristössä</strong>, kuten CKFinder, voi
		työskennellä useita käyttäjiä yhtä aikaa. Tällöin yhteisessä kansiossa
		oleviin tiedostoihin voi tulla lähes samanaikaisia muutoksia. Saat
		muutokset näkyviin "Päivitä"-toiminnolla.
	</p>
	<p style="text-align: center">
		<img height="25" src="../../en/files/images/011.gif" width="258" />&nbsp;</p>
	<p>
		Oletetaan, että luot uuden sivun yrityksenne tuliterälle tuotteelle.
		Avaat CKFinderin ladataksesi tuotekuvan tekemällesi sivulle. Mutta
		avatessasi "Tuotteet"-kansiota, kuvaa ei löydykään. Pirautat
		"Maikille", ja ihmettelet, mihin kuvat ovat kadonneet. Maikki pyytää
		odottamaan tovin ja lataa kuvan palvelimelle. Sitten hän kehottaa
		sinua päivittämään kansiorakenteesi. Ja voila! Siellähän kuva
		näkyykin.
	</p>
</body>
</html>
/******************************************************
The transaction

(c) 1996 Innobase Oy

Created 3/26/1996 Heikki Tuuri
*******************************************************/

#include "trx0trx.h"

#ifdef UNIV_NONINL
#include "trx0trx.ic"
#endif

#include "trx0undo.h"
#include "trx0rseg.h"
#include "log0log.h"
#include "que0que.h"
#include "lock0lock.h"
#include "trx0roll.h"
#include "usr0sess.h"
#include "read0read.h"
#include "srv0srv.h"
#include "thr0loc.h"
#include "btr0sea.h"
#include "os0proc.h"
#include "trx0xa.h"
#include "ha_prototypes.h"

/* Copy of the prototype for innobase_mysql_print_thd: this
copy MUST be equal to the one in mysql/sql/ha_innodb.cc ! */

void innobase_mysql_print_thd(
	FILE*	f,
	void*	thd,
	ulint	max_query_len);

/* Dummy session used currently in MySQL interface */
sess_t*		trx_dummy_sess = NULL;

/* Number of transactions currently allocated for MySQL: protected by
the kernel mutex */
ulint	trx_n_mysql_transactions = 0;
/* Number of transactions currently in the XA PREPARED state: protected by
the kernel mutex */
ulint	trx_n_prepared = 0;

/*****************************************************************
Starts the transaction if it is not yet started.
Out-of-line wrapper around the inline trx_start_if_not_started(),
for callers that cannot use the inline version. */

void
trx_start_if_not_started_noninline(
/*===============================*/
	trx_t*	trx)	/* in: transaction */
{
	trx_start_if_not_started(trx);
}

/*****************************************************************
Set detailed error message for the transaction.
The message is copied (with truncation) into the fixed-size
trx->detailed_error buffer. */

void
trx_set_detailed_error(
/*===================*/
	trx_t*		trx,	/* in: transaction struct */
	const char*	msg)	/* in: detailed error message */
{
	ut_strlcpy(trx->detailed_error, msg, sizeof(trx->detailed_error));
}

/*****************************************************************
Set detailed error message for the transaction from a file. Note that the
file is rewinded before reading from it. 
*/

void
trx_set_detailed_error_from_file(
/*=============================*/
	trx_t*	trx,	/* in: transaction struct */
	FILE*	file)	/* in: file to read message from */
{
	os_file_read_string(file, trx->detailed_error,
			    sizeof(trx->detailed_error));
}

/********************************************************************
Retrieves the error_info field from a trx. */

void*
trx_get_error_info(
/*===============*/
			/* out: the error info */
	trx_t*	trx)	/* in: trx object */
{
	return(trx->error_info);
}

/********************************************************************
Creates and initializes a transaction object. Caller must hold the
kernel mutex (asserted below); every field of the trx struct is given
an explicit initial value here. */

trx_t*
trx_create(
/*=======*/
			/* out, own: the transaction */
	sess_t*	sess)	/* in: session or NULL */
{
	trx_t*	trx;

	ut_ad(mutex_own(&kernel_mutex));

	trx = mem_alloc(sizeof(trx_t));

	trx->magic_n = TRX_MAGIC_N;

	trx->op_info = "";

	trx->is_purge = 0;
	trx->conc_state = TRX_NOT_STARTED;
	trx->start_time = time(NULL);

	trx->isolation_level = TRX_ISO_REPEATABLE_READ;

	/* id stays zero and no stays max until the trx is really started */
	trx->id = ut_dulint_zero;
	trx->no = ut_dulint_max;

	trx->support_xa = TRUE;

	trx->check_foreigns = TRUE;
	trx->check_unique_secondary = TRUE;

	trx->flush_log_later = FALSE;
	trx->must_flush_log_later = FALSE;

	trx->dict_operation = FALSE;

	trx->mysql_thd = NULL;
	trx->mysql_query_str = NULL;
	trx->mysql_query_len = NULL;
	trx->active_trans = 0;
	trx->duplicates = 0;

	trx->n_mysql_tables_in_use = 0;
	trx->mysql_n_tables_locked = 0;

	trx->mysql_log_file_name = NULL;
	trx->mysql_log_offset = 0;

	mutex_create(&trx->undo_mutex, SYNC_TRX_UNDO);

	trx->rseg = NULL;

	trx->undo_no = ut_dulint_zero;
	trx->last_sql_stat_start.least_undo_no = ut_dulint_zero;
	trx->insert_undo = NULL;
	trx->update_undo = NULL;
	trx->undo_no_arr = NULL;

	trx->error_state = DB_SUCCESS;
	trx->detailed_error[0] = '\0';

	trx->sess = sess;
	trx->que_state = TRX_QUE_RUNNING;
	trx->n_active_thrs = 0;

	trx->handling_signals = FALSE;

	UT_LIST_INIT(trx->signals);
	UT_LIST_INIT(trx->reply_signals);

	trx->graph = NULL;

	trx->wait_lock = NULL;
	trx->was_chosen_as_deadlock_victim = FALSE;
	UT_LIST_INIT(trx->wait_thrs);

	trx->lock_heap = mem_heap_create_in_buffer(256);
	UT_LIST_INIT(trx->trx_locks);

	UT_LIST_INIT(trx->trx_savepoints);

	trx->dict_operation_lock_mode = 0;
	trx->has_search_latch = FALSE;
	trx->search_latch_timeout = BTR_SEA_TIMEOUT;

	trx->declared_to_be_inside_innodb = FALSE;
	trx->n_tickets_to_enter_innodb = 0;

	trx->auto_inc_lock = NULL;

	trx->global_read_view_heap = mem_heap_create(256);
	trx->global_read_view = NULL;
	trx->read_view = NULL;

	/* Set X/Open XA transaction identification to NULL */
	memset(&trx->xid, 0, sizeof(trx->xid));
	trx->xid.formatID = -1;

	trx->n_autoinc_rows = 0;

	return(trx);
}

/************************************************************************
Creates a transaction object for MySQL. Opens the shared dummy session
on first use and links the new trx into trx_sys->mysql_trx_list; the
MySQL thread/process ids are recorded outside the kernel mutex. */

trx_t*
trx_allocate_for_mysql(void)
/*========================*/
			/* out, own: transaction object */
{
	trx_t*	trx;

	mutex_enter(&kernel_mutex);

	/* Open a dummy session */

	if (!trx_dummy_sess) {
		trx_dummy_sess = sess_open();
	}

	trx = trx_create(trx_dummy_sess);

	trx_n_mysql_transactions++;

	UT_LIST_ADD_FIRST(mysql_trx_list, trx_sys->mysql_trx_list, trx);

	mutex_exit(&kernel_mutex);

	trx->mysql_thread_id = os_thread_get_curr_id();

	trx->mysql_process_no = os_proc_get_number();

	return(trx);
}

/************************************************************************
Creates a transaction object for background operations by the master
thread. Unlike trx_allocate_for_mysql(), the trx is NOT put on the
mysql_trx_list. */

trx_t*
trx_allocate_for_background(void)
/*=============================*/
			/* out, own: transaction object */
{
	trx_t*	trx;

	mutex_enter(&kernel_mutex);

	/* Open a dummy session */

	if (!trx_dummy_sess) {
		trx_dummy_sess = sess_open();
	}

	trx = trx_create(trx_dummy_sess);

	mutex_exit(&kernel_mutex);

	return(trx);
}

/************************************************************************
Releases the search latch if trx has reserved it. 
*/

void
trx_search_latch_release_if_reserved(
/*=================================*/
	trx_t*	trx)	/* in: transaction */
{
	if (trx->has_search_latch) {
		rw_lock_s_unlock(&btr_search_latch);

		trx->has_search_latch = FALSE;
	}
}

/************************************************************************
Frees a transaction object. Caller must hold the kernel mutex.
The trx must already be fully quiesced: not started, no undo logs,
no pending signals, no locks, no read view — all of which is
asserted below before the memory is released. Anomalies (still
declared inside InnoDB, or tables still in use) are reported to
stderr but do not abort the free. */

void
trx_free(
/*=====*/
	trx_t*	trx)	/* in, own: trx object */
{
	ut_ad(mutex_own(&kernel_mutex));

	if (trx->declared_to_be_inside_innodb) {
		ut_print_timestamp(stderr);
		fputs("  InnoDB: Error: Freeing a trx which is declared"
		      " to be processing\n"
		      "InnoDB: inside InnoDB.\n", stderr);
		trx_print(stderr, trx, 600);
		putc('\n', stderr);

		/* This is an error but not a fatal error. We must keep
		the counters like srv_conc_n_threads accurate. */
		srv_conc_force_exit_innodb(trx);
	}

	if (trx->n_mysql_tables_in_use != 0
	    || trx->mysql_n_tables_locked != 0) {

		ut_print_timestamp(stderr);
		fprintf(stderr,
			"  InnoDB: Error: MySQL is freeing a thd\n"
			"InnoDB: though trx->n_mysql_tables_in_use is %lu\n"
			"InnoDB: and trx->mysql_n_tables_locked is %lu.\n",
			(ulong)trx->n_mysql_tables_in_use,
			(ulong)trx->mysql_n_tables_locked);

		trx_print(stderr, trx, 600);

		ut_print_buf(stderr, trx, sizeof(trx_t));
	}

	ut_a(trx->magic_n == TRX_MAGIC_N);

	/* Poison the magic number to catch use-after-free */
	trx->magic_n = 11112222;

	ut_a(trx->conc_state == TRX_NOT_STARTED);

	mutex_free(&(trx->undo_mutex));

	ut_a(trx->insert_undo == NULL);
	ut_a(trx->update_undo == NULL);

	if (trx->undo_no_arr) {
		trx_undo_arr_free(trx->undo_no_arr);
	}

	ut_a(UT_LIST_GET_LEN(trx->signals) == 0);
	ut_a(UT_LIST_GET_LEN(trx->reply_signals) == 0);

	ut_a(trx->wait_lock == NULL);
	ut_a(UT_LIST_GET_LEN(trx->wait_thrs) == 0);

	ut_a(!trx->has_search_latch);
	ut_a(!trx->auto_inc_lock);

	ut_a(trx->dict_operation_lock_mode == 0);

	if (trx->lock_heap) {
		mem_heap_free(trx->lock_heap);
	}

	ut_a(UT_LIST_GET_LEN(trx->trx_locks) == 0);

	if (trx->global_read_view_heap) {
		mem_heap_free(trx->global_read_view_heap);
	}

	trx->global_read_view = NULL;

	ut_a(trx->read_view == NULL);

	mem_free(trx);
}
/************************************************************************
Frees a transaction object for MySQL: unlinks from mysql_trx_list and
decrements the MySQL transaction counter, all under the kernel mutex. */

void
trx_free_for_mysql(
/*===============*/
	trx_t*	trx)	/* in, own: trx object */
{
	mutex_enter(&kernel_mutex);

	UT_LIST_REMOVE(mysql_trx_list, trx_sys->mysql_trx_list, trx);

	trx_free(trx);

	ut_a(trx_n_mysql_transactions > 0);

	trx_n_mysql_transactions--;

	mutex_exit(&kernel_mutex);
}

/************************************************************************
Frees a transaction object of a background operation of the master
thread. Counterpart of trx_allocate_for_background(): no mysql_trx_list
bookkeeping. */

void
trx_free_for_background(
/*====================*/
	trx_t*	trx)	/* in, own: trx object */
{
	mutex_enter(&kernel_mutex);

	trx_free(trx);

	mutex_exit(&kernel_mutex);
}

/********************************************************************
Inserts the trx handle in the trx system trx list in the right position.
The list is sorted on the trx id so that the biggest id is at the list
start. This function is used at the database startup to insert incomplete
transactions to the list. */
static
void
trx_list_insert_ordered(
/*====================*/
	trx_t*	trx)	/* in: trx handle */
{
	trx_t*	trx2;

	ut_ad(mutex_own(&kernel_mutex));

	/* Scan for the first entry whose id is <= trx->id */
	trx2 = UT_LIST_GET_FIRST(trx_sys->trx_list);

	while (trx2 != NULL) {
		if (ut_dulint_cmp(trx->id, trx2->id) >= 0) {

			/* ids are unique: equal ids would be a bug */
			ut_ad(ut_dulint_cmp(trx->id, trx2->id) == 1);
			break;
		}
		trx2 = UT_LIST_GET_NEXT(trx_list, trx2);
	}

	if (trx2 != NULL) {
		trx2 = UT_LIST_GET_PREV(trx_list, trx2);

		if (trx2 == NULL) {
			UT_LIST_ADD_FIRST(trx_list, trx_sys->trx_list, trx);
		} else {
			UT_LIST_INSERT_AFTER(trx_list, trx_sys->trx_list,
					     trx2, trx);
		}
	} else {
		UT_LIST_ADD_LAST(trx_list, trx_sys->trx_list, trx);
	}
}

/********************************************************************
Creates trx objects for transactions and initializes the trx list of
trx_sys at database start. 
Rollback segment and undo log lists must already exist when this function
is called, because the lists of transactions to be rolled back or cleaned
up are built based on the undo log lists. */

void
trx_lists_init_at_db_start(void)
/*============================*/
{
	trx_rseg_t*	rseg;
	trx_undo_t*	undo;
	trx_t*		trx;

	UT_LIST_INIT(trx_sys->trx_list);

	/* Look from the rollback segments if there exist undo logs for
	transactions */

	rseg = UT_LIST_GET_FIRST(trx_sys->rseg_list);

	while (rseg != NULL) {
		/* First pass: insert undo logs — each one belongs to a
		distinct trx, so a new trx object is always created */
		undo = UT_LIST_GET_FIRST(rseg->insert_undo_list);

		while (undo != NULL) {

			trx = trx_create(NULL);

			trx->id = undo->trx_id;
			trx->xid = undo->xid;
			trx->insert_undo = undo;
			trx->rseg = rseg;

			if (undo->state != TRX_UNDO_ACTIVE) {

				/* Prepared transactions are left in
				the prepared state waiting for a
				commit or abort decision from MySQL */

				if (undo->state == TRX_UNDO_PREPARED) {

					fprintf(stderr,
						"InnoDB: Transaction %lu %lu"
						" was in the"
						" XA prepared state.\n",
						ut_dulint_get_high(trx->id),
						ut_dulint_get_low(trx->id));

					if (srv_force_recovery == 0) {

						trx->conc_state = TRX_PREPARED;
						trx_n_prepared++;
					} else {
						fprintf(stderr,
							"InnoDB: Since"
							" innodb_force_recovery"
							" > 0, we will"
							" rollback it"
							" anyway.\n");

						trx->conc_state = TRX_ACTIVE;
					}
				} else {
					trx->conc_state
						= TRX_COMMITTED_IN_MEMORY;
				}

				/* We give a dummy value for the trx no;
				this should have no relevance since purge
				is not interested in committed transaction
				numbers, unless they are in the history
				list, in which case it looks the number
				from the disk based undo log structure */

				trx->no = trx->id;
			} else {
				trx->conc_state = TRX_ACTIVE;

				/* A running transaction always has the number
				field inited to ut_dulint_max */

				trx->no = ut_dulint_max;
			}

			if (undo->dict_operation) {
				trx->dict_operation = undo->dict_operation;
				trx->table_id = undo->table_id;
			}

			if (!undo->empty) {
				trx->undo_no = ut_dulint_add(undo->top_undo_no,
							     1);
			}

			trx_list_insert_ordered(trx);

			undo = UT_LIST_GET_NEXT(undo_list, undo);
		}

		/* Second pass: update undo logs — the trx may already have
		been created above from its insert undo log, so look it up
		by id first */
		undo = UT_LIST_GET_FIRST(rseg->update_undo_list);

		while (undo != NULL) {
			trx = trx_get_on_id(undo->trx_id);

			if (NULL == trx) {
				trx = trx_create(NULL);

				trx->id = undo->trx_id;
				trx->xid = undo->xid;

				if (undo->state != TRX_UNDO_ACTIVE) {

					/* Prepared transactions are left in
					the prepared state waiting for a
					commit or abort decision from MySQL */

					if (undo->state == TRX_UNDO_PREPARED) {
						fprintf(stderr,
							"InnoDB: Transaction"
							" %lu %lu was in the"
							" XA prepared state.\n",
							ut_dulint_get_high(
								trx->id),
							ut_dulint_get_low(
								trx->id));

						if (srv_force_recovery == 0) {

							trx->conc_state
								= TRX_PREPARED;
							trx_n_prepared++;
						} else {
							fprintf(stderr,
								"InnoDB: Since"
								" innodb_force_recovery"
								" > 0, we will"
								" rollback it"
								" anyway.\n");

							trx->conc_state
								= TRX_ACTIVE;
						}
					} else {
						trx->conc_state
							= TRX_COMMITTED_IN_MEMORY;
					}

					/* We give a dummy value for the trx
					number */

					trx->no = trx->id;
				} else {
					trx->conc_state = TRX_ACTIVE;

					/* A running transaction always has
					the number field inited to
					ut_dulint_max */

					trx->no = ut_dulint_max;
				}

				trx->rseg = rseg;
				trx_list_insert_ordered(trx);

				if (undo->dict_operation) {
					trx->dict_operation
						= undo->dict_operation;
					trx->table_id = undo->table_id;
				}
			}

			trx->update_undo = undo;

			/* Keep the largest undo number + 1 seen in either
			undo log as the trx undo counter */
			if ((!undo->empty)
			    && (ut_dulint_cmp(undo->top_undo_no,
					      trx->undo_no) >= 0)) {

				trx->undo_no = ut_dulint_add(undo->top_undo_no,
							     1);
			}

			undo = UT_LIST_GET_NEXT(undo_list, undo);
		}

		rseg = UT_LIST_GET_NEXT(rseg_list, rseg);
	}
}

/**********************************************************************
Assigns a rollback segment to a transaction in a round-robin fashion.
Skips the SYSTEM rollback segment if another is available. 
*/
UNIV_INLINE
ulint
trx_assign_rseg(void)
/*=================*/
			/* out: assigned rollback segment id */
{
	trx_rseg_t*	rseg	= trx_sys->latest_rseg;

	ut_ad(mutex_own(&kernel_mutex));
loop:
	/* Get next rseg in a round-robin fashion */

	rseg = UT_LIST_GET_NEXT(rseg_list, rseg);

	if (rseg == NULL) {
		rseg = UT_LIST_GET_FIRST(trx_sys->rseg_list);
	}

	/* If it is the SYSTEM rollback segment, and there exist others, skip
	it */

	if ((rseg->id == TRX_SYS_SYSTEM_RSEG_ID)
	    && (UT_LIST_GET_LEN(trx_sys->rseg_list) > 1)) {
		goto loop;
	}

	trx_sys->latest_rseg = rseg;

	return(rseg->id);
}

/********************************************************************
Starts a new transaction: assigns a rollback segment and a new trx id,
marks the trx active and links it into trx_sys->trx_list. Caller must
hold the kernel mutex. A purge trx is a special case: it gets id zero
and no rollback segment. */

ibool
trx_start_low(
/*==========*/
			/* out: TRUE */
	trx_t*	trx,	/* in: transaction */
	ulint	rseg_id)/* in: rollback segment id; if ULINT_UNDEFINED
			is passed, the system chooses the rollback segment
			automatically in a round-robin fashion */
{
	trx_rseg_t*	rseg;

	ut_ad(mutex_own(&kernel_mutex));
	ut_ad(trx->rseg == NULL);

	if (trx->is_purge) {
		trx->id = ut_dulint_zero;
		trx->conc_state = TRX_ACTIVE;
		trx->start_time = time(NULL);

		return(TRUE);
	}

	ut_ad(trx->conc_state != TRX_ACTIVE);

	if (rseg_id == ULINT_UNDEFINED) {

		rseg_id = trx_assign_rseg();
	}

	rseg = trx_sys_get_nth_rseg(trx_sys, rseg_id);

	trx->id = trx_sys_get_new_trx_id();

	/* The initial value for trx->no: ut_dulint_max is used in
	read_view_open_now: */

	trx->no = ut_dulint_max;

	trx->rseg = rseg;

	trx->conc_state = TRX_ACTIVE;
	trx->start_time = time(NULL);

	UT_LIST_ADD_FIRST(trx_list, trx_sys->trx_list, trx);

	return(TRUE);
}

/********************************************************************
Starts a new transaction. 
*/

ibool
trx_start(
/*======*/
			/* out: TRUE */
	trx_t*	trx,	/* in: transaction */
	ulint	rseg_id)/* in: rollback segment id; if ULINT_UNDEFINED
			is passed, the system chooses the rollback segment
			automatically in a round-robin fashion */
{
	/* Kernel-mutex-acquiring wrapper around trx_start_low() */
	ibool	ret;

	mutex_enter(&kernel_mutex);

	ret = trx_start_low(trx, rseg_id);

	mutex_exit(&kernel_mutex);

	return(ret);
}

/********************************************************************
Commits a transaction. Called with the kernel mutex held; the mutex is
temporarily released while the undo log state changes and the log
write/flush are performed. On return the trx is back in the
TRX_NOT_STARTED state and removed from trx_sys->trx_list. */

void
trx_commit_off_kernel(
/*==================*/
	trx_t*	trx)	/* in: transaction */
{
	page_t*		update_hdr_page;
	dulint		lsn;
	trx_rseg_t*	rseg;
	trx_undo_t*	undo;
	ibool		must_flush_log	= FALSE;
	mtr_t		mtr;

	ut_ad(mutex_own(&kernel_mutex));

	trx->must_flush_log_later = FALSE;

	rseg = trx->rseg;

	if (trx->insert_undo != NULL || trx->update_undo != NULL) {

		mutex_exit(&kernel_mutex);

		mtr_start(&mtr);

		must_flush_log = TRUE;

		/* Change the undo log segment states from TRX_UNDO_ACTIVE
		to some other state: these modifications to the file data
		structure define the transaction as committed in the file
		based world, at the serialization point of the log sequence
		number lsn obtained below. */

		mutex_enter(&(rseg->mutex));

		if (trx->insert_undo != NULL) {
			trx_undo_set_state_at_finish(
				rseg, trx, trx->insert_undo, &mtr);
		}

		undo = trx->update_undo;

		if (undo) {
			mutex_enter(&kernel_mutex);
			trx->no = trx_sys_get_new_trx_no();

			mutex_exit(&kernel_mutex);

			/* It is not necessary to obtain trx->undo_mutex here
			because only a single OS thread is allowed to do the
			transaction commit for this transaction. */

			update_hdr_page = trx_undo_set_state_at_finish(
				rseg, trx, undo, &mtr);

			/* We have to do the cleanup for the update log while
			holding the rseg mutex because update log headers
			have to be put to the history list in the order of
			the trx number. */

			trx_undo_update_cleanup(trx, update_hdr_page, &mtr);
		}

		mutex_exit(&(rseg->mutex));

		/* Update the latest MySQL binlog name and offset info
		in trx sys header if MySQL binlogging is on or the database
		server is a MySQL replication slave */

		if (trx->mysql_log_file_name
		    && trx->mysql_log_file_name[0] != '\0') {
			trx_sys_update_mysql_binlog_offset(
				trx->mysql_log_file_name,
				trx->mysql_log_offset,
				TRX_SYS_MYSQL_LOG_INFO, &mtr);

			trx->mysql_log_file_name = NULL;
		}

		/* The following call commits the mini-transaction, making the
		whole transaction committed in the file-based world, at this
		log sequence number. The transaction becomes 'durable' when
		we write the log to disk, but in the logical sense the commit
		in the file-based data structures (undo logs etc.) happens
		here.

		NOTE that transaction numbers, which are assigned only to
		transactions with an update undo log, do not necessarily come
		in exactly the same order as commit lsn's, if the transactions
		have different rollback segments. To get exactly the same
		order we should hold the kernel mutex up to this point,
		adding to the contention of the kernel mutex. However, if
		a transaction T2 is able to see modifications made by
		a transaction T1, T2 will always get a bigger transaction
		number and a bigger commit lsn than T1. */

		/*--------------*/
		mtr_commit(&mtr);
		/*--------------*/
		lsn = mtr.end_lsn;

		mutex_enter(&kernel_mutex);
	}

	ut_ad(trx->conc_state == TRX_ACTIVE
	      || trx->conc_state == TRX_PREPARED);
	ut_ad(mutex_own(&kernel_mutex));

	if (UNIV_UNLIKELY(trx->conc_state == TRX_PREPARED)) {
		ut_a(trx_n_prepared > 0);
		trx_n_prepared--;
	}

	/* The following assignment makes the transaction committed in memory
	and makes its changes to data visible to other transactions.

	NOTE that there is a small discrepancy from the strict formal
	visibility rules here: a human user of the database can see
	modifications made by another transaction T even before the necessary
	log segment has been flushed to the disk. If the database happens to
	crash before the flush, the user has seen modifications from T which
	will never be a committed transaction. However, any transaction T2
	which sees the modifications of the committing transaction T, and
	which also itself makes modifications to the database, will get an lsn
	larger than the committing transaction T. In the case where the log
	flush fails, and T never gets committed, also T2 will never get
	committed. */

	/*--------------------------------------*/
	trx->conc_state = TRX_COMMITTED_IN_MEMORY;
	/*--------------------------------------*/

	lock_release_off_kernel(trx);

	if (trx->global_read_view) {
		read_view_close(trx->global_read_view);

		mem_heap_empty(trx->global_read_view_heap);

		trx->global_read_view = NULL;
	}

	trx->read_view = NULL;

	if (must_flush_log) {

		mutex_exit(&kernel_mutex);

		if (trx->insert_undo != NULL) {

			trx_undo_insert_cleanup(trx);
		}

		/* NOTE that we could possibly make a group commit more
		efficient here: call os_thread_yield here to allow also other
		trxs to come to commit! */

		/*-------------------------------------*/

		/* Depending on the my.cnf options, we may now write the log
		buffer to the log files, making the transaction durable if
		the OS does not crash. We may also flush the log files to
		disk, making the transaction durable also at an OS crash or a
		power outage.

		The idea in InnoDB's group commit is that a group of
		transactions gather behind a trx doing a physical disk write
		to log files, and when that physical write has been completed,
		one of those transactions does a write which commits the whole
		group. Note that this group commit will only bring benefit if
		there are > 2 users in the database. Then at least 2 users can
		gather behind one doing the physical log write to disk.

		If we are calling trx_commit() under MySQL's binlog mutex, we
		will delay possible log write and flush to a separate function
		trx_commit_complete_for_mysql(), which is only called when the
		thread has released the binlog mutex. This is to make the
		group commit algorithm to work. Otherwise, the MySQL binlog
		mutex would serialize all commits and prevent a group of
		transactions from gathering. */

		if (trx->flush_log_later) {
			/* Do nothing yet */
			trx->must_flush_log_later = TRUE;
		} else if (srv_flush_log_at_trx_commit == 0) {
			/* Do nothing */
		} else if (srv_flush_log_at_trx_commit == 1) {
			if (srv_unix_file_flush_method == SRV_UNIX_NOSYNC) {
				/* Write the log but do not flush it to disk */

				log_write_up_to(lsn, LOG_WAIT_ONE_GROUP,
						FALSE);
			} else {
				/* Write the log to the log files AND flush
				them to disk */

				log_write_up_to(lsn, LOG_WAIT_ONE_GROUP, TRUE);
			}
		} else if (srv_flush_log_at_trx_commit == 2) {

			/* Write the log but do not flush it to disk */

			log_write_up_to(lsn, LOG_WAIT_ONE_GROUP, FALSE);
		} else {
			ut_error;
		}

		trx->commit_lsn = lsn;

		/*-------------------------------------*/
		mutex_enter(&kernel_mutex);
	}

	/* Free all savepoints */
	trx_roll_free_all_savepoints(trx);

	/* Reset the trx back to the not-started state */
	trx->conc_state = TRX_NOT_STARTED;
	trx->rseg = NULL;
	trx->undo_no = ut_dulint_zero;
	trx->last_sql_stat_start.least_undo_no = ut_dulint_zero;
	trx->mysql_query_str = NULL;
	trx->mysql_query_len = NULL;

	ut_ad(UT_LIST_GET_LEN(trx->wait_thrs) == 0);
	ut_ad(UT_LIST_GET_LEN(trx->trx_locks) == 0);

	UT_LIST_REMOVE(trx_list, trx_sys->trx_list, trx);
}

/********************************************************************
Cleans up a transaction at database startup. The cleanup is needed if
the transaction already got to the middle of a commit when the database
crashed, and we cannot roll it back. 
*/

void
trx_cleanup_at_db_startup(
/*======================*/
	trx_t*	trx)	/* in: transaction */
{
	if (trx->insert_undo != NULL) {

		trx_undo_insert_cleanup(trx);
	}

	trx->conc_state = TRX_NOT_STARTED;
	trx->rseg = NULL;
	trx->undo_no = ut_dulint_zero;
	trx->last_sql_stat_start.least_undo_no = ut_dulint_zero;

	UT_LIST_REMOVE(trx_list, trx_sys->trx_list, trx);
}

/************************************************************************
Assigns a read view for a consistent read query. All the consistent reads
within the same transaction will get the same read view, which is created
when this function is first called for a new started transaction.
Note the double check of trx->read_view: the second check is done under
the kernel mutex in case another thread assigned the view meanwhile. */

read_view_t*
trx_assign_read_view(
/*=================*/
			/* out: consistent read view */
	trx_t*	trx)	/* in: active transaction */
{
	ut_ad(trx->conc_state == TRX_ACTIVE);

	if (trx->read_view) {
		return(trx->read_view);
	}

	mutex_enter(&kernel_mutex);

	if (!trx->read_view) {
		trx->read_view = read_view_open_now(
			trx->id, trx->global_read_view_heap);
		trx->global_read_view = trx->read_view;
	}

	mutex_exit(&kernel_mutex);

	return(trx->read_view);
}

/********************************************************************
Commits a transaction. NOTE that the kernel mutex is temporarily
released. 
*/
static
void
trx_handle_commit_sig_off_kernel(
/*=============================*/
	trx_t*		trx,		/* in: transaction */
	que_thr_t**	next_thr)	/* in/out: next query thread to run;
					if the value which is passed in is
					a pointer to a NULL pointer, then the
					calling function can start running
					a new query thread */
{
	trx_sig_t*	sig;
	trx_sig_t*	next_sig;

	ut_ad(mutex_own(&kernel_mutex));

	trx->que_state = TRX_QUE_COMMITTING;

	trx_commit_off_kernel(trx);

	ut_ad(UT_LIST_GET_LEN(trx->wait_thrs) == 0);

	/* Remove all TRX_SIG_COMMIT signals from the signal queue and send
	reply messages to them */

	sig = UT_LIST_GET_FIRST(trx->signals);

	while (sig != NULL) {
		next_sig = UT_LIST_GET_NEXT(signals, sig);

		if (sig->type == TRX_SIG_COMMIT) {

			trx_sig_reply(sig, next_thr);
			trx_sig_remove(trx, sig);
		}

		sig = next_sig;
	}

	trx->que_state = TRX_QUE_RUNNING;
}

/***************************************************************
The transaction must be in the TRX_QUE_LOCK_WAIT state. Puts it to the
TRX_QUE_RUNNING state and releases query threads which were waiting for
a lock in the wait_thrs list. */

void
trx_end_lock_wait(
/*==============*/
	trx_t*	trx)	/* in: transaction */
{
	que_thr_t*	thr;

	ut_ad(mutex_own(&kernel_mutex));
	ut_ad(trx->que_state == TRX_QUE_LOCK_WAIT);

	/* Drain the wait list, ending the wait of each thread in turn */
	thr = UT_LIST_GET_FIRST(trx->wait_thrs);

	while (thr != NULL) {
		que_thr_end_wait_no_next_thr(thr);

		UT_LIST_REMOVE(trx_thrs, trx->wait_thrs, thr);

		thr = UT_LIST_GET_FIRST(trx->wait_thrs);
	}

	trx->que_state = TRX_QUE_RUNNING;
}

/***************************************************************
Moves the query threads in the lock wait list to the SUSPENDED state and
puts the transaction to the TRX_QUE_RUNNING state. 
*/
static
void
trx_lock_wait_to_suspended(
/*=======================*/
	trx_t*	trx)	/* in: transaction in the TRX_QUE_LOCK_WAIT state */
{
	que_thr_t*	thr;

	ut_ad(mutex_own(&kernel_mutex));
	ut_ad(trx->que_state == TRX_QUE_LOCK_WAIT);

	/* Drain the wait list, suspending each thread instead of
	resuming it (contrast with trx_end_lock_wait) */
	thr = UT_LIST_GET_FIRST(trx->wait_thrs);

	while (thr != NULL) {
		thr->state = QUE_THR_SUSPENDED;

		UT_LIST_REMOVE(trx_thrs, trx->wait_thrs, thr);

		thr = UT_LIST_GET_FIRST(trx->wait_thrs);
	}

	trx->que_state = TRX_QUE_RUNNING;
}

/***************************************************************
Moves the query threads in the sig reply wait list of trx to the SUSPENDED
state. The receiver pointer of each signal is cleared so no reply will be
sent. */
static
void
trx_sig_reply_wait_to_suspended(
/*============================*/
	trx_t*	trx)	/* in: transaction */
{
	trx_sig_t*	sig;
	que_thr_t*	thr;

	ut_ad(mutex_own(&kernel_mutex));

	sig = UT_LIST_GET_FIRST(trx->reply_signals);

	while (sig != NULL) {
		thr = sig->receiver;

		ut_ad(thr->state == QUE_THR_SIG_REPLY_WAIT);

		thr->state = QUE_THR_SUSPENDED;

		sig->receiver = NULL;

		UT_LIST_REMOVE(reply_signals, trx->reply_signals, sig);

		sig = UT_LIST_GET_FIRST(trx->reply_signals);
	}
}

/*********************************************************************
Checks the compatibility of a new signal with the other signals in the
queue. 
*/
static
ibool
trx_sig_is_compatible(
/*==================*/
			/* out: TRUE if the signal can be queued */
	trx_t*	trx,	/* in: trx handle */
	ulint	type,	/* in: signal type */
	ulint	sender)	/* in: TRX_SIG_SELF or TRX_SIG_OTHER_SESS */
{
	trx_sig_t*	sig;

	ut_ad(mutex_own(&kernel_mutex));

	/* An empty queue accepts any signal */
	if (UT_LIST_GET_LEN(trx->signals) == 0) {

		return(TRUE);
	}

	if (sender == TRX_SIG_SELF) {
		/* A trx may only queue error/break signals on itself when
		other signals are already pending */
		if (type == TRX_SIG_ERROR_OCCURRED) {

			return(TRUE);

		} else if (type == TRX_SIG_BREAK_EXECUTION) {

			return(TRUE);
		} else {
			return(FALSE);
		}
	}

	ut_ad(sender == TRX_SIG_OTHER_SESS);

	sig = UT_LIST_GET_FIRST(trx->signals);

	if (type == TRX_SIG_COMMIT) {
		/* A commit is incompatible with a queued total rollback */
		while (sig != NULL) {

			if (sig->type == TRX_SIG_TOTAL_ROLLBACK) {

				return(FALSE);
			}

			sig = UT_LIST_GET_NEXT(signals, sig);
		}

		return(TRUE);

	} else if (type == TRX_SIG_TOTAL_ROLLBACK) {
		/* ... and vice versa */
		while (sig != NULL) {

			if (sig->type == TRX_SIG_COMMIT) {

				return(FALSE);
			}

			sig = UT_LIST_GET_NEXT(signals, sig);
		}

		return(TRUE);

	} else if (type == TRX_SIG_BREAK_EXECUTION) {

		return(TRUE);
	} else {
		ut_error;

		return(FALSE);
	}
}

/********************************************************************
Sends a signal to a trx object. 
*/

void
trx_sig_send(
/*=========*/
	trx_t*		trx,		/* in: trx handle */
	ulint		type,		/* in: signal type */
	ulint		sender,		/* in: TRX_SIG_SELF or
					TRX_SIG_OTHER_SESS */
	que_thr_t*	receiver_thr,	/* in: query thread which wants the
					reply, or NULL; if type is
					TRX_SIG_END_WAIT, this must be NULL */
	trx_savept_t*	savept,		/* in: possible rollback savepoint, or
					NULL */
	que_thr_t**	next_thr)	/* in/out: next query thread to run;
					if the value which is passed in is
					a pointer to a NULL pointer, then the
					calling function can start running
					a new query thread; if the parameter
					is NULL, it is ignored */
{
	trx_sig_t*	sig;
	trx_t*		receiver_trx;

	ut_ad(trx);
	ut_ad(mutex_own(&kernel_mutex));

	if (!trx_sig_is_compatible(trx, type, sender)) {
		/* The signal is not compatible with the other signals in
		the queue: die */

		ut_error;
	}

	/* Queue the signal object */

	if (UT_LIST_GET_LEN(trx->signals) == 0) {

		/* The signal list is empty: the 'sig' slot must be unused
		(we improve performance a bit by avoiding mem_alloc) */
		sig = &(trx->sig);
	} else {
		/* It might be that the 'sig' slot is unused also in this
		case, but we choose the easy way of using mem_alloc */

		sig = mem_alloc(sizeof(trx_sig_t));
	}

	UT_LIST_ADD_LAST(signals, trx->signals, sig);

	sig->type = type;
	sig->sender = sender;
	sig->receiver = receiver_thr;

	if (savept) {
		sig->savept = *savept;
	}

	if (receiver_thr) {
		receiver_trx = thr_get_trx(receiver_thr);

		UT_LIST_ADD_LAST(reply_signals, receiver_trx->reply_signals,
				 sig);
	}

	if (trx->sess->state == SESS_ERROR) {

		trx_sig_reply_wait_to_suspended(trx);
	}

	if ((sender != TRX_SIG_SELF) || (type == TRX_SIG_BREAK_EXECUTION)) {
		ut_error;
	}

	/* If there were no other signals ahead in the queue, try to start
	handling of the signal */

	if (UT_LIST_GET_FIRST(trx->signals) == sig) {

		trx_sig_start_handle(trx, next_thr);
	}
}

/********************************************************************
Ends signal handling. 
If the session is in the error state, and trx->graph_before_signal_handling
!= NULL, then returns control to the error handling routine of the graph
(currently just returns the control to the graph root which then will
send an error message to the client). */

void
trx_end_signal_handling(
/*====================*/
	trx_t*	trx)	/* in: trx */
{
	ut_ad(mutex_own(&kernel_mutex));
	ut_ad(trx->handling_signals == TRUE);

	trx->handling_signals = FALSE;

	trx->graph = trx->graph_before_signal_handling;

	if (trx->graph && (trx->sess->state == SESS_ERROR)) {

		que_fork_error_handle(trx, trx->graph);
	}
}

/********************************************************************
Starts handling of a trx signal: loops over the signal queue, handling
each signal that can be processed immediately, and returns when the
queue is empty, a rollback is in progress, or query threads of the trx
are still active. Caller must hold the kernel mutex. */

void
trx_sig_start_handle(
/*=================*/
	trx_t*		trx,		/* in: trx handle */
	que_thr_t**	next_thr)	/* in/out: next query thread to run;
					if the value which is passed in is
					a pointer to a NULL pointer, then the
					calling function can start running
					a new query thread; if the parameter
					is NULL, it is ignored */
{
	trx_sig_t*	sig;
	ulint		type;
loop:
	/* We loop in this function body as long as there are queued signals
	we can process immediately */

	ut_ad(trx);
	ut_ad(mutex_own(&kernel_mutex));

	if (trx->handling_signals && (UT_LIST_GET_LEN(trx->signals) == 0)) {

		trx_end_signal_handling(trx);

		return;
	}

	if (trx->conc_state == TRX_NOT_STARTED) {

		trx_start_low(trx, ULINT_UNDEFINED);
	}

	/* If the trx is in a lock wait state, moves the waiting query threads
	to the suspended state */

	if (trx->que_state == TRX_QUE_LOCK_WAIT) {

		trx_lock_wait_to_suspended(trx);
	}

	/* If the session is in the error state and this trx has threads
	waiting for reply from signals, moves these threads to the suspended
	state, canceling wait reservations; note that if the transaction has
	sent a commit or rollback signal to itself, and its session is not in
	the error state, then nothing is done here. */

	if (trx->sess->state == SESS_ERROR) {
		trx_sig_reply_wait_to_suspended(trx);
	}

	/* If there are no running query threads, we can start processing of a
	signal, otherwise we have to wait until all query threads of this
	transaction are aware of the arrival of the signal. */

	if (trx->n_active_thrs > 0) {

		return;
	}

	if (trx->handling_signals == FALSE) {
		trx->graph_before_signal_handling = trx->graph;

		trx->handling_signals = TRUE;
	}

	sig = UT_LIST_GET_FIRST(trx->signals);
	type = sig->type;

	if (type == TRX_SIG_COMMIT) {

		trx_handle_commit_sig_off_kernel(trx, next_thr);

	} else if ((type == TRX_SIG_TOTAL_ROLLBACK)
		   || (type == TRX_SIG_ROLLBACK_TO_SAVEPT)) {

		trx_rollback(trx, sig, next_thr);

		/* No further signals can be handled until the rollback
		completes, therefore we return */

		return;

	} else if (type == TRX_SIG_ERROR_OCCURRED) {

		trx_rollback(trx, sig, next_thr);

		/* No further signals can be handled until the rollback
		completes, therefore we return */

		return;

	} else if (type == TRX_SIG_BREAK_EXECUTION) {

		trx_sig_reply(sig, next_thr);
		trx_sig_remove(trx, sig);
	} else {
		ut_error;
	}

	goto loop;
}

/********************************************************************
Send the reply message when a signal in the queue of the trx has been
handled. 
*/ void trx_sig_reply( /*==========*/ trx_sig_t* sig, /* in: signal */ que_thr_t** next_thr) /* in/out: next query thread to run; if the value which is passed in is a pointer to a NULL pointer, then the calling function can start running a new query thread */ { trx_t* receiver_trx; ut_ad(sig); ut_ad(mutex_own(&kernel_mutex)); if (sig->receiver != NULL) { ut_ad((sig->receiver)->state == QUE_THR_SIG_REPLY_WAIT); receiver_trx = thr_get_trx(sig->receiver); UT_LIST_REMOVE(reply_signals, receiver_trx->reply_signals, sig); ut_ad(receiver_trx->sess->state != SESS_ERROR); que_thr_end_wait(sig->receiver, next_thr); sig->receiver = NULL; } } /******************************************************************** Removes a signal object from the trx signal queue. */ void trx_sig_remove( /*===========*/ trx_t* trx, /* in: trx handle */ trx_sig_t* sig) /* in, own: signal */ { ut_ad(trx && sig); ut_ad(mutex_own(&kernel_mutex)); ut_ad(sig->receiver == NULL); UT_LIST_REMOVE(signals, trx->signals, sig); sig->type = 0; /* reset the field to catch possible bugs */ if (sig != &(trx->sig)) { mem_free(sig); } } /************************************************************************* Creates a commit command node struct. */ commit_node_t* commit_node_create( /*===============*/ /* out, own: commit node struct */ mem_heap_t* heap) /* in: mem heap where created */ { commit_node_t* node; node = mem_heap_alloc(heap, sizeof(commit_node_t)); node->common.type = QUE_NODE_COMMIT; node->state = COMMIT_NODE_SEND; return(node); } /*************************************************************** Performs an execution step for a commit type node in a query graph. 
*/ que_thr_t* trx_commit_step( /*============*/ /* out: query thread to run next, or NULL */ que_thr_t* thr) /* in: query thread */ { commit_node_t* node; que_thr_t* next_thr; node = thr->run_node; ut_ad(que_node_get_type(node) == QUE_NODE_COMMIT); if (thr->prev_node == que_node_get_parent(node)) { node->state = COMMIT_NODE_SEND; } if (node->state == COMMIT_NODE_SEND) { mutex_enter(&kernel_mutex); node->state = COMMIT_NODE_WAIT; next_thr = NULL; thr->state = QUE_THR_SIG_REPLY_WAIT; /* Send the commit signal to the transaction */ trx_sig_send(thr_get_trx(thr), TRX_SIG_COMMIT, TRX_SIG_SELF, thr, NULL, &next_thr); mutex_exit(&kernel_mutex); return(next_thr); } ut_ad(node->state == COMMIT_NODE_WAIT); node->state = COMMIT_NODE_SEND; thr->run_node = que_node_get_parent(node); return(thr); } /************************************************************************** Does the transaction commit for MySQL. */ ulint trx_commit_for_mysql( /*=================*/ /* out: 0 or error number */ trx_t* trx) /* in: trx handle */ { /* Because we do not do the commit by sending an Innobase sig to the transaction, we must here make sure that trx has been started. */ ut_a(trx); trx->op_info = "committing"; /* If we are doing the XA recovery of prepared transactions, then the transaction object does not have an InnoDB session object, and we set the dummy session that we use for all MySQL transactions. */ if (trx->sess == NULL) { /* Open a dummy session */ if (!trx_dummy_sess) { mutex_enter(&kernel_mutex); if (!trx_dummy_sess) { trx_dummy_sess = sess_open(); } mutex_exit(&kernel_mutex); } trx->sess = trx_dummy_sess; } trx_start_if_not_started(trx); mutex_enter(&kernel_mutex); trx_commit_off_kernel(trx); mutex_exit(&kernel_mutex); trx->op_info = ""; return(0); } /************************************************************************** If required, flushes the log to disk if we called trx_commit_for_mysql() with trx->flush_log_later == TRUE. 
*/ ulint trx_commit_complete_for_mysql( /*==========================*/ /* out: 0 or error number */ trx_t* trx) /* in: trx handle */ { dulint lsn = trx->commit_lsn; ut_a(trx); trx->op_info = "flushing log"; if (!trx->must_flush_log_later) { /* Do nothing */ } else if (srv_flush_log_at_trx_commit == 0) { /* Do nothing */ } else if (srv_flush_log_at_trx_commit == 1) { if (srv_unix_file_flush_method == SRV_UNIX_NOSYNC) { /* Write the log but do not flush it to disk */ log_write_up_to(lsn, LOG_WAIT_ONE_GROUP, FALSE); } else { /* Write the log to the log files AND flush them to disk */ log_write_up_to(lsn, LOG_WAIT_ONE_GROUP, TRUE); } } else if (srv_flush_log_at_trx_commit == 2) { /* Write the log but do not flush it to disk */ log_write_up_to(lsn, LOG_WAIT_ONE_GROUP, FALSE); } else { ut_error; } trx->must_flush_log_later = FALSE; trx->op_info = ""; return(0); } /************************************************************************** Marks the latest SQL statement ended. */ void trx_mark_sql_stat_end( /*==================*/ trx_t* trx) /* in: trx handle */ { ut_a(trx); if (trx->conc_state == TRX_NOT_STARTED) { trx->undo_no = ut_dulint_zero; } trx->last_sql_stat_start.least_undo_no = trx->undo_no; } /************************************************************************** Prints info about a transaction to the given file. The caller must own the kernel mutex. 
*/

void
trx_print(
/*======*/
	FILE*	f,		/* in: output stream */
	trx_t*	trx,		/* in: transaction */
	ulint	max_query_len)	/* in: max query length to print, or 0 to
				use the default max length */
{
	ibool	newline;

	fprintf(f, "TRANSACTION %lu %lu",
		(ulong) ut_dulint_get_high(trx->id),
		(ulong) ut_dulint_get_low(trx->id));

	switch (trx->conc_state) {
	case TRX_NOT_STARTED:
		fputs(", not started", f);
		break;
	case TRX_ACTIVE:
		fprintf(f, ", ACTIVE %lu sec",
			(ulong)difftime(time(NULL), trx->start_time));
		break;
	case TRX_PREPARED:
		fprintf(f, ", ACTIVE (PREPARED) %lu sec",
			(ulong)difftime(time(NULL), trx->start_time));
		break;
	case TRX_COMMITTED_IN_MEMORY:
		fputs(", COMMITTED IN MEMORY", f);
		break;
	default:
		fprintf(f, " state %lu", (ulong) trx->conc_state);
	}

#ifdef UNIV_LINUX
	fprintf(f, ", process no %lu", trx->mysql_process_no);
#endif
	fprintf(f, ", OS thread id %lu",
		(ulong) os_thread_pf(trx->mysql_thread_id));

	if (*trx->op_info) {
		putc(' ', f);
		fputs(trx->op_info, f);
	}

	if (trx->is_purge) {
		fputs(" purge trx", f);
	}

	if (trx->declared_to_be_inside_innodb) {
		fprintf(f, ", thread declared inside InnoDB %lu",
			(ulong) trx->n_tickets_to_enter_innodb);
	}

	putc('\n', f);

	if (trx->n_mysql_tables_in_use > 0 || trx->mysql_n_tables_locked > 0) {
		fprintf(f, "mysql tables in use %lu, locked %lu\n",
			(ulong) trx->n_mysql_tables_in_use,
			(ulong) trx->mysql_n_tables_locked);
	}

	/* newline tracks whether any of the optional lines below were
	printed, so we know to terminate with '\n'. */
	newline = TRUE;

	switch (trx->que_state) {
	case TRX_QUE_RUNNING:
		newline = FALSE;
		break;
	case TRX_QUE_LOCK_WAIT:
		fputs("LOCK WAIT ", f);
		break;
	case TRX_QUE_ROLLING_BACK:
		fputs("ROLLING BACK ", f);
		break;
	case TRX_QUE_COMMITTING:
		fputs("COMMITTING ", f);
		break;
	default:
		fprintf(f, "que state %lu ", (ulong) trx->que_state);
	}

	/* NOTE(review): 400 looks like a heuristic threshold so small
	transactions do not clutter the output -- confirm against the
	original trx0trx.c. */
	if (0 < UT_LIST_GET_LEN(trx->trx_locks)
	    || mem_heap_get_size(trx->lock_heap) > 400) {
		newline = TRUE;

		fprintf(f, "%lu lock struct(s), heap size %lu,"
			" %lu row lock(s)",
			(ulong) UT_LIST_GET_LEN(trx->trx_locks),
			(ulong) mem_heap_get_size(trx->lock_heap),
			(ulong) lock_number_of_rows_locked(trx));
	}

	if (trx->has_search_latch) {
		newline = TRUE;
		fputs(", holds adaptive hash latch", f);
	}

	if (ut_dulint_cmp(trx->undo_no, ut_dulint_zero) != 0) {
		newline = TRUE;
		fprintf(f, ", undo log entries %lu",
			(ulong) ut_dulint_get_low(trx->undo_no));
	}

	if (newline) {
		putc('\n', f);
	}

	if (trx->mysql_thd != NULL) {
		innobase_mysql_print_thd(f, trx->mysql_thd, max_query_len);
	}
}

/***********************************************************************
Compares the "weight" (or size) of two transactions. The weight of one
transaction is estimated as the number of altered rows + the number of
locked rows. Transactions that have edited non-transactional tables are
considered heavier than ones that have not. */

int
trx_weight_cmp(
/*===========*/
			/* out: <0, 0 or >0; similar to strcmp(3) */
	trx_t*	a,	/* in: the first transaction to be compared */
	trx_t*	b)	/* in: the second transaction to be compared */
{
	ibool	a_notrans_edit;
	ibool	b_notrans_edit;

	/* If mysql_thd is NULL for a transaction we assume that it has
	not edited non-transactional tables. */

	a_notrans_edit = a->mysql_thd != NULL
		&& thd_has_edited_nontrans_tables(a->mysql_thd);

	b_notrans_edit = b->mysql_thd != NULL
		&& thd_has_edited_nontrans_tables(b->mysql_thd);

	/* Non-transactional edits dominate the comparison: such a trx is
	always "heavier". */
	if (a_notrans_edit && !b_notrans_edit) {

		return(1);
	}

	if (!a_notrans_edit && b_notrans_edit) {

		return(-1);
	}

	/* Either both had edited non-transactional tables or both had
	not, we fall back to comparing the number of altered/locked
	rows. */

#if 0
	fprintf(stderr,
		"%s TRX_WEIGHT(a): %lld+%lu, TRX_WEIGHT(b): %lld+%lu\n",
		__func__,
		ut_conv_dulint_to_longlong(a->undo_no),
		UT_LIST_GET_LEN(a->trx_locks),
		ut_conv_dulint_to_longlong(b->undo_no),
		UT_LIST_GET_LEN(b->trx_locks));
#endif

#define TRX_WEIGHT(t)	\
	ut_dulint_add((t)->undo_no, UT_LIST_GET_LEN((t)->trx_locks))

	return(ut_dulint_cmp(TRX_WEIGHT(a), TRX_WEIGHT(b)));
}

/********************************************************************
Prepares a transaction.
*/

void
trx_prepare_off_kernel(
/*===================*/
	trx_t*	trx)	/* in: transaction */
{
	trx_rseg_t*	rseg;
	ibool		must_flush_log	= FALSE;
	dulint		lsn;
	mtr_t		mtr;

	ut_ad(mutex_own(&kernel_mutex));

	rseg = trx->rseg;

	if (trx->insert_undo != NULL || trx->update_undo != NULL) {

		/* The kernel mutex is released for the duration of the
		file-based (mtr) work and re-acquired afterwards. */
		mutex_exit(&kernel_mutex);

		mtr_start(&mtr);

		must_flush_log = TRUE;

		/* Change the undo log segment states from TRX_UNDO_ACTIVE
		to TRX_UNDO_PREPARED: these modifications to the file data
		structure define the transaction as prepared in the
		file-based world, at the serialization point of lsn. */

		mutex_enter(&(rseg->mutex));

		if (trx->insert_undo != NULL) {

			/* It is not necessary to obtain trx->undo_mutex here
			because only a single OS thread is allowed to do the
			transaction prepare for this transaction. */

			trx_undo_set_state_at_prepare(trx, trx->insert_undo,
						      &mtr);
		}

		if (trx->update_undo) {
			trx_undo_set_state_at_prepare(
				trx, trx->update_undo, &mtr);
		}

		mutex_exit(&(rseg->mutex));

		/*--------------*/
		mtr_commit(&mtr);	/* This mtr commit makes the
					transaction prepared in the file-based
					world */
		/*--------------*/
		lsn = mtr.end_lsn;

		mutex_enter(&kernel_mutex);
	}

	ut_ad(mutex_own(&kernel_mutex));

	/*--------------------------------------*/
	trx->conc_state = TRX_PREPARED;
	trx_n_prepared++;
	/*--------------------------------------*/

	if (must_flush_log) {
		/* Depending on the my.cnf options, we may now write the log
		buffer to the log files, making the prepared state of the
		transaction durable if the OS does not crash. We may also
		flush the log files to disk, making the prepared state of the
		transaction durable also at an OS crash or a power outage.

		The idea in InnoDB's group prepare is that a group of
		transactions gather behind a trx doing a physical disk write
		to log files, and when that physical write has been completed,
		one of those transactions does a write which prepares the
		whole group. Note that this group prepare will only bring
		benefit if there are > 2 users in the database. Then at least
		2 users can gather behind one doing the physical log write to
		disk.

		TODO: find out if MySQL holds some mutex when calling this.
		That would spoil our group prepare algorithm. */

		mutex_exit(&kernel_mutex);

		/* NOTE: lsn is only set in the branch above; must_flush_log
		is TRUE exactly when that branch ran, so lsn is always
		initialized here. */
		if (srv_flush_log_at_trx_commit == 0) {
			/* Do nothing */
		} else if (srv_flush_log_at_trx_commit == 1) {
			if (srv_unix_file_flush_method == SRV_UNIX_NOSYNC) {
				/* Write the log but do not flush it to
				disk */

				log_write_up_to(lsn, LOG_WAIT_ONE_GROUP,
						FALSE);
			} else {
				/* Write the log to the log files AND flush
				them to disk */

				log_write_up_to(lsn, LOG_WAIT_ONE_GROUP,
						TRUE);
			}
		} else if (srv_flush_log_at_trx_commit == 2) {

			/* Write the log but do not flush it to disk */

			log_write_up_to(lsn, LOG_WAIT_ONE_GROUP, FALSE);
		} else {
			ut_error;
		}

		mutex_enter(&kernel_mutex);
	}
}

/**************************************************************************
Does the transaction prepare for MySQL. */

ulint
trx_prepare_for_mysql(
/*==================*/
			/* out: 0 or error number */
	trx_t*	trx)	/* in: trx handle */
{
	/* Because we do not do the prepare by sending an Innobase
	sig to the transaction, we must here make sure that trx has been
	started. */

	ut_a(trx);

	trx->op_info = "preparing";

	trx_start_if_not_started(trx);

	mutex_enter(&kernel_mutex);

	trx_prepare_off_kernel(trx);

	mutex_exit(&kernel_mutex);

	trx->op_info = "";

	return(0);
}

/**************************************************************************
This function is used to find number of prepared transactions and
their transaction objects for a recovery.
*/

int
trx_recover_for_mysql(
/*==================*/
				/* out: number of prepared transactions
				stored in xid_list */
	XID*	xid_list,	/* in/out: prepared transactions */
	ulint	len)		/* in: number of slots in xid_list */
{
	trx_t*	trx;
	ulint	count = 0;

	ut_ad(xid_list);
	ut_ad(len);

	/* We should set those transactions which are in the prepared state
	to the xid_list */

	mutex_enter(&kernel_mutex);

	trx = UT_LIST_GET_FIRST(trx_sys->trx_list);

	while (trx) {
		if (trx->conc_state == TRX_PREPARED) {
			xid_list[count] = trx->xid;

			/* Print the header only before the first match. */
			if (count == 0) {
				ut_print_timestamp(stderr);
				fprintf(stderr,
					"  InnoDB: Starting recovery for"
					" XA transactions...\n");
			}

			ut_print_timestamp(stderr);
			fprintf(stderr,
				"  InnoDB: Transaction %lu %lu in"
				" prepared state after recovery\n",
				(ulong) ut_dulint_get_high(trx->id),
				(ulong) ut_dulint_get_low(trx->id));

			ut_print_timestamp(stderr);
			fprintf(stderr,
				"  InnoDB: Transaction contains changes"
				" to %lu rows\n",
				(ulong) ut_conv_dulint_to_longlong(
					trx->undo_no));

			count++;

			/* Stop when the caller-provided buffer is full. */
			if (count == len) {
				break;
			}
		}

		trx = UT_LIST_GET_NEXT(trx_list, trx);
	}

	mutex_exit(&kernel_mutex);

	if (count > 0){
		ut_print_timestamp(stderr);
		fprintf(stderr,
			"  InnoDB: %lu transactions in prepared state"
			" after recovery\n",
			(ulong) count);
	}

	return ((int) count);
}

/***********************************************************************
This function is used to find one X/Open XA distributed transaction
which is in the prepared state */

trx_t*
trx_get_trx_by_xid(
/*===============*/
				/* out: trx or NULL; on match, the trx->xid
				will be invalidated */
	const XID*	xid)	/* in: X/Open XA transaction identifier */
{
	trx_t*	trx;

	if (xid == NULL) {

		return(NULL);
	}

	mutex_enter(&kernel_mutex);

	trx = UT_LIST_GET_FIRST(trx_sys->trx_list);

	while (trx) {
		/* Compare two X/Open XA transaction id's: their
		length should be the same and binary comparison
		of gtrid_length+bqual_length bytes should be
		the same */

		if (trx->conc_state == TRX_PREPARED
		    && xid->gtrid_length == trx->xid.gtrid_length
		    && xid->bqual_length == trx->xid.bqual_length
		    && memcmp(xid->data, trx->xid.data,
			      xid->gtrid_length + xid->bqual_length) == 0) {

			/* Invalidate the XID, so that subsequent calls
			will not find it. */
			memset(&trx->xid, 0, sizeof(trx->xid));
			trx->xid.formatID = -1;
			break;
		}

		trx = UT_LIST_GET_NEXT(trx_list, trx);
	}

	mutex_exit(&kernel_mutex);

	return(trx);
}
//
// Copyright (c) 2009-2010 Mikko Mononen memon@inside.org
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
// 1. The origin of this software must not be misrepresented; you must not
//    claim that you wrote the original software. If you use this software
//    in a product, an acknowledgment in the product documentation would be
//    appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
//    misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
//

#include <float.h>
#include <string.h>
#include "DetourNavMeshQuery.h"
#include "DetourNavMesh.h"
#include "DetourNode.h"
#include "DetourCommon.h"
#include "DetourMath.h"
#include "DetourAlloc.h"
#include "DetourAssert.h"
#include <new>

/// @class dtQueryFilter
///
/// <b>The Default Implementation</b>
///
/// At construction: All area costs default to 1.0. All flags are included
/// and none are excluded.
///
/// If a polygon has both an include and an exclude flag, it will be excluded.
///
/// The way filtering works, a navigation mesh polygon must have at least one flag
/// set to ever be considered by a query. So a polygon with no flags will never
/// be considered.
///
/// Setting the include flags to 0 will result in all polygons being excluded.
///
/// <b>Custom Implementations</b>
///
/// DT_VIRTUAL_QUERYFILTER must be defined in order to extend this class.
///
/// Implement a custom query filter by overriding the virtual passFilter()
/// and getCost() functions. If this is done, both functions should be as
/// fast as possible. Use cached local copies of data rather than accessing
/// your own objects where possible.
///
/// Custom implementations do not need to adhere to the flags or cost logic
/// used by the default implementation.
///
/// In order for A* searches to work properly, the cost should be proportional to
/// the travel distance. Implementing a cost modifier less than 1.0 is likely
/// to lead to problems during pathfinding.
///
/// @see dtNavMeshQuery

dtQueryFilter::dtQueryFilter() :
	m_includeFlags(0xffff),
	m_excludeFlags(0)
{
	// All areas are traversable at unit cost by default.
	for (int i = 0; i < DT_MAX_AREAS; ++i)
		m_areaCost[i] = 1.0f;
}

#ifdef DT_VIRTUAL_QUERYFILTER
// Virtual variants: overridable when DT_VIRTUAL_QUERYFILTER is defined.
bool dtQueryFilter::passFilter(const dtPolyRef /*ref*/,
							   const dtMeshTile* /*tile*/,
							   const dtPoly* poly) const
{
	// A polygon passes when it has at least one included flag and no
	// excluded flag.
	return (poly->flags & m_includeFlags) != 0 && (poly->flags & m_excludeFlags) == 0;
}

float dtQueryFilter::getCost(const float* pa, const float* pb,
							 const dtPolyRef /*prevRef*/, const dtMeshTile* /*prevTile*/, const dtPoly* /*prevPoly*/,
							 const dtPolyRef /*curRef*/, const dtMeshTile* /*curTile*/, const dtPoly* curPoly,
							 const dtPolyRef /*nextRef*/, const dtMeshTile* /*nextTile*/, const dtPoly* /*nextPoly*/) const
{
	// Cost = euclidean distance scaled by the current polygon's area cost.
	return dtVdist(pa, pb) * m_areaCost[curPoly->getArea()];
}
#else
// Non-virtual (inline) variants used when the filter is not extensible.
inline bool dtQueryFilter::passFilter(const dtPolyRef /*ref*/,
									  const dtMeshTile* /*tile*/,
									  const dtPoly* poly) const
{
	return (poly->flags & m_includeFlags) != 0 && (poly->flags & m_excludeFlags) == 0;
}

inline float dtQueryFilter::getCost(const float* pa, const float* pb,
									const dtPolyRef /*prevRef*/, const dtMeshTile* /*prevTile*/, const dtPoly* /*prevPoly*/,
									const dtPolyRef /*curRef*/, const dtMeshTile* /*curTile*/, const dtPoly* curPoly,
									const dtPolyRef /*nextRef*/, const dtMeshTile* /*nextTile*/, const dtPoly* /*nextPoly*/) const
{
	return dtVdist(pa, pb) * m_areaCost[curPoly->getArea()];
}
#endif

static const float H_SCALE = 0.999f; // Search heuristic scale.
dtNavMeshQuery* dtAllocNavMeshQuery() { void* mem = dtAlloc(sizeof(dtNavMeshQuery), DT_ALLOC_PERM); if (!mem) return 0; return new(mem) dtNavMeshQuery; } void dtFreeNavMeshQuery(dtNavMeshQuery* navmesh) { if (!navmesh) return; navmesh->~dtNavMeshQuery(); dtFree(navmesh); } ////////////////////////////////////////////////////////////////////////////////////////// /// @class dtNavMeshQuery /// /// For methods that support undersized buffers, if the buffer is too small /// to hold the entire result set the return status of the method will include /// the #DT_BUFFER_TOO_SMALL flag. /// /// Constant member functions can be used by multiple clients without side /// effects. (E.g. No change to the closed list. No impact on an in-progress /// sliced path query. Etc.) /// /// Walls and portals: A @e wall is a polygon segment that is /// considered impassable. A @e portal is a passable segment between polygons. /// A portal may be treated as a wall based on the dtQueryFilter used for a query. /// /// @see dtNavMesh, dtQueryFilter, #dtAllocNavMeshQuery(), #dtAllocNavMeshQuery() dtNavMeshQuery::dtNavMeshQuery() : m_nav(0), m_tinyNodePool(0), m_nodePool(0), m_openList(0) { memset(&m_query, 0, sizeof(dtQueryData)); } dtNavMeshQuery::~dtNavMeshQuery() { if (m_tinyNodePool) m_tinyNodePool->~dtNodePool(); if (m_nodePool) m_nodePool->~dtNodePool(); if (m_openList) m_openList->~dtNodeQueue(); dtFree(m_tinyNodePool); dtFree(m_nodePool); dtFree(m_openList); } /// @par /// /// Must be the first function called after construction, before other /// functions are used. /// /// This function can be used multiple times. 
dtStatus dtNavMeshQuery::init(const dtNavMesh* nav, const int maxNodes, unsigned int threadId) { m_nav = nav; m_owningThread = threadId; if (!m_nodePool || m_nodePool->getMaxNodes() < maxNodes) { if (m_nodePool) { m_nodePool->~dtNodePool(); dtFree(m_nodePool); m_nodePool = 0; } m_nodePool = new (dtAlloc(sizeof(dtNodePool), DT_ALLOC_PERM)) dtNodePool(maxNodes, dtNextPow2(maxNodes/4)); if (!m_nodePool) return DT_FAILURE | DT_OUT_OF_MEMORY; } else { m_nodePool->clear(); } if (!m_tinyNodePool) { m_tinyNodePool = new (dtAlloc(sizeof(dtNodePool), DT_ALLOC_PERM)) dtNodePool(64, 32); if (!m_tinyNodePool) return DT_FAILURE | DT_OUT_OF_MEMORY; } else { m_tinyNodePool->clear(); } // TODO: check the open list size too. if (!m_openList || m_openList->getCapacity() < maxNodes) { if (m_openList) { m_openList->~dtNodeQueue(); dtFree(m_openList); m_openList = 0; } m_openList = new (dtAlloc(sizeof(dtNodeQueue), DT_ALLOC_PERM)) dtNodeQueue(maxNodes); if (!m_openList) return DT_FAILURE | DT_OUT_OF_MEMORY; } else { m_openList->clear(); } return DT_SUCCESS; } dtStatus dtNavMeshQuery::findRandomPoint(const dtQueryFilter* filter, float (*frand)(), dtPolyRef* randomRef, float* randomPt) const { dtAssert(m_nav); // Randomly pick one tile. Assume that all tiles cover roughly the same area. const dtMeshTile* tile = 0; float tsum = 0.0f; for (int i = 0; i < m_nav->getMaxTiles(); i++) { const dtMeshTile* t = m_nav->getTile(i); if (!t || !t->header) continue; // Choose random tile using reservoi sampling. const float area = 1.0f; // Could be tile area too. tsum += area; const float u = frand(); if (u*tsum <= area) tile = t; } if (!tile) return DT_FAILURE; // Randomly pick one polygon weighted by polygon area. const dtPoly* poly = 0; dtPolyRef polyRef = 0; const dtPolyRef base = m_nav->getPolyRefBase(tile); float areaSum = 0.0f; for (int i = 0; i < tile->header->polyCount; ++i) { const dtPoly* p = &tile->polys[i]; // Do not return off-mesh connection polygons. 
		if (p->getType() != DT_POLYTYPE_GROUND)
			continue;
		// Must pass filter
		const dtPolyRef ref = base | (dtPolyRef)i;
		if (!filter->passFilter(ref, tile, p))
			continue;
		
		// Calc area of the polygon (fan triangulation from vertex 0).
		float polyArea = 0.0f;
		for (int j = 2; j < p->vertCount; ++j)
		{
			const float* va = &tile->verts[p->verts[0]*3];
			const float* vb = &tile->verts[p->verts[j-1]*3];
			const float* vc = &tile->verts[p->verts[j]*3];
			polyArea += dtTriArea2D(va,vb,vc);
		}
		
		// Choose random polygon weighted by area, using reservoir sampling.
		areaSum += polyArea;
		const float u = frand();
		if (u*areaSum <= polyArea)
		{
			poly = p;
			polyRef = ref;
		}
	}
	
	if (!poly)
		return DT_FAILURE;
	
	// Randomly pick point on polygon.
	const float* v = &tile->verts[poly->verts[0]*3];
	float verts[3*DT_VERTS_PER_POLYGON];
	float areas[DT_VERTS_PER_POLYGON];
	dtVcopy(&verts[0*3],v);
	for (int j = 1; j < poly->vertCount; ++j)
	{
		v = &tile->verts[poly->verts[j]*3];
		dtVcopy(&verts[j*3],v);
	}
	
	const float s = frand();
	const float t = frand();
	
	float pt[3];
	dtRandomPointInConvexPoly(verts, poly->vertCount, areas, s, t, pt);
	
	// Snap the point to the detail-mesh surface height.
	float h = 0.0f;
	dtStatus status = getPolyHeight(polyRef, pt, &h);
	if (dtStatusFailed(status))
		return status;
	pt[1] = h;
	
	dtVcopy(randomPt, pt);
	*randomRef = polyRef;
	
	return DT_SUCCESS;
}

// Returns a random point within the polygons reachable from startRef
// within radius of centerPos, using a Dijkstra-like expansion and
// reservoir sampling over the visited ground polygons.
dtStatus dtNavMeshQuery::findRandomPointAroundCircle(dtPolyRef startRef, const float* centerPos, const float radius,
													 const dtQueryFilter* filter, float (*frand)(),
													 dtPolyRef* randomRef, float* randomPt) const
{
	dtAssert(m_nav);
	dtAssert(m_nodePool);
	dtAssert(m_openList);
	
	// Validate input
	if (!startRef || !m_nav->isValidPolyRef(startRef))
		return DT_FAILURE | DT_INVALID_PARAM;
	
	const dtMeshTile* startTile = 0;
	const dtPoly* startPoly = 0;
	m_nav->getTileAndPolyByRefUnsafe(startRef, &startTile, &startPoly);
	if (!filter->passFilter(startRef, startTile, startPoly))
		return DT_FAILURE | DT_INVALID_PARAM;
	
	m_nodePool->clear();
	m_openList->clear();
	
	dtNode* startNode = m_nodePool->getNode(startRef);
	dtVcopy(startNode->pos, centerPos);
	startNode->pidx = 0;
	startNode->cost = 0;
	startNode->total = 0;
	startNode->id = startRef;
	startNode->flags = DT_NODE_OPEN;
	m_openList->push(startNode);
	
	dtStatus status = DT_SUCCESS;
	
	const float radiusSqr = dtSqr(radius);
	float areaSum = 0.0f;

	const dtMeshTile* randomTile = 0;
	const dtPoly* randomPoly = 0;
	dtPolyRef randomPolyRef = 0;

	while (!m_openList->empty())
	{
		dtNode* bestNode = m_openList->pop();
		bestNode->flags &= ~DT_NODE_OPEN;
		bestNode->flags |= DT_NODE_CLOSED;
		
		// Get poly and tile.
		// The API input has been checked already, skip checking internal data.
		const dtPolyRef bestRef = bestNode->id;
		const dtMeshTile* bestTile = 0;
		const dtPoly* bestPoly = 0;
		m_nav->getTileAndPolyByRefUnsafe(bestRef, &bestTile, &bestPoly);
		
		// Place random locations on ground polygons only.
		if (bestPoly->getType() == DT_POLYTYPE_GROUND)
		{
			// Calc area of the polygon.
			float polyArea = 0.0f;
			for (int j = 2; j < bestPoly->vertCount; ++j)
			{
				const float* va = &bestTile->verts[bestPoly->verts[0]*3];
				const float* vb = &bestTile->verts[bestPoly->verts[j-1]*3];
				const float* vc = &bestTile->verts[bestPoly->verts[j]*3];
				polyArea += dtTriArea2D(va,vb,vc);
			}
			// Choose random polygon weighted by area, using reservoir sampling.
			areaSum += polyArea;
			const float u = frand();
			if (u*areaSum <= polyArea)
			{
				randomTile = bestTile;
				randomPoly = bestPoly;
				randomPolyRef = bestRef;
			}
		}
		
		// Get parent poly and tile.
		dtPolyRef parentRef = 0;
		const dtMeshTile* parentTile = 0;
		const dtPoly* parentPoly = 0;
		if (bestNode->pidx)
			parentRef = m_nodePool->getNodeAtIdx(bestNode->pidx)->id;
		if (parentRef)
			m_nav->getTileAndPolyByRefUnsafe(parentRef, &parentTile, &parentPoly);
		
		for (unsigned int i = bestPoly->firstLink; i != DT_NULL_LINK; i = bestTile->links[i].next)
		{
			const dtLink* link = &bestTile->links[i];
			dtPolyRef neighbourRef = link->ref;
			// Skip invalid neighbours and do not follow back to parent.
			if (!neighbourRef || neighbourRef == parentRef)
				continue;
			
			// Expand to neighbour
			const dtMeshTile* neighbourTile = 0;
			const dtPoly* neighbourPoly = 0;
			m_nav->getTileAndPolyByRefUnsafe(neighbourRef, &neighbourTile, &neighbourPoly);
			
			// Do not advance if the polygon is excluded by the filter.
			if (!filter->passFilter(neighbourRef, neighbourTile, neighbourPoly))
				continue;
			
			// Find edge and calc distance to the edge.
			float va[3], vb[3];
			if (!getPortalPoints(bestRef, bestPoly, bestTile, neighbourRef, neighbourPoly, neighbourTile, va, vb))
				continue;
			
			// If the circle is not touching the next polygon, skip it.
			float tseg;
			float distSqr = dtDistancePtSegSqr2D(centerPos, va, vb, tseg);
			if (distSqr > radiusSqr)
				continue;
			
			dtNode* neighbourNode = m_nodePool->getNode(neighbourRef);
			if (!neighbourNode)
			{
				// Pool exhausted: record and keep going with what we have.
				status |= DT_OUT_OF_NODES;
				continue;
			}
			
			if (neighbourNode->flags & DT_NODE_CLOSED)
				continue;
			
			// Cost: a freshly created node (flags == 0) gets its position
			// initialized to the portal midpoint.
			if (neighbourNode->flags == 0)
				dtVlerp(neighbourNode->pos, va, vb, 0.5f);
			
			const float total = bestNode->total + dtVdist(bestNode->pos, neighbourNode->pos);
			
			// The node is already in open list and the new result is worse, skip.
			if ((neighbourNode->flags & DT_NODE_OPEN) && total >= neighbourNode->total)
				continue;
			
			neighbourNode->id = neighbourRef;
			neighbourNode->flags = (neighbourNode->flags & ~DT_NODE_CLOSED);
			neighbourNode->pidx = m_nodePool->getNodeIdx(bestNode);
			neighbourNode->total = total;
			
			if (neighbourNode->flags & DT_NODE_OPEN)
			{
				m_openList->modify(neighbourNode);
			}
			else
			{
				neighbourNode->flags = DT_NODE_OPEN;
				m_openList->push(neighbourNode);
			}
		}
	}
	
	if (!randomPoly)
		return DT_FAILURE;
	
	// Randomly pick point on polygon.
const float* v = &randomTile->verts[randomPoly->verts[0]*3]; float verts[3*DT_VERTS_PER_POLYGON]; float areas[DT_VERTS_PER_POLYGON]; dtVcopy(&verts[0*3],v); for (int j = 1; j < randomPoly->vertCount; ++j) { v = &randomTile->verts[randomPoly->verts[j]*3]; dtVcopy(&verts[j*3],v); } const float s = frand(); const float t = frand(); float pt[3]; dtRandomPointInConvexPoly(verts, randomPoly->vertCount, areas, s, t, pt); float h = 0.0f; dtStatus stat = getPolyHeight(randomPolyRef, pt, &h); if (dtStatusFailed(status)) return stat; pt[1] = h; dtVcopy(randomPt, pt); *randomRef = randomPolyRef; return DT_SUCCESS; } ////////////////////////////////////////////////////////////////////////////////////////// /// @par /// /// Uses the detail polygons to find the surface height. (Most accurate.) /// /// @p pos does not have to be within the bounds of the polygon or navigation mesh. /// /// See closestPointOnPolyBoundary() for a limited but faster option. /// dtStatus dtNavMeshQuery::closestPointOnPoly(dtPolyRef ref, const float* pos, float* closest, bool* posOverPoly) const { dtAssert(m_nav); const dtMeshTile* tile = 0; const dtPoly* poly = 0; if (dtStatusFailed(m_nav->getTileAndPolyByRef(ref, &tile, &poly))) return DT_FAILURE | DT_INVALID_PARAM; if (!tile) return DT_FAILURE | DT_INVALID_PARAM; // Off-mesh connections don't have detail polygons. if (poly->getType() == DT_POLYTYPE_OFFMESH_CONNECTION) { const float* v0 = &tile->verts[poly->verts[0]*3]; const float* v1 = &tile->verts[poly->verts[1]*3]; const float d0 = dtVdist(pos, v0); const float d1 = dtVdist(pos, v1); const float u = d0 / (d0+d1); dtVlerp(closest, v0, v1, u); if (posOverPoly) *posOverPoly = false; return DT_SUCCESS; } const unsigned int ip = (unsigned int)(poly - tile->polys); const dtPolyDetail* pd = &tile->detailMeshes[ip]; // Clamp point to be inside the polygon. 
float verts[DT_VERTS_PER_POLYGON*3]; float edged[DT_VERTS_PER_POLYGON]; float edget[DT_VERTS_PER_POLYGON]; const int nv = poly->vertCount; for (int i = 0; i < nv; ++i) dtVcopy(&verts[i*3], &tile->verts[poly->verts[i]*3]); dtVcopy(closest, pos); if (!dtDistancePtPolyEdgesSqr(pos, verts, nv, edged, edget)) { // Point is outside the polygon, dtClamp to nearest edge. float dmin = FLT_MAX; int imin = -1; for (int i = 0; i < nv; ++i) { if (edged[i] < dmin) { dmin = edged[i]; imin = i; } } const float* va = &verts[imin*3]; const float* vb = &verts[((imin+1)%nv)*3]; dtVlerp(closest, va, vb, edget[imin]); if (posOverPoly) *posOverPoly = false; } else { if (posOverPoly) *posOverPoly = true; } // Find height at the location. for (int j = 0; j < pd->triCount; ++j) { const unsigned char* t = &tile->detailTris[(pd->triBase+j)*4]; const float* v[3]; for (int k = 0; k < 3; ++k) { if (t[k] < poly->vertCount) v[k] = &tile->verts[poly->verts[t[k]]*3]; else v[k] = &tile->detailVerts[(pd->vertBase+(t[k]-poly->vertCount))*3]; } float h; if (dtClosestHeightPointTriangle(pos, v[0], v[1], v[2], h)) { closest[1] = h; break; } } return DT_SUCCESS; } /// @par /// /// Much faster than closestPointOnPoly(). /// /// If the provided position lies within the polygon's xz-bounds (above or below), /// then @p pos and @p closest will be equal. /// /// The height of @p closest will be the polygon boundary. The height detail is not used. /// /// @p pos does not have to be within the bounds of the polybon or the navigation mesh. /// dtStatus dtNavMeshQuery::closestPointOnPolyBoundary(dtPolyRef ref, const float* pos, float* closest) const { dtAssert(m_nav); const dtMeshTile* tile = 0; const dtPoly* poly = 0; if (dtStatusFailed(m_nav->getTileAndPolyByRef(ref, &tile, &poly))) return DT_FAILURE | DT_INVALID_PARAM; // Collect vertices. 
// NOTE(review): continuation of dtNavMeshQuery::closestPointOnPolyBoundary
// (head on the previous source line).
	float verts[DT_VERTS_PER_POLYGON*3];
	float edged[DT_VERTS_PER_POLYGON];
	float edget[DT_VERTS_PER_POLYGON];
	int nv = 0;
	for (int i = 0; i < (int)poly->vertCount; ++i)
	{
		dtVcopy(&verts[nv*3], &tile->verts[poly->verts[i]*3]);
		nv++;
	}

	bool inside = dtDistancePtPolyEdgesSqr(pos, verts, nv, edged, edget);
	if (inside)
	{
		// Point is inside the polygon, return the point.
		dtVcopy(closest, pos);
	}
	else
	{
		// Point is outside the polygon, dtClamp to nearest edge.
		float dmin = FLT_MAX;
		int imin = -1;
		for (int i = 0; i < nv; ++i)
		{
			if (edged[i] < dmin)
			{
				dmin = edged[i];
				imin = i;
			}
		}
		const float* va = &verts[imin*3];
		const float* vb = &verts[((imin+1)%nv)*3];
		dtVlerp(closest, va, vb, edget[imin]);
	}

	return DT_SUCCESS;
}

/// @par
///
/// Will return #DT_FAILURE if the provided position is outside the xz-bounds
/// of the polygon.
///
dtStatus dtNavMeshQuery::getPolyHeight(dtPolyRef ref, const float* pos, float* height) const
{
	dtAssert(m_nav);

	const dtMeshTile* tile = 0;
	const dtPoly* poly = 0;
	if (dtStatusFailed(m_nav->getTileAndPolyByRef(ref, &tile, &poly)))
		return DT_FAILURE | DT_INVALID_PARAM;

	if (poly->getType() == DT_POLYTYPE_OFFMESH_CONNECTION)
	{
		// Off-mesh connection: interpolate height along the segment by the
		// 2D distance ratio from each endpoint.
		const float* v0 = &tile->verts[poly->verts[0]*3];
		const float* v1 = &tile->verts[poly->verts[1]*3];
		const float d0 = dtVdist2D(pos, v0);
		const float d1 = dtVdist2D(pos, v1);
		const float u = d0 / (d0+d1);
		if (height)
			*height = v0[1] + (v1[1] - v0[1]) * u;
		return DT_SUCCESS;
	}
	else
	{
		// Walk the detail triangles; the first one containing pos (in xz)
		// supplies the height.
		const unsigned int ip = (unsigned int)(poly - tile->polys);
		const dtPolyDetail* pd = &tile->detailMeshes[ip];
		for (int j = 0; j < pd->triCount; ++j)
		{
			const unsigned char* t = &tile->detailTris[(pd->triBase+j)*4];
			const float* v[3];
			for (int k = 0; k < 3; ++k)
			{
				if (t[k] < poly->vertCount)
					v[k] = &tile->verts[poly->verts[t[k]]*3];
				else
					v[k] = &tile->detailVerts[(pd->vertBase+(t[k]-poly->vertCount))*3];
			}
			float h;
			if (dtClosestHeightPointTriangle(pos, v[0], v[1], v[2], h))
			{
				if (height)
					*height = h;
				return DT_SUCCESS;
			}
		}
	}

	// No triangle contained the position: pos is outside the poly's xz-bounds.
	return DT_FAILURE | DT_INVALID_PARAM;
}

/// @par
///
/// @note If the search box does not intersect any polygons the search will
/// return #DT_SUCCESS, but @p nearestRef will be zero. So if in doubt, check
/// @p nearestRef before using @p nearestPt.
///
/// @warning This function is not suitable for large area searches. If the search
/// extents overlaps more than 128 polygons it may return an invalid result.
///
dtStatus dtNavMeshQuery::findNearestPoly(const float* center, const float* extents, const dtQueryFilter* filter, dtPolyRef* nearestRef, float* nearestPt) const
{
	dtAssert(m_nav);

	*nearestRef = 0;

	// Get nearby polygons from proximity grid.
	dtPolyRef polys[128];
	int polyCount = 0;
	if (dtStatusFailed(queryPolygons(center, extents, filter, polys, &polyCount, 128)))
		return DT_FAILURE | DT_INVALID_PARAM;

	// Find nearest polygon amongst the nearby polygons.
	dtPolyRef nearest = 0;
	float nearestDistanceSqr = FLT_MAX;
	for (int i = 0; i < polyCount; ++i)
	{
		dtPolyRef ref = polys[i];
		float closestPtPoly[3];
		float diff[3];
		bool posOverPoly = false;
		float d = 0;
		// NOTE(review): return status of closestPointOnPoly is intentionally
		// ignored; refs come from queryPolygons above and are assumed valid.
		closestPointOnPoly(ref, center, closestPtPoly, &posOverPoly);

		// If a point is directly over a polygon and closer than
		// climb height, favor that instead of straight line nearest point.
		dtVsub(diff, center, closestPtPoly);
		if (posOverPoly)
		{
			const dtMeshTile* tile = 0;
			const dtPoly* poly = 0;
			m_nav->getTileAndPolyByRefUnsafe(polys[i], &tile, &poly);
			d = dtAbs(diff[1]) - tile->header->walkableClimb;
			d = d > 0 ? d*d : 0;
		}
		else
		{
			d = dtVlenSqr(diff);
		}

		if (d < nearestDistanceSqr)
		{
			if (nearestPt)
				dtVcopy(nearestPt, closestPtPoly);
			nearestDistanceSqr = d;
			nearest = ref;
		}
	}

	if (nearestRef)
		*nearestRef = nearest;

	return DT_SUCCESS;
}

// Collects polygon refs in 'tile' overlapping the AABB [qmin,qmax] that pass
// 'filter'. Uses the tile's BV-tree when present, otherwise brute force.
// Returns the number of refs written to 'polys' (capped at maxPolys).
int dtNavMeshQuery::queryPolygonsInTile(const dtMeshTile* tile, const float* qmin, const float* qmax, const dtQueryFilter* filter, dtPolyRef* polys, const int maxPolys) const
{
	dtAssert(m_nav);

	if (tile->bvTree)
	{
		const dtBVNode* node = &tile->bvTree[0];
		const dtBVNode* end = &tile->bvTree[tile->header->bvNodeCount];
		const float* tbmin = tile->header->bmin;
		const float* tbmax = tile->header->bmax;
		const float qfac = tile->header->bvQuantFactor;

		// Calculate quantized box
		unsigned short bmin[3], bmax[3];
		// dtClamp query box to world box.
		float minx = dtClamp(qmin[0], tbmin[0], tbmax[0]) - tbmin[0];
		float miny = dtClamp(qmin[1], tbmin[1], tbmax[1]) - tbmin[1];
		float minz = dtClamp(qmin[2], tbmin[2], tbmax[2]) - tbmin[2];
		float maxx = dtClamp(qmax[0], tbmin[0], tbmax[0]) - tbmin[0];
		float maxy = dtClamp(qmax[1], tbmin[1], tbmax[1]) - tbmin[1];
		float maxz = dtClamp(qmax[2], tbmin[2], tbmax[2]) - tbmin[2];
		// Quantize (min rounded down to even, max rounded up to odd so the
		// quantized box conservatively contains the query box).
		bmin[0] = (unsigned short)(qfac * minx) & 0xfffe;
		bmin[1] = (unsigned short)(qfac * miny) & 0xfffe;
		bmin[2] = (unsigned short)(qfac * minz) & 0xfffe;
		bmax[0] = (unsigned short)(qfac * maxx + 1) | 1;
		bmax[1] = (unsigned short)(qfac * maxy + 1) | 1;
		bmax[2] = (unsigned short)(qfac * maxz + 1) | 1;

		// Traverse tree
		const dtPolyRef base = m_nav->getPolyRefBase(tile);
		int n = 0;
		while (node < end)
		{
			const bool overlap = dtOverlapQuantBounds(bmin, bmax, node->bmin, node->bmax);
			const bool isLeafNode = node->i >= 0;

			if (isLeafNode && overlap)
			{
				dtPolyRef ref = base | (dtPolyRef)node->i;
				if (filter->passFilter(ref, tile, &tile->polys[node->i]))
				{
					if (n < maxPolys)
						polys[n++] = ref;
				}
			}

			// Negative 'i' in an internal node encodes the escape index used
			// to skip the whole subtree when there is no overlap.
			if (overlap || isLeafNode)
				node++;
			else
			{
				const int escapeIndex = -node->i;
				node += escapeIndex;
			}
		}

		return n;
	}
	else
	{
		float bmin[3], bmax[3];
		int n = 0;
		const dtPolyRef base = m_nav->getPolyRefBase(tile);
		for (int i = 0; i < tile->header->polyCount; ++i)
		{
			const dtPoly* p = &tile->polys[i];
			// Do not return off-mesh connection polygons.
			if (p->getType() == DT_POLYTYPE_OFFMESH_CONNECTION)
				continue;
			// Must pass filter
			const dtPolyRef ref = base | (dtPolyRef)i;
			if (!filter->passFilter(ref, tile, p))
				continue;
			// Calc polygon bounds.
			const float* v = &tile->verts[p->verts[0]*3];
			dtVcopy(bmin, v);
			dtVcopy(bmax, v);
			for (int j = 1; j < p->vertCount; ++j)
			{
				v = &tile->verts[p->verts[j]*3];
				dtVmin(bmin, v);
				dtVmax(bmax, v);
			}
			if (dtOverlapBounds(qmin,qmax, bmin,bmax))
			{
				if (n < maxPolys)
					polys[n++] = ref;
			}
		}
		return n;
	}
}

/// @par
///
/// If no polygons are found, the function will return #DT_SUCCESS with a
/// @p polyCount of zero.
///
/// If @p polys is too small to hold the entire result set, then the array will
/// be filled to capacity. The method of choosing which polygons from the
/// full set are included in the partial result set is undefined.
///
dtStatus dtNavMeshQuery::queryPolygons(const float* center, const float* extents, const dtQueryFilter* filter, dtPolyRef* polys, int* polyCount, const int maxPolys) const
{
	dtAssert(m_nav);

	float bmin[3], bmax[3];
	dtVsub(bmin, center, extents);
	dtVadd(bmax, center, extents);

	// Find tiles the query touches.
// NOTE(review): continuation of dtNavMeshQuery::queryPolygons (head on the
// previous source line): visit every tile layer touched by the query box and
// delegate to queryPolygonsInTile.
	int minx, miny, maxx, maxy;
	m_nav->calcTileLoc(bmin, &minx, &miny);
	m_nav->calcTileLoc(bmax, &maxx, &maxy);

	static const int MAX_NEIS = 32;
	const dtMeshTile* neis[MAX_NEIS];

	int n = 0;
	for (int y = miny; y <= maxy; ++y)
	{
		for (int x = minx; x <= maxx; ++x)
		{
			// Multiple tiles may be stacked at the same (x,y) location.
			const int nneis = m_nav->getTilesAt(x,y,neis,MAX_NEIS);
			for (int j = 0; j < nneis; ++j)
			{
				n += queryPolygonsInTile(neis[j], bmin, bmax, filter, polys+n, maxPolys-n);
				if (n >= maxPolys)
				{
					*polyCount = n;
					return DT_SUCCESS | DT_BUFFER_TOO_SMALL;
				}
			}
		}
	}
	*polyCount = n;

	return DT_SUCCESS;
}

/// @par
///
/// If the end polygon cannot be reached through the navigation graph,
/// the last polygon in the path will be the nearest the end polygon.
///
/// If the path array is too small to hold the full result, it will be filled as
/// far as possible from the start polygon toward the end polygon.
///
/// The start and end positions are used to calculate traversal costs.
/// (The y-values impact the result.)
///
dtStatus dtNavMeshQuery::findPath(dtPolyRef startRef, dtPolyRef endRef, const float* startPos, const float* endPos, const dtQueryFilter* filter, dtPolyRef* path, int* pathCount, const int maxPath) const
{
	dtAssert(m_nav);
	dtAssert(m_nodePool);
	dtAssert(m_openList);

	*pathCount = 0;

	if (!startRef || !endRef)
		return DT_FAILURE | DT_INVALID_PARAM;

	if (!maxPath)
		return DT_FAILURE | DT_INVALID_PARAM;

	// Validate input
	if (!m_nav->isValidPolyRef(startRef) || !m_nav->isValidPolyRef(endRef))
		return DT_FAILURE | DT_INVALID_PARAM;

	if (startRef == endRef)
	{
		path[0] = startRef;
		*pathCount = 1;
		return DT_SUCCESS;
	}

	m_nodePool->clear();
	m_openList->clear();

	// Seed the A* open list with the start polygon; 'total' is the heuristic
	// (straight-line distance scaled by H_SCALE).
	dtNode* startNode = m_nodePool->getNode(startRef);
	dtVcopy(startNode->pos, startPos);
	startNode->pidx = 0;
	startNode->cost = 0;
	startNode->total = dtVdist(startPos, endPos) * H_SCALE;
	startNode->id = startRef;
	startNode->flags = DT_NODE_OPEN;
	m_openList->push(startNode);

	// Track the node nearest the goal so a partial path can be returned.
	dtNode* lastBestNode = startNode;
	float lastBestNodeCost = startNode->total;

	dtStatus status = DT_SUCCESS;
// NOTE(review): main A* expansion loop of dtNavMeshQuery::findPath.
	while (!m_openList->empty())
	{
		// Remove node from open list and put it in closed list.
		dtNode* bestNode = m_openList->pop();
		bestNode->flags &= ~DT_NODE_OPEN;
		bestNode->flags |= DT_NODE_CLOSED;

		// Reached the goal, stop searching.
		if (bestNode->id == endRef)
		{
			lastBestNode = bestNode;
			break;
		}

		// Get current poly and tile.
		// The API input has been checked already, skip checking internal data.
		const dtPolyRef bestRef = bestNode->id;
		const dtMeshTile* bestTile = 0;
		const dtPoly* bestPoly = 0;
		m_nav->getTileAndPolyByRefUnsafe(bestRef, &bestTile, &bestPoly);

		// Get parent poly and tile.
		dtPolyRef parentRef = 0;
		const dtMeshTile* parentTile = 0;
		const dtPoly* parentPoly = 0;
		if (bestNode->pidx)
			parentRef = m_nodePool->getNodeAtIdx(bestNode->pidx)->id;
		if (parentRef)
			m_nav->getTileAndPolyByRefUnsafe(parentRef, &parentTile, &parentPoly);

		for (unsigned int i = bestPoly->firstLink; i != DT_NULL_LINK; i = bestTile->links[i].next)
		{
			dtPolyRef neighbourRef = bestTile->links[i].ref;

			// Skip invalid ids and do not expand back to where we came from.
			if (!neighbourRef || neighbourRef == parentRef)
				continue;

			// Get neighbour poly and tile.
			// The API input has been checked already, skip checking internal data.
			const dtMeshTile* neighbourTile = 0;
			const dtPoly* neighbourPoly = 0;
			m_nav->getTileAndPolyByRefUnsafe(neighbourRef, &neighbourTile, &neighbourPoly);

			if (!filter->passFilter(neighbourRef, neighbourTile, neighbourPoly))
				continue;

			// deal explicitly with crossing tile boundaries
			unsigned char crossSide = 0;
			if (bestTile->links[i].side != 0xff)
				crossSide = bestTile->links[i].side >> 1;

			// get the node
			dtNode* neighbourNode = m_nodePool->getNode(neighbourRef, crossSide);
			if (!neighbourNode)
			{
				// Node pool exhausted; record it and keep searching with what we have.
				status |= DT_OUT_OF_NODES;
				continue;
			}

			// If the node is visited the first time, calculate node position.
			if (neighbourNode->flags == 0)
			{
				getEdgeMidPoint(bestRef, bestPoly, bestTile, neighbourRef, neighbourPoly, neighbourTile, neighbourNode->pos);
			}

			// Calculate cost and heuristic.
			float cost = 0;
			float heuristic = 0;

			// Special case for last node.
			if (neighbourRef == endRef)
			{
				// Cost
				const float curCost = filter->getCost(bestNode->pos, neighbourNode->pos, parentRef, parentTile, parentPoly, bestRef, bestTile, bestPoly, neighbourRef, neighbourTile, neighbourPoly);
				const float endCost = filter->getCost(neighbourNode->pos, endPos, bestRef, bestTile, bestPoly, neighbourRef, neighbourTile, neighbourPoly, 0, 0, 0);

				cost = bestNode->cost + curCost + endCost;
				heuristic = 0;
			}
			else
			{
				// Cost
				const float curCost = filter->getCost(bestNode->pos, neighbourNode->pos, parentRef, parentTile, parentPoly, bestRef, bestTile, bestPoly, neighbourRef, neighbourTile, neighbourPoly);
				cost = bestNode->cost + curCost;
				heuristic = dtVdist(neighbourNode->pos, endPos)*H_SCALE;
			}

			const float total = cost + heuristic;

			// The node is already in open list and the new result is worse, skip.
			if ((neighbourNode->flags & DT_NODE_OPEN) && total >= neighbourNode->total)
				continue;
			// The node is already visited and processed, and the new result is worse, skip.
			if ((neighbourNode->flags & DT_NODE_CLOSED) && total >= neighbourNode->total)
				continue;

			// Add or update the node.
			neighbourNode->pidx = m_nodePool->getNodeIdx(bestNode);
			neighbourNode->id = neighbourRef;
			neighbourNode->flags = (neighbourNode->flags & ~DT_NODE_CLOSED);
			neighbourNode->cost = cost;
			neighbourNode->total = total;

			if (neighbourNode->flags & DT_NODE_OPEN)
			{
				// Already in open, update node location.
				m_openList->modify(neighbourNode);
			}
			else
			{
				// Put the node in open list.
				neighbourNode->flags |= DT_NODE_OPEN;
				m_openList->push(neighbourNode);
			}

			// Update nearest node to target so far.
			if (heuristic < lastBestNodeCost)
			{
				lastBestNodeCost = heuristic;
				lastBestNode = neighbourNode;
			}
		}
	}

	if (lastBestNode->id != endRef)
		status |= DT_PARTIAL_RESULT;

	// Reverse the path.
// NOTE(review): epilogue of dtNavMeshQuery::findPath — reverse the parent
// chain (which currently runs goal -> start) in place, then emit the refs.
	dtNode* prev = 0;
	dtNode* node = lastBestNode;
	do
	{
		dtNode* next = m_nodePool->getNodeAtIdx(node->pidx);
		node->pidx = m_nodePool->getNodeIdx(prev);
		prev = node;
		node = next;
	}
	while (node);

	// Store path
	node = prev;
	int n = 0;
	do
	{
		path[n++] = node->id;
		if (n >= maxPath)
		{
			status |= DT_BUFFER_TOO_SMALL;
			break;
		}
		node = m_nodePool->getNodeAtIdx(node->pidx);
	}
	while (node);

	*pathCount = n;

	return status;
}

/// @par
///
/// @warning Calling any non-slice methods before calling finalizeSlicedFindPath()
/// or finalizeSlicedFindPathPartial() may result in corrupted data!
///
/// The @p filter pointer is stored and used for the duration of the sliced
/// path query.
///
dtStatus dtNavMeshQuery::initSlicedFindPath(dtPolyRef startRef, dtPolyRef endRef, const float* startPos, const float* endPos, const dtQueryFilter* filter, const unsigned int options)
{
	dtAssert(m_nav);
	dtAssert(m_nodePool);
	dtAssert(m_openList);

	// Init path state.
	memset(&m_query, 0, sizeof(dtQueryData));
	m_query.status = DT_FAILURE;
	m_query.startRef = startRef;
	m_query.endRef = endRef;
	dtVcopy(m_query.startPos, startPos);
	dtVcopy(m_query.endPos, endPos);
	m_query.filter = filter;
	m_query.options = options;
	m_query.raycastLimitSqr = FLT_MAX;

	if (!startRef || !endRef)
		return DT_FAILURE | DT_INVALID_PARAM;

	// Validate input
	if (!m_nav->isValidPolyRef(startRef) || !m_nav->isValidPolyRef(endRef))
		return DT_FAILURE | DT_INVALID_PARAM;

	// trade quality with performance?
	if (options & DT_FINDPATH_ANY_ANGLE)
	{
		// limiting to several times the character radius yields nice results. It is not sensitive
		// so it is enough to compute it from the first tile.
// NOTE(review): continuation of dtNavMeshQuery::initSlicedFindPath (head on
// the previous source line): finish the raycast limit and seed the open list.
		const dtMeshTile* tile = m_nav->getTileByRef(startRef);
		float agentRadius = tile->header->walkableRadius;
		m_query.raycastLimitSqr = dtSqr(agentRadius * DT_RAY_CAST_LIMIT_PROPORTIONS);
	}

	if (startRef == endRef)
	{
		m_query.status = DT_SUCCESS;
		return DT_SUCCESS;
	}

	m_nodePool->clear();
	m_openList->clear();

	dtNode* startNode = m_nodePool->getNode(startRef);
	dtVcopy(startNode->pos, startPos);
	startNode->pidx = 0;
	startNode->cost = 0;
	startNode->total = dtVdist(startPos, endPos) * H_SCALE;
	startNode->id = startRef;
	startNode->flags = DT_NODE_OPEN;
	m_openList->push(startNode);

	m_query.status = DT_IN_PROGRESS;
	m_query.lastBestNode = startNode;
	m_query.lastBestNodeCost = startNode->total;

	return m_query.status;
}

// Runs up to maxIter iterations of the in-progress sliced path query.
// Returns the query status; 'doneIters' (optional) receives the number of
// iterations actually performed.
dtStatus dtNavMeshQuery::updateSlicedFindPath(const int maxIter, int* doneIters)
{
	if (!dtStatusInProgress(m_query.status))
		return m_query.status;

	// Make sure the request is still valid.
	if (!m_nav->isValidPolyRef(m_query.startRef) || !m_nav->isValidPolyRef(m_query.endRef))
	{
		m_query.status = DT_FAILURE;
		return DT_FAILURE;
	}

	dtRaycastHit rayHit;
	rayHit.maxPath = 0;

	int iter = 0;
	while (iter < maxIter && !m_openList->empty())
	{
		iter++;

		// Remove node from open list and put it in closed list.
		dtNode* bestNode = m_openList->pop();
		bestNode->flags &= ~DT_NODE_OPEN;
		bestNode->flags |= DT_NODE_CLOSED;

		// Reached the goal, stop searching.
		if (bestNode->id == m_query.endRef)
		{
			m_query.lastBestNode = bestNode;
			const dtStatus details = m_query.status & DT_STATUS_DETAIL_MASK;
			m_query.status = DT_SUCCESS | details;
			if (doneIters)
				*doneIters = iter;
			return m_query.status;
		}

		// Get current poly and tile.
		// The API input has been checked already, skip checking internal data.
		const dtPolyRef bestRef = bestNode->id;
		const dtMeshTile* bestTile = 0;
		const dtPoly* bestPoly = 0;
		if (dtStatusFailed(m_nav->getTileAndPolyByRef(bestRef, &bestTile, &bestPoly)))
		{
			// The polygon has disappeared during the sliced query, fail.
// NOTE(review): body of dtNavMeshQuery::updateSlicedFindPath — parent/grandparent
// lookup, optional any-angle raycast shortcuts, and neighbour expansion.
			m_query.status = DT_FAILURE;
			if (doneIters)
				*doneIters = iter;
			return m_query.status;
		}

		// Get parent and grand parent poly and tile.
		dtPolyRef parentRef = 0, grandpaRef = 0;
		const dtMeshTile* parentTile = 0;
		const dtPoly* parentPoly = 0;
		dtNode* parentNode = 0;
		if (bestNode->pidx)
		{
			parentNode = m_nodePool->getNodeAtIdx(bestNode->pidx);
			parentRef = parentNode->id;
			if (parentNode->pidx)
				grandpaRef = m_nodePool->getNodeAtIdx(parentNode->pidx)->id;
		}
		if (parentRef)
		{
			bool invalidParent = dtStatusFailed(m_nav->getTileAndPolyByRef(parentRef, &parentTile, &parentPoly));
			if (invalidParent || (grandpaRef && !m_nav->isValidPolyRef(grandpaRef)) )
			{
				// The polygon has disappeared during the sliced query, fail.
				m_query.status = DT_FAILURE;
				if (doneIters)
					*doneIters = iter;
				return m_query.status;
			}
		}

		// decide whether to test raycast to previous nodes
		bool tryLOS = false;
		if (m_query.options & DT_FINDPATH_ANY_ANGLE)
		{
			if ((parentRef != 0) && (dtVdistSqr(parentNode->pos, bestNode->pos) < m_query.raycastLimitSqr))
				tryLOS = true;
		}

		for (unsigned int i = bestPoly->firstLink; i != DT_NULL_LINK; i = bestTile->links[i].next)
		{
			dtPolyRef neighbourRef = bestTile->links[i].ref;

			// Skip invalid ids and do not expand back to where we came from.
			if (!neighbourRef || neighbourRef == parentRef)
				continue;

			// Get neighbour poly and tile.
			// The API input has been checked already, skip checking internal data.
			const dtMeshTile* neighbourTile = 0;
			const dtPoly* neighbourPoly = 0;
			m_nav->getTileAndPolyByRefUnsafe(neighbourRef, &neighbourTile, &neighbourPoly);

			if (!m_query.filter->passFilter(neighbourRef, neighbourTile, neighbourPoly))
				continue;

			// get the neighbor node
			dtNode* neighbourNode = m_nodePool->getNode(neighbourRef, 0);
			if (!neighbourNode)
			{
				m_query.status |= DT_OUT_OF_NODES;
				continue;
			}

			// do not expand to nodes that were already visited from the same parent
			if (neighbourNode->pidx != 0 && neighbourNode->pidx == bestNode->pidx)
				continue;

			// If the node is visited the first time, calculate node position.
			if (neighbourNode->flags == 0)
			{
				getEdgeMidPoint(bestRef, bestPoly, bestTile, neighbourRef, neighbourPoly, neighbourTile, neighbourNode->pos);
			}

			// Calculate cost and heuristic.
			float cost = 0;
			float heuristic = 0;

			// raycast parent
			bool foundShortCut = false;
			rayHit.pathCost = rayHit.t = 0;
			if (tryLOS)
			{
				raycast(parentRef, parentNode->pos, neighbourNode->pos, m_query.filter, DT_RAYCAST_USE_COSTS, &rayHit, grandpaRef);
				// t >= 1 means the ray reached the neighbour without hitting a wall.
				foundShortCut = rayHit.t >= 1.0f;
			}

			// update move cost
			if (foundShortCut)
			{
				// shortcut found using raycast. Using shorter cost instead
				cost = parentNode->cost + rayHit.pathCost;
			}
			else
			{
				// No shortcut found.
				const float curCost = m_query.filter->getCost(bestNode->pos, neighbourNode->pos, parentRef, parentTile, parentPoly, bestRef, bestTile, bestPoly, neighbourRef, neighbourTile, neighbourPoly);
				cost = bestNode->cost + curCost;
			}

			// Special case for last node.
			if (neighbourRef == m_query.endRef)
			{
				const float endCost = m_query.filter->getCost(neighbourNode->pos, m_query.endPos, bestRef, bestTile, bestPoly, neighbourRef, neighbourTile, neighbourPoly, 0, 0, 0);

				cost = cost + endCost;
				heuristic = 0;
			}
			else
			{
				heuristic = dtVdist(neighbourNode->pos, m_query.endPos)*H_SCALE;
			}

			const float total = cost + heuristic;

			// The node is already in open list and the new result is worse, skip.
// NOTE(review): tail of the neighbour-expansion loop in
// dtNavMeshQuery::updateSlicedFindPath, then the function's epilogue.
			if ((neighbourNode->flags & DT_NODE_OPEN) && total >= neighbourNode->total)
				continue;
			// The node is already visited and processed, and the new result is worse, skip.
			if ((neighbourNode->flags & DT_NODE_CLOSED) && total >= neighbourNode->total)
				continue;

			// Add or update the node. A shortcut re-parents the node to the
			// grandparent (bestNode's parent) rather than bestNode itself.
			neighbourNode->pidx = foundShortCut ? bestNode->pidx : m_nodePool->getNodeIdx(bestNode);
			neighbourNode->id = neighbourRef;
			neighbourNode->flags = (neighbourNode->flags & ~(DT_NODE_CLOSED | DT_NODE_PARENT_DETACHED));
			neighbourNode->cost = cost;
			neighbourNode->total = total;
			if (foundShortCut)
				neighbourNode->flags = (neighbourNode->flags | DT_NODE_PARENT_DETACHED);

			if (neighbourNode->flags & DT_NODE_OPEN)
			{
				// Already in open, update node location.
				m_openList->modify(neighbourNode);
			}
			else
			{
				// Put the node in open list.
				neighbourNode->flags |= DT_NODE_OPEN;
				m_openList->push(neighbourNode);
			}

			// Update nearest node to target so far.
			if (heuristic < m_query.lastBestNodeCost)
			{
				m_query.lastBestNodeCost = heuristic;
				m_query.lastBestNode = neighbourNode;
			}
		}
	}

	// Exhausted all nodes, but could not find path.
	if (m_openList->empty())
	{
		const dtStatus details = m_query.status & DT_STATUS_DETAIL_MASK;
		m_query.status = DT_SUCCESS | details;
	}

	if (doneIters)
		*doneIters = iter;

	return m_query.status;
}

// Finalizes the sliced query: writes the (possibly partial) path into 'path'
// and resets the internal query state.
dtStatus dtNavMeshQuery::finalizeSlicedFindPath(dtPolyRef* path, int* pathCount, const int maxPath)
{
	*pathCount = 0;

	if (dtStatusFailed(m_query.status))
	{
		// Reset query.
		memset(&m_query, 0, sizeof(dtQueryData));
		return DT_FAILURE;
	}

	int n = 0;

	if (m_query.startRef == m_query.endRef)
	{
		// Special case: the search starts and ends at same poly.
		path[n++] = m_query.startRef;
	}
	else
	{
		// Reverse the path.
// NOTE(review): continuation of dtNavMeshQuery::finalizeSlicedFindPath —
// reverse the parent chain, then emit refs, expanding raycast shortcuts
// (DT_NODE_PARENT_DETACHED) back into full polygon sequences.
		dtAssert(m_query.lastBestNode);

		if (m_query.lastBestNode->id != m_query.endRef)
			m_query.status |= DT_PARTIAL_RESULT;

		dtNode* prev = 0;
		dtNode* node = m_query.lastBestNode;
		int prevRay = 0;
		do
		{
			dtNode* next = m_nodePool->getNodeAtIdx(node->pidx);
			node->pidx = m_nodePool->getNodeIdx(prev);
			prev = node;
			int nextRay = node->flags & DT_NODE_PARENT_DETACHED; // keep track of whether parent is not adjacent (i.e. due to raycast shortcut)
			node->flags = (node->flags & ~DT_NODE_PARENT_DETACHED) | prevRay; // and store it in the reversed path's node
			prevRay = nextRay;
			node = next;
		}
		while (node);

		// Store path
		node = prev;
		do
		{
			dtNode* next = m_nodePool->getNodeAtIdx(node->pidx);
			dtStatus status = 0;
			if (node->flags & DT_NODE_PARENT_DETACHED)
			{
				float t, normal[3];
				int m;
				status = raycast(node->id, node->pos, next->pos, m_query.filter, &t, normal, path+n, &m, maxPath-n);
				n += m;
				// raycast ends on poly boundary and the path might include the next poly boundary.
				if (path[n-1] == next->id)
					n--; // remove to avoid duplicates
			}
			else
			{
				path[n++] = node->id;
				if (n >= maxPath)
					status = DT_BUFFER_TOO_SMALL;
			}

			if (status & DT_STATUS_DETAIL_MASK)
			{
				m_query.status |= status & DT_STATUS_DETAIL_MASK;
				break;
			}
			node = next;
		}
		while (node);
	}

	const dtStatus details = m_query.status & DT_STATUS_DETAIL_MASK;

	// Reset query.
	memset(&m_query, 0, sizeof(dtQueryData));

	*pathCount = n;

	return DT_SUCCESS | details;
}

// Like finalizeSlicedFindPath(), but resumes from the furthest polygon of an
// 'existing' path that the search visited, producing a partial path toward it.
dtStatus dtNavMeshQuery::finalizeSlicedFindPathPartial(const dtPolyRef* existing, const int existingSize, dtPolyRef* path, int* pathCount, const int maxPath)
{
	*pathCount = 0;

	if (existingSize == 0)
	{
		return DT_FAILURE;
	}

	if (dtStatusFailed(m_query.status))
	{
		// Reset query.
		memset(&m_query, 0, sizeof(dtQueryData));
		return DT_FAILURE;
	}

	int n = 0;

	if (m_query.startRef == m_query.endRef)
	{
		// Special case: the search starts and ends at same poly.
		path[n++] = m_query.startRef;
	}
	else
	{
		// Find furthest existing node that was visited.
// NOTE(review): continuation of dtNavMeshQuery::finalizeSlicedFindPathPartial
// (head on the previous source line). Mirrors finalizeSlicedFindPath's
// reverse-and-emit logic.
		dtNode* prev = 0;
		dtNode* node = 0;
		for (int i = existingSize-1; i >= 0; --i)
		{
			m_nodePool->findNodes(existing[i], &node, 1);
			if (node)
				break;
		}

		if (!node)
		{
			// None of the existing refs were visited; fall back to the best node.
			m_query.status |= DT_PARTIAL_RESULT;
			dtAssert(m_query.lastBestNode);
			node = m_query.lastBestNode;
		}

		// Reverse the path.
		int prevRay = 0;
		do
		{
			dtNode* next = m_nodePool->getNodeAtIdx(node->pidx);
			node->pidx = m_nodePool->getNodeIdx(prev);
			prev = node;
			int nextRay = node->flags & DT_NODE_PARENT_DETACHED; // keep track of whether parent is not adjacent (i.e. due to raycast shortcut)
			node->flags = (node->flags & ~DT_NODE_PARENT_DETACHED) | prevRay; // and store it in the reversed path's node
			prevRay = nextRay;
			node = next;
		}
		while (node);

		// Store path
		node = prev;
		do
		{
			dtNode* next = m_nodePool->getNodeAtIdx(node->pidx);
			dtStatus status = 0;
			if (node->flags & DT_NODE_PARENT_DETACHED)
			{
				float t, normal[3];
				int m;
				status = raycast(node->id, node->pos, next->pos, m_query.filter, &t, normal, path+n, &m, maxPath-n);
				n += m;
				// raycast ends on poly boundary and the path might include the next poly boundary.
				if (path[n-1] == next->id)
					n--; // remove to avoid duplicates
			}
			else
			{
				path[n++] = node->id;
				if (n >= maxPath)
					status = DT_BUFFER_TOO_SMALL;
			}

			if (status & DT_STATUS_DETAIL_MASK)
			{
				m_query.status |= status & DT_STATUS_DETAIL_MASK;
				break;
			}
			node = next;
		}
		while (node);
	}

	const dtStatus details = m_query.status & DT_STATUS_DETAIL_MASK;

	// Reset query.
	memset(&m_query, 0, sizeof(dtQueryData));

	*pathCount = n;

	return DT_SUCCESS | details;
}

// Appends (or merges into the previous entry) one straight-path vertex.
// Returns DT_IN_PROGRESS while more vertices may be appended, DT_SUCCESS
// (optionally | DT_BUFFER_TOO_SMALL) when the path is complete or full.
dtStatus dtNavMeshQuery::appendVertex(const float* pos, const unsigned char flags, const dtPolyRef ref, float* straightPath, unsigned char* straightPathFlags, dtPolyRef* straightPathRefs, int* straightPathCount, const int maxStraightPath) const
{
	if ((*straightPathCount) > 0 && dtVequal(&straightPath[((*straightPathCount)-1)*3], pos))
	{
		// The vertices are equal, update flags and poly.
// NOTE(review): continuation of dtNavMeshQuery::appendVertex (head on the
// previous source line).
		if (straightPathFlags)
			straightPathFlags[(*straightPathCount)-1] = flags;
		if (straightPathRefs)
			straightPathRefs[(*straightPathCount)-1] = ref;
	}
	else
	{
		// Append new vertex.
		dtVcopy(&straightPath[(*straightPathCount)*3], pos);
		if (straightPathFlags)
			straightPathFlags[(*straightPathCount)] = flags;
		if (straightPathRefs)
			straightPathRefs[(*straightPathCount)] = ref;
		(*straightPathCount)++;

		// If reached end of path or there is no space to append more vertices, return.
		if (flags == DT_STRAIGHTPATH_END || (*straightPathCount) >= maxStraightPath)
		{
			return DT_SUCCESS | (((*straightPathCount) >= maxStraightPath) ? DT_BUFFER_TOO_SMALL : 0);
		}
	}
	return DT_IN_PROGRESS;
}

// Appends portal-crossing vertices between path[startIdx..endIdx] onto the
// straight path, used for DT_STRAIGHTPATH_AREA_CROSSINGS / ALL_CROSSINGS.
dtStatus dtNavMeshQuery::appendPortals(const int startIdx, const int endIdx, const float* endPos, const dtPolyRef* path, float* straightPath, unsigned char* straightPathFlags, dtPolyRef* straightPathRefs, int* straightPathCount, const int maxStraightPath, const int options) const
{
	const float* startPos = &straightPath[(*straightPathCount-1)*3];
	// Append or update last vertex
	dtStatus stat = 0;
	for (int i = startIdx; i < endIdx; i++)
	{
		// Calculate portal
		const dtPolyRef from = path[i];
		const dtMeshTile* fromTile = 0;
		const dtPoly* fromPoly = 0;
		if (dtStatusFailed(m_nav->getTileAndPolyByRef(from, &fromTile, &fromPoly)))
			return DT_FAILURE | DT_INVALID_PARAM;

		const dtPolyRef to = path[i+1];
		const dtMeshTile* toTile = 0;
		const dtPoly* toPoly = 0;
		if (dtStatusFailed(m_nav->getTileAndPolyByRef(to, &toTile, &toPoly)))
			return DT_FAILURE | DT_INVALID_PARAM;

		float left[3], right[3];
		if (dtStatusFailed(getPortalPoints(from, fromPoly, fromTile, to, toPoly, toTile, left, right)))
			break;

		if (options & DT_STRAIGHTPATH_AREA_CROSSINGS)
		{
			// Skip intersection if only area crossings are requested.
// NOTE(review): continuation of dtNavMeshQuery::appendPortals (head on the
// previous source line).
			if (fromPoly->getArea() == toPoly->getArea())
				continue;
		}

		// Append intersection
		float s,t;
		if (dtIntersectSegSeg2D(startPos, endPos, left, right, s, t))
		{
			float pt[3];
			dtVlerp(pt, left,right, t);

			stat = appendVertex(pt, 0, path[i+1], straightPath, straightPathFlags, straightPathRefs, straightPathCount, maxStraightPath);
			if (stat != DT_IN_PROGRESS)
				return stat;
		}
	}
	return DT_IN_PROGRESS;
}

/// @par
///
/// This method performs what is often called 'string pulling'.
///
/// The start position is clamped to the first polygon in the path, and the
/// end position is clamped to the last. So the start and end positions should
/// normally be within or very near the first and last polygons respectively.
///
/// The returned polygon references represent the reference id of the polygon
/// that is entered at the associated path position. The reference id associated
/// with the end point will always be zero. This allows, for example, matching
/// off-mesh link points to their representative polygons.
///
/// If the provided result buffers are too small for the entire result set,
/// they will be filled as far as possible from the start toward the end
/// position.
///
dtStatus dtNavMeshQuery::findStraightPath(const float* startPos, const float* endPos, const dtPolyRef* path, const int pathSize, float* straightPath, unsigned char* straightPathFlags, dtPolyRef* straightPathRefs, int* straightPathCount, const int maxStraightPath, const int options) const
{
	dtAssert(m_nav);

	*straightPathCount = 0;

	if (!maxStraightPath)
		return DT_FAILURE | DT_INVALID_PARAM;

	if (!path[0])
		return DT_FAILURE | DT_INVALID_PARAM;

	dtStatus stat = 0;

	// TODO: Should this be callers responsibility?
// NOTE(review): body of dtNavMeshQuery::findStraightPath — the funnel
// ("string pulling") algorithm over the polygon corridor.
	float closestStartPos[3];
	if (dtStatusFailed(closestPointOnPolyBoundary(path[0], startPos, closestStartPos)))
		return DT_FAILURE | DT_INVALID_PARAM;

	float closestEndPos[3];
	if (dtStatusFailed(closestPointOnPolyBoundary(path[pathSize-1], endPos, closestEndPos)))
		return DT_FAILURE | DT_INVALID_PARAM;

	// Add start point.
	stat = appendVertex(closestStartPos, DT_STRAIGHTPATH_START, path[0], straightPath, straightPathFlags, straightPathRefs, straightPathCount, maxStraightPath);
	if (stat != DT_IN_PROGRESS)
		return stat;

	if (pathSize > 1)
	{
		// Funnel state: apex plus left/right funnel boundary points and the
		// corridor indices they came from.
		float portalApex[3], portalLeft[3], portalRight[3];
		dtVcopy(portalApex, closestStartPos);
		dtVcopy(portalLeft, portalApex);
		dtVcopy(portalRight, portalApex);
		int apexIndex = 0;
		int leftIndex = 0;
		int rightIndex = 0;

		unsigned char leftPolyType = 0;
		unsigned char rightPolyType = 0;

		dtPolyRef leftPolyRef = path[0];
		dtPolyRef rightPolyRef = path[0];

		for (int i = 0; i < pathSize; ++i)
		{
			float left[3], right[3];
			unsigned char fromType, toType;

			if (i+1 < pathSize)
			{
				// Next portal.
				if (dtStatusFailed(getPortalPoints(path[i], path[i+1], left, right, fromType, toType)))
				{
					// Failed to get portal points, in practice this means that path[i+1] is invalid polygon.
					// Clamp the end point to path[i], and return the path so far.
					if (dtStatusFailed(closestPointOnPolyBoundary(path[i], endPos, closestEndPos)))
					{
						// This should only happen when the first polygon is invalid.
						return DT_FAILURE | DT_INVALID_PARAM;
					}

					// Append portals along the current straight path segment.
					if (options & (DT_STRAIGHTPATH_AREA_CROSSINGS | DT_STRAIGHTPATH_ALL_CROSSINGS))
					{
						stat = appendPortals(apexIndex, i, closestEndPos, path, straightPath, straightPathFlags, straightPathRefs, straightPathCount, maxStraightPath, options);
					}

					stat = appendVertex(closestEndPos, 0, path[i], straightPath, straightPathFlags, straightPathRefs, straightPathCount, maxStraightPath);

					return DT_SUCCESS | DT_PARTIAL_RESULT | ((*straightPathCount >= maxStraightPath) ? DT_BUFFER_TOO_SMALL : 0);
				}

				// If starting really close the portal, advance.
				if (i == 0)
				{
					float t;
					if (dtDistancePtSegSqr2D(portalApex, left, right, t) < dtSqr(0.001f))
						continue;
				}
			}
			else
			{
				// End of the path.
				dtVcopy(left, closestEndPos);
				dtVcopy(right, closestEndPos);

				fromType = toType = DT_POLYTYPE_GROUND;
			}

			// Right vertex.
			if (dtTriArea2D(portalApex, portalRight, right) <= 0.0f)
			{
				if (dtVequal(portalApex, portalRight) || dtTriArea2D(portalApex, portalLeft, right) > 0.0f)
				{
					// Tighten the funnel on the right side.
					dtVcopy(portalRight, right);
					rightPolyRef = (i+1 < pathSize) ? path[i+1] : 0;
					rightPolyType = toType;
					rightIndex = i;
				}
				else
				{
					// Right crossed over left: the left point becomes the new apex.
					// Append portals along the current straight path segment.
					if (options & (DT_STRAIGHTPATH_AREA_CROSSINGS | DT_STRAIGHTPATH_ALL_CROSSINGS))
					{
						stat = appendPortals(apexIndex, leftIndex, portalLeft, path, straightPath, straightPathFlags, straightPathRefs, straightPathCount, maxStraightPath, options);
						if (stat != DT_IN_PROGRESS)
							return stat;
					}

					dtVcopy(portalApex, portalLeft);
					apexIndex = leftIndex;

					unsigned char flags = 0;
					if (!leftPolyRef)
						flags = DT_STRAIGHTPATH_END;
					else if (leftPolyType == DT_POLYTYPE_OFFMESH_CONNECTION)
						flags = DT_STRAIGHTPATH_OFFMESH_CONNECTION;
					dtPolyRef ref = leftPolyRef;

					// Append or update vertex
					stat = appendVertex(portalApex, flags, ref, straightPath, straightPathFlags, straightPathRefs, straightPathCount, maxStraightPath);
					if (stat != DT_IN_PROGRESS)
						return stat;

					dtVcopy(portalLeft, portalApex);
					dtVcopy(portalRight, portalApex);
					leftIndex = apexIndex;
					rightIndex = apexIndex;

					// Restart
					i = apexIndex;

					continue;
				}
			}

			// Left vertex.
			if (dtTriArea2D(portalApex, portalLeft, left) >= 0.0f)
			{
				if (dtVequal(portalApex, portalLeft) || dtTriArea2D(portalApex, portalRight, left) < 0.0f)
				{
					// Tighten the funnel on the left side.
					dtVcopy(portalLeft, left);
					leftPolyRef = (i+1 < pathSize) ? path[i+1] : 0;
					leftPolyType = toType;
					leftIndex = i;
				}
				else
				{
					// Left crossed over right: the right point becomes the new apex.
					// Append portals along the current straight path segment.
// NOTE(review): tail of the funnel loop in dtNavMeshQuery::findStraightPath
// (left-crossover apex handling), then the function epilogue.
					if (options & (DT_STRAIGHTPATH_AREA_CROSSINGS | DT_STRAIGHTPATH_ALL_CROSSINGS))
					{
						stat = appendPortals(apexIndex, rightIndex, portalRight, path, straightPath, straightPathFlags, straightPathRefs, straightPathCount, maxStraightPath, options);
						if (stat != DT_IN_PROGRESS)
							return stat;
					}

					dtVcopy(portalApex, portalRight);
					apexIndex = rightIndex;

					unsigned char flags = 0;
					if (!rightPolyRef)
						flags = DT_STRAIGHTPATH_END;
					else if (rightPolyType == DT_POLYTYPE_OFFMESH_CONNECTION)
						flags = DT_STRAIGHTPATH_OFFMESH_CONNECTION;
					dtPolyRef ref = rightPolyRef;

					// Append or update vertex
					stat = appendVertex(portalApex, flags, ref, straightPath, straightPathFlags, straightPathRefs, straightPathCount, maxStraightPath);
					if (stat != DT_IN_PROGRESS)
						return stat;

					dtVcopy(portalLeft, portalApex);
					dtVcopy(portalRight, portalApex);
					leftIndex = apexIndex;
					rightIndex = apexIndex;

					// Restart
					i = apexIndex;

					continue;
				}
			}
		}

		// Append portals along the current straight path segment.
		if (options & (DT_STRAIGHTPATH_AREA_CROSSINGS | DT_STRAIGHTPATH_ALL_CROSSINGS))
		{
			stat = appendPortals(apexIndex, pathSize-1, closestEndPos, path, straightPath, straightPathFlags, straightPathRefs, straightPathCount, maxStraightPath, options);
			if (stat != DT_IN_PROGRESS)
				return stat;
		}
	}

	stat = appendVertex(closestEndPos, DT_STRAIGHTPATH_END, 0, straightPath, straightPathFlags, straightPathRefs, straightPathCount, maxStraightPath);

	return DT_SUCCESS | ((*straightPathCount >= maxStraightPath) ? DT_BUFFER_TOO_SMALL : 0);
}

/// @par
///
/// This method is optimized for small delta movement and a small number of
/// polygons. If used for too great a distance, the result set will form an
/// incomplete path.
///
/// @p resultPos will equal the @p endPos if the end is reached.
/// Otherwise the closest reachable position will be returned.
///
/// @p resultPos is not projected onto the surface of the navigation
/// mesh. Use #getPolyHeight if this is needed.
///
/// This method treats the end position in the same manner as
/// the #raycast method. (As a 2D point.) See that method's documentation
/// for details.
///
/// If the @p visited array is too small to hold the entire result set, it will
/// be filled as far as possible from the start position toward the end
/// position.
///
dtStatus dtNavMeshQuery::moveAlongSurface(dtPolyRef startRef, const float* startPos, const float* endPos,
										  const dtQueryFilter* filter,
										  float* resultPos, dtPolyRef* visited, int* visitedCount, const int maxVisitedSize) const
{
	dtAssert(m_nav);
	dtAssert(m_tinyNodePool);

	*visitedCount = 0;

	// Validate input
	if (!startRef)
		return DT_FAILURE | DT_INVALID_PARAM;
	if (!m_nav->isValidPolyRef(startRef))
		return DT_FAILURE | DT_INVALID_PARAM;

	dtStatus status = DT_SUCCESS;

	// Fixed-size FIFO for the breadth-first walk; overflow simply prunes the
	// search (acceptable because this method is documented for small deltas).
	static const int MAX_STACK = 48;
	dtNode* stack[MAX_STACK];
	int nstack = 0;

	m_tinyNodePool->clear();

	dtNode* startNode = m_tinyNodePool->getNode(startRef);
	startNode->pidx = 0;
	startNode->cost = 0;
	startNode->total = 0;
	startNode->id = startRef;
	startNode->flags = DT_NODE_CLOSED;
	stack[nstack++] = startNode;

	// Best (closest-to-endPos) position found so far; falls back to startPos.
	float bestPos[3];
	float bestDist = FLT_MAX;
	dtNode* bestNode = 0;
	dtVcopy(bestPos, startPos);

	// Search constraints: a circle centered on the midpoint of the move,
	// with radius half the move distance (plus epsilon). Polygons whose
	// shared edge lies outside this circle cannot contain the path.
	float searchPos[3], searchRadSqr;
	dtVlerp(searchPos, startPos, endPos, 0.5f);
	searchRadSqr = dtSqr(dtVdist(startPos, endPos)/2.0f + 0.001f);

	float verts[DT_VERTS_PER_POLYGON*3];

	while (nstack)
	{
		// Pop front.
		dtNode* curNode = stack[0];
		for (int i = 0; i < nstack-1; ++i)
			stack[i] = stack[i+1];
		nstack--;

		// Get poly and tile.
		// The API input has been checked already, skip checking internal data.
		const dtPolyRef curRef = curNode->id;
		const dtMeshTile* curTile = 0;
		const dtPoly* curPoly = 0;
		m_nav->getTileAndPolyByRefUnsafe(curRef, &curTile, &curPoly);

		// Collect vertices.
		const int nverts = curPoly->vertCount;
		for (int i = 0; i < nverts; ++i)
			dtVcopy(&verts[i*3], &curTile->verts[curPoly->verts[i]*3]);

		// If target is inside the poly, stop search.
		if (dtPointInPolygon(endPos, verts, nverts))
		{
			bestNode = curNode;
			dtVcopy(bestPos, endPos);
			break;
		}

		// Find wall edges and find nearest point inside the walls.
		for (int i = 0, j = (int)curPoly->vertCount-1; i < (int)curPoly->vertCount; j = i++)
		{
			// Find links to neighbours.
			static const int MAX_NEIS = 8;
			int nneis = 0;
			dtPolyRef neis[MAX_NEIS];

			if (curPoly->neis[j] & DT_EXT_LINK)
			{
				// Tile border: edge may connect to several polys in the
				// neighbouring tile, collect every passable one.
				for (unsigned int k = curPoly->firstLink; k != DT_NULL_LINK; k = curTile->links[k].next)
				{
					const dtLink* link = &curTile->links[k];
					if (link->edge == j)
					{
						if (link->ref != 0)
						{
							const dtMeshTile* neiTile = 0;
							const dtPoly* neiPoly = 0;
							m_nav->getTileAndPolyByRefUnsafe(link->ref, &neiTile, &neiPoly);
							if (filter->passFilter(link->ref, neiTile, neiPoly))
							{
								if (nneis < MAX_NEIS)
									neis[nneis++] = link->ref;
							}
						}
					}
				}
			}
			else if (curPoly->neis[j])
			{
				const unsigned int idx = (unsigned int)(curPoly->neis[j]-1);
				const dtPolyRef ref = m_nav->getPolyRefBase(curTile) | idx;
				if (filter->passFilter(ref, curTile, &curTile->polys[idx]))
				{
					// Internal edge, encode id.
					neis[nneis++] = ref;
				}
			}

			if (!nneis)
			{
				// Wall edge (no passable neighbour), calc distance.
				const float* vj = &verts[j*3];
				const float* vi = &verts[i*3];
				float tseg;
				const float distSqr = dtDistancePtSegSqr2D(endPos, vj, vi, tseg);
				if (distSqr < bestDist)
				{
					// Update nearest distance.
					dtVlerp(bestPos, vj,vi, tseg);
					bestDist = distSqr;
					bestNode = curNode;
				}
			}
			else
			{
				for (int k = 0; k < nneis; ++k)
				{
					// Skip if no node can be allocated.
					dtNode* neighbourNode = m_tinyNodePool->getNode(neis[k]);
					if (!neighbourNode)
						continue;
					// Skip if already visited.
					if (neighbourNode->flags & DT_NODE_CLOSED)
						continue;

					// Skip the link if it is too far from search constraint.
					// TODO: Maybe should use getPortalPoints(), but this one is way faster.
					const float* vj = &verts[j*3];
					const float* vi = &verts[i*3];
					float tseg;
					float distSqr = dtDistancePtSegSqr2D(searchPos, vj, vi, tseg);
					if (distSqr > searchRadSqr)
						continue;

					// Mark the node as visited and push to queue.
					if (nstack < MAX_STACK)
					{
						neighbourNode->pidx = m_tinyNodePool->getNodeIdx(curNode);
						neighbourNode->flags |= DT_NODE_CLOSED;
						stack[nstack++] = neighbourNode;
					}
				}
			}
		}
	}

	int n = 0;
	if (bestNode)
	{
		// Reverse the parent-index chain in place so the visited list is
		// emitted start-to-end rather than end-to-start.
		dtNode* prev = 0;
		dtNode* node = bestNode;
		do
		{
			dtNode* next = m_tinyNodePool->getNodeAtIdx(node->pidx);
			node->pidx = m_tinyNodePool->getNodeIdx(prev);
			prev = node;
			node = next;
		}
		while (node);

		// Store result
		node = prev;
		do
		{
			visited[n++] = node->id;
			if (n >= maxVisitedSize)
			{
				status |= DT_BUFFER_TOO_SMALL;
				break;
			}
			node = m_tinyNodePool->getNodeAtIdx(node->pidx);
		}
		while (node);
	}

	dtVcopy(resultPos, bestPos);

	*visitedCount = n;

	return status;
}

/// Looks up both polygons by ref, reports their types, then delegates to the
/// tile-level getPortalPoints() overload to compute the shared-edge endpoints.
dtStatus dtNavMeshQuery::getPortalPoints(dtPolyRef from, dtPolyRef to, float* left, float* right,
										 unsigned char& fromType, unsigned char& toType) const
{
	dtAssert(m_nav);

	const dtMeshTile* fromTile = 0;
	const dtPoly* fromPoly = 0;
	if (dtStatusFailed(m_nav->getTileAndPolyByRef(from, &fromTile, &fromPoly)))
		return DT_FAILURE | DT_INVALID_PARAM;
	fromType = fromPoly->getType();

	const dtMeshTile* toTile = 0;
	const dtPoly* toPoly = 0;
	if (dtStatusFailed(m_nav->getTileAndPolyByRef(to, &toTile, &toPoly)))
		return DT_FAILURE | DT_INVALID_PARAM;
	toType = toPoly->getType();

	return getPortalPoints(from, fromPoly, fromTile, to, toPoly, toTile, left, right);
}

// Returns portal points between two polygons.
// For off-mesh connections the "portal" degenerates to a single point
// (left == right). Fails with DT_INVALID_PARAM when the polys are not linked.
dtStatus dtNavMeshQuery::getPortalPoints(dtPolyRef from, const dtPoly* fromPoly, const dtMeshTile* fromTile,
										 dtPolyRef to, const dtPoly* toPoly, const dtMeshTile* toTile,
										 float* left, float* right) const
{
	// Find the link that points to the 'to' polygon.
	const dtLink* link = 0;
	for (unsigned int i = fromPoly->firstLink; i != DT_NULL_LINK; i = fromTile->links[i].next)
	{
		if (fromTile->links[i].ref == to)
		{
			link = &fromTile->links[i];
			break;
		}
	}
	if (!link)
		return DT_FAILURE | DT_INVALID_PARAM;

	// Handle off-mesh connections.
	if (fromPoly->getType() == DT_POLYTYPE_OFFMESH_CONNECTION)
	{
		// Find link that points to first vertex.
		for (unsigned int i = fromPoly->firstLink; i != DT_NULL_LINK; i = fromTile->links[i].next)
		{
			if (fromTile->links[i].ref == to)
			{
				const int v = fromTile->links[i].edge;
				dtVcopy(left, &fromTile->verts[fromPoly->verts[v]*3]);
				dtVcopy(right, &fromTile->verts[fromPoly->verts[v]*3]);
				return DT_SUCCESS;
			}
		}
		return DT_FAILURE | DT_INVALID_PARAM;
	}

	if (toPoly->getType() == DT_POLYTYPE_OFFMESH_CONNECTION)
	{
		for (unsigned int i = toPoly->firstLink; i != DT_NULL_LINK; i = toTile->links[i].next)
		{
			if (toTile->links[i].ref == from)
			{
				const int v = toTile->links[i].edge;
				dtVcopy(left, &toTile->verts[toPoly->verts[v]*3]);
				dtVcopy(right, &toTile->verts[toPoly->verts[v]*3]);
				return DT_SUCCESS;
			}
		}
		return DT_FAILURE | DT_INVALID_PARAM;
	}

	// Find portal vertices.
	const int v0 = fromPoly->verts[link->edge];
	const int v1 = fromPoly->verts[(link->edge+1) % (int)fromPoly->vertCount];
	dtVcopy(left, &fromTile->verts[v0*3]);
	dtVcopy(right, &fromTile->verts[v1*3]);

	// If the link is at tile boundary, dtClamp the vertices to
	// the link width (bmin/bmax are the portal extents quantized to 0..255).
	if (link->side != 0xff)
	{
		// Unpack portal limits.
		if (link->bmin != 0 || link->bmax != 255)
		{
			const float s = 1.0f/255.0f;
			const float tmin = link->bmin*s;
			const float tmax = link->bmax*s;
			dtVlerp(left, &fromTile->verts[v0*3], &fromTile->verts[v1*3], tmin);
			dtVlerp(right, &fromTile->verts[v0*3], &fromTile->verts[v1*3], tmax);
		}
	}

	return DT_SUCCESS;
}

// Returns edge mid point between two polygons.
// Convenience overload: resolves both polys by ref via getPortalPoints(),
// then returns the midpoint of their shared portal edge.
dtStatus dtNavMeshQuery::getEdgeMidPoint(dtPolyRef from, dtPolyRef to, float* mid) const
{
	float left[3], right[3];
	unsigned char fromType, toType;
	if (dtStatusFailed(getPortalPoints(from, to, left,right, fromType, toType)))
		return DT_FAILURE | DT_INVALID_PARAM;
	mid[0] = (left[0]+right[0])*0.5f;
	mid[1] = (left[1]+right[1])*0.5f;
	mid[2] = (left[2]+right[2])*0.5f;
	return DT_SUCCESS;
}

// Tile-level variant: same as above but the caller already holds the
// tile/poly pointers, avoiding the ref lookups.
dtStatus dtNavMeshQuery::getEdgeMidPoint(dtPolyRef from, const dtPoly* fromPoly, const dtMeshTile* fromTile,
										 dtPolyRef to, const dtPoly* toPoly, const dtMeshTile* toTile,
										 float* mid) const
{
	float left[3], right[3];
	if (dtStatusFailed(getPortalPoints(from, fromPoly, fromTile, to, toPoly, toTile, left, right)))
		return DT_FAILURE | DT_INVALID_PARAM;
	mid[0] = (left[0]+right[0])*0.5f;
	mid[1] = (left[1]+right[1])*0.5f;
	mid[2] = (left[2]+right[2])*0.5f;
	return DT_SUCCESS;
}

/// @par
///
/// This method is meant to be used for quick, short distance checks.
///
/// If the path array is too small to hold the result, it will be filled as
/// far as possible from the start position toward the end position.
///
/// <b>Using the Hit Parameter (t)</b>
///
/// If the hit parameter is a very high value (FLT_MAX), then the ray has hit
/// the end position. In this case the path represents a valid corridor to the
/// end position and the value of @p hitNormal is undefined.
///
/// If the hit parameter is zero, then the start position is on the wall that
/// was hit and the value of @p hitNormal is undefined.
///
/// If 0 < t < 1.0 then the following applies:
///
/// @code
/// distanceToHitBorder = distanceToEndPosition * t
/// hitPoint = startPos + (endPos - startPos) * t
/// @endcode
///
/// <b>Use Case Restriction</b>
///
/// The raycast ignores the y-value of the end position. (2D check.) This
/// places significant limits on how it can be used. For example:
///
/// Consider a scene where there is a main floor with a second floor balcony
/// that hangs over the main floor. So the first floor mesh extends below the
/// balcony mesh. The start position is somewhere on the first floor. The end
/// position is on the balcony.
///
/// The raycast will search toward the end position along the first floor mesh.
/// If it reaches the end position's xz-coordinates it will indicate FLT_MAX
/// (no wall hit), meaning it reached the end position. This is one example of why
/// this method is meant for short distance checks.
///
// Legacy-signature wrapper: adapts the output-parameter API onto the
// dtRaycastHit-based overload below (options = 0, no cost accumulation).
dtStatus dtNavMeshQuery::raycast(dtPolyRef startRef, const float* startPos, const float* endPos,
								 const dtQueryFilter* filter,
								 float* t, float* hitNormal, dtPolyRef* path, int* pathCount, const int maxPath) const
{
	dtRaycastHit hit;
	hit.path = path;
	hit.maxPath = maxPath;

	dtStatus status = raycast(startRef, startPos, endPos, filter, 0, &hit);

	*t = hit.t;
	if (hitNormal)
		dtVcopy(hitNormal, hit.hitNormal);
	if (pathCount)
		*pathCount = hit.pathCount;

	return status;
}

/// @par
///
/// This method is meant to be used for quick, short distance checks.
///
/// If the path array is too small to hold the result, it will be filled as
/// far as possible from the start position toward the end position.
///
/// <b>Using the Hit Parameter t of RaycastHit</b>
///
/// If the hit parameter is a very high value (FLT_MAX), then the ray has hit
/// the end position. In this case the path represents a valid corridor to the
/// end position and the value of @p hitNormal is undefined.
///
/// If the hit parameter is zero, then the start position is on the wall that
/// was hit and the value of @p hitNormal is undefined.
///
/// If 0 < t < 1.0 then the following applies:
///
/// @code
/// distanceToHitBorder = distanceToEndPosition * t
/// hitPoint = startPos + (endPos - startPos) * t
/// @endcode
///
/// <b>Use Case Restriction</b>
///
/// The raycast ignores the y-value of the end position. (2D check.) This
/// places significant limits on how it can be used. For example:
///
/// Consider a scene where there is a main floor with a second floor balcony
/// that hangs over the main floor. So the first floor mesh extends below the
/// balcony mesh. The start position is somewhere on the first floor. The end
/// position is on the balcony.
///
/// The raycast will search toward the end position along the first floor mesh.
/// If it reaches the end position's xz-coordinates it will indicate FLT_MAX
/// (no wall hit), meaning it reached the end position. This is one example of why
/// this method is meant for short distance checks.
///
dtStatus dtNavMeshQuery::raycast(dtPolyRef startRef, const float* startPos, const float* endPos,
								 const dtQueryFilter* filter, const unsigned int options,
								 dtRaycastHit* hit, dtPolyRef prevRef) const
{
	dtAssert(m_nav);

	hit->t = 0;
	hit->pathCount = 0;
	hit->pathCost = 0;

	// Validate input
	if (!startRef || !m_nav->isValidPolyRef(startRef))
		return DT_FAILURE | DT_INVALID_PARAM;
	if (prevRef && !m_nav->isValidPolyRef(prevRef))
		return DT_FAILURE | DT_INVALID_PARAM;

	float dir[3], curPos[3], lastPos[3];
	float verts[DT_VERTS_PER_POLYGON*3+3];
	int n = 0;

	dtVcopy(curPos, startPos);
	dtVsub(dir, endPos, startPos);
	dtVset(hit->hitNormal, 0, 0, 0);

	dtStatus status = DT_SUCCESS;

	const dtMeshTile* prevTile, *tile, *nextTile;
	const dtPoly* prevPoly, *poly, *nextPoly;
	dtPolyRef curRef, nextRef;

	// The API input has been checked already, skip checking internal data.
	nextRef = curRef = startRef;
	tile = 0;
	poly = 0;
	m_nav->getTileAndPolyByRefUnsafe(curRef, &tile, &poly);
	nextTile = prevTile = tile;
	nextPoly = prevPoly = poly;
	if (prevRef)
		m_nav->getTileAndPolyByRefUnsafe(prevRef, &prevTile, &prevPoly);

	// Walk polygon-to-polygon along the 2D segment until a wall is hit,
	// the end position is reached, or the polygon chain is exhausted.
	while (curRef)
	{
		// Cast ray against current polygon.

		// Collect vertices.
		int nv = 0;
		for (int i = 0; i < (int)poly->vertCount; ++i)
		{
			dtVcopy(&verts[nv*3], &tile->verts[poly->verts[i]*3]);
			nv++;
		}

		float tmin, tmax;
		int segMin, segMax;
		if (!dtIntersectSegmentPoly2D(startPos, endPos, verts, nv, tmin, tmax, segMin, segMax))
		{
			// Could not hit the polygon, keep the old t and report hit.
			hit->pathCount = n;
			return status;
		}
		// Nostalrius: may be a bugged triangle (3 aligned points).
		// NOTE(review): fork-specific guard — bails out with a bare DT_FAILURE,
		// unlike upstream Detour; callers relying on this behavior exist here.
		if (tmax - tmin < 0.00001f)
			return DT_FAILURE;
		// Keep track of furthest t so far.
		if (tmax > hit->t)
			hit->t = tmax;

		// Store visited polygons.
		if (n < hit->maxPath)
			hit->path[n++] = curRef;
		else
			status |= DT_BUFFER_TOO_SMALL;

		// Ray end is completely inside the polygon.
		if (segMax == -1)
		{
			hit->t = FLT_MAX;
			hit->pathCount = n;

			// add the cost
			if (options & DT_RAYCAST_USE_COSTS)
				hit->pathCost += filter->getCost(curPos, endPos, prevRef, prevTile, prevPoly, curRef, tile, poly, curRef, tile, poly);
			return status;
		}

		// Follow neighbours.
		nextRef = 0;

		for (unsigned int i = poly->firstLink; i != DT_NULL_LINK; i = tile->links[i].next)
		{
			const dtLink* link = &tile->links[i];

			// Find link which contains this edge.
			if ((int)link->edge != segMax)
				continue;

			// Get pointer to the next polygon.
			nextTile = 0;
			nextPoly = 0;
			m_nav->getTileAndPolyByRefUnsafe(link->ref, &nextTile, &nextPoly);

			// Skip off-mesh connections.
			if (nextPoly->getType() == DT_POLYTYPE_OFFMESH_CONNECTION)
				continue;

			// Skip links based on filter.
			if (!filter->passFilter(link->ref, nextTile, nextPoly))
				continue;

			// If the link is internal, just return the ref.
			if (link->side == 0xff)
			{
				nextRef = link->ref;
				break;
			}

			// If the link is at tile boundary,

			// Check if the link spans the whole edge, and accept.
			if (link->bmin == 0 && link->bmax == 255)
			{
				nextRef = link->ref;
				break;
			}

			// Check for partial edge links.
			const int v0 = poly->verts[link->edge];
			const int v1 = poly->verts[(link->edge+1) % poly->vertCount];
			const float* left = &tile->verts[v0*3];
			const float* right = &tile->verts[v1*3];

			// Check that the intersection lies inside the link portal.
			// Sides 0/4 are portals perpendicular to X (compare Z);
			// sides 2/6 are perpendicular to Z (compare X).
			if (link->side == 0 || link->side == 4)
			{
				// Calculate link size.
				const float s = 1.0f/255.0f;
				float lmin = left[2] + (right[2] - left[2])*(link->bmin*s);
				float lmax = left[2] + (right[2] - left[2])*(link->bmax*s);
				if (lmin > lmax) dtSwap(lmin, lmax);

				// Find Z intersection.
				float z = startPos[2] + (endPos[2]-startPos[2])*tmax;
				if (z >= lmin && z <= lmax)
				{
					nextRef = link->ref;
					break;
				}
			}
			else if (link->side == 2 || link->side == 6)
			{
				// Calculate link size.
				const float s = 1.0f/255.0f;
				float lmin = left[0] + (right[0] - left[0])*(link->bmin*s);
				float lmax = left[0] + (right[0] - left[0])*(link->bmax*s);
				if (lmin > lmax) dtSwap(lmin, lmax);

				// Find X intersection.
				float x = startPos[0] + (endPos[0]-startPos[0])*tmax;
				if (x >= lmin && x <= lmax)
				{
					nextRef = link->ref;
					break;
				}
			}
		}

		// add the cost
		if (options & DT_RAYCAST_USE_COSTS)
		{
			// compute the intersection point at the furthest end of the polygon
			// and correct the height (since the raycast moves in 2d)
			dtVcopy(lastPos, curPos);
			dtVmad(curPos, startPos, dir, hit->t);
			float* e1 = &verts[segMax*3];
			float* e2 = &verts[((segMax+1)%nv)*3];
			float eDir[3], diff[3];
			dtVsub(eDir, e2, e1);
			dtVsub(diff, curPos, e1);
			// Project onto the dominant horizontal axis of the edge to avoid
			// dividing by a near-zero component.
			float s = dtSqr(eDir[0]) > dtSqr(eDir[2]) ? diff[0] / eDir[0] : diff[2] / eDir[2];
			curPos[1] = e1[1] + eDir[1] * s;

			hit->pathCost += filter->getCost(lastPos, curPos, prevRef, prevTile, prevPoly, curRef, tile, poly, nextRef, nextTile, nextPoly);
		}

		if (!nextRef)
		{
			// No neighbour, we hit a wall.

			// Calculate hit normal (perpendicular to the blocking edge, in the xz-plane).
			const int a = segMax;
			const int b = segMax+1 < nv ? segMax+1 : 0;
			const float* va = &verts[a*3];
			const float* vb = &verts[b*3];
			const float dx = vb[0] - va[0];
			const float dz = vb[2] - va[2];
			hit->hitNormal[0] = dz;
			hit->hitNormal[1] = 0;
			hit->hitNormal[2] = -dx;
			dtVnormalize(hit->hitNormal);

			hit->pathCount = n;
			return status;
		}

		// No hit, advance to neighbour polygon.
		prevRef = curRef;
		curRef = nextRef;
		prevTile = tile;
		tile = nextTile;
		prevPoly = poly;
		poly = nextPoly;
	}

	hit->pathCount = n;

	return status;
}

/// @par
///
/// At least one result array must be provided.
///
/// The order of the result set is from least to highest cost to reach the polygon.
///
/// A common use case for this method is to perform Dijkstra searches.
/// Candidate polygons are found by searching the graph beginning at the start polygon.
///
/// If a polygon is not found via the graph search, even if it intersects the
/// search circle, it will not be included in the result set. For example:
///
/// polyA is the start polygon.
/// polyB shares an edge with polyA. (Is adjacent.)
/// polyC shares an edge with polyB, but not with polyA
/// Even if the search circle overlaps polyC, it will not be included in the
/// result set unless polyB is also in the set.
///
/// The value of the center point is used as the start position for cost
/// calculations. It is not projected onto the surface of the mesh, so its
/// y-value will affect the costs.
///
/// Intersection tests occur in 2D. All polygons and the search circle are
/// projected onto the xz-plane. So the y-value of the center point does not
/// affect intersection tests.
///
/// If the result arrays are too small to hold the entire result set, they will be
/// filled to capacity.
///
dtStatus dtNavMeshQuery::findPolysAroundCircle(dtPolyRef startRef, const float* centerPos, const float radius,
											   const dtQueryFilter* filter,
											   dtPolyRef* resultRef, dtPolyRef* resultParent, float* resultCost,
											   int* resultCount, const int maxResult) const
{
	dtAssert(m_nav);
	dtAssert(m_nodePool);
	dtAssert(m_openList);

	*resultCount = 0;

	// Validate input
	if (!startRef || !m_nav->isValidPolyRef(startRef))
		return DT_FAILURE | DT_INVALID_PARAM;

	m_nodePool->clear();
	m_openList->clear();

	// Seed the Dijkstra search with the start polygon at zero cost.
	dtNode* startNode = m_nodePool->getNode(startRef);
	dtVcopy(startNode->pos, centerPos);
	startNode->pidx = 0;
	startNode->cost = 0;
	startNode->total = 0;
	startNode->id = startRef;
	startNode->flags = DT_NODE_OPEN;
	m_openList->push(startNode);

	dtStatus status = DT_SUCCESS;

	int n = 0;
	if (n < maxResult)
	{
		if (resultRef)
			resultRef[n] = startNode->id;
		if (resultParent)
			resultParent[n] = 0;
		if (resultCost)
			resultCost[n] = 0;
		++n;
	}
	else
	{
		status |= DT_BUFFER_TOO_SMALL;
	}

	const float radiusSqr = dtSqr(radius);

	while (!m_openList->empty())
	{
		dtNode* bestNode = m_openList->pop();
		bestNode->flags &= ~DT_NODE_OPEN;
		bestNode->flags |= DT_NODE_CLOSED;

		// Get poly and tile.
		// The API input has been checked already, skip checking internal data.
		const dtPolyRef bestRef = bestNode->id;
		const dtMeshTile* bestTile = 0;
		const dtPoly* bestPoly = 0;
		m_nav->getTileAndPolyByRefUnsafe(bestRef, &bestTile, &bestPoly);

		// Get parent poly and tile.
		dtPolyRef parentRef = 0;
		const dtMeshTile* parentTile = 0;
		const dtPoly* parentPoly = 0;
		if (bestNode->pidx)
			parentRef = m_nodePool->getNodeAtIdx(bestNode->pidx)->id;
		if (parentRef)
			m_nav->getTileAndPolyByRefUnsafe(parentRef, &parentTile, &parentPoly);

		for (unsigned int i = bestPoly->firstLink; i != DT_NULL_LINK; i = bestTile->links[i].next)
		{
			const dtLink* link = &bestTile->links[i];
			dtPolyRef neighbourRef = link->ref;
			// Skip invalid neighbours and do not follow back to parent.
			if (!neighbourRef || neighbourRef == parentRef)
				continue;

			// Expand to neighbour
			const dtMeshTile* neighbourTile = 0;
			const dtPoly* neighbourPoly = 0;
			m_nav->getTileAndPolyByRefUnsafe(neighbourRef, &neighbourTile, &neighbourPoly);

			// Do not advance if the polygon is excluded by the filter.
			if (!filter->passFilter(neighbourRef, neighbourTile, neighbourPoly))
				continue;

			// Find edge and calc distance to the edge.
			float va[3], vb[3];
			if (!getPortalPoints(bestRef, bestPoly, bestTile, neighbourRef, neighbourPoly, neighbourTile, va, vb))
				continue;

			// If the circle is not touching the next polygon, skip it.
			float tseg;
			float distSqr = dtDistancePtSegSqr2D(centerPos, va, vb, tseg);
			if (distSqr > radiusSqr)
				continue;

			dtNode* neighbourNode = m_nodePool->getNode(neighbourRef);
			if (!neighbourNode)
			{
				status |= DT_OUT_OF_NODES;
				continue;
			}

			if (neighbourNode->flags & DT_NODE_CLOSED)
				continue;

			// Cost: on first visit (flags == 0) anchor the node at the
			// portal midpoint so edge distances have a stable basis.
			if (neighbourNode->flags == 0)
				dtVlerp(neighbourNode->pos, va, vb, 0.5f);

			const float total = bestNode->total + dtVdist(bestNode->pos, neighbourNode->pos);

			// The node is already in open list and the new result is worse, skip.
			if ((neighbourNode->flags & DT_NODE_OPEN) && total >= neighbourNode->total)
				continue;

			neighbourNode->id = neighbourRef;
			neighbourNode->flags = (neighbourNode->flags & ~DT_NODE_CLOSED);
			neighbourNode->pidx = m_nodePool->getNodeIdx(bestNode);
			neighbourNode->total = total;

			if (neighbourNode->flags & DT_NODE_OPEN)
			{
				m_openList->modify(neighbourNode);
			}
			else
			{
				// First time this polygon is reached: record it in the result
				// set before pushing it onto the open list.
				if (n < maxResult)
				{
					if (resultRef)
						resultRef[n] = neighbourNode->id;
					if (resultParent)
						resultParent[n] = m_nodePool->getNodeAtIdx(neighbourNode->pidx)->id;
					if (resultCost)
						resultCost[n] = neighbourNode->total;
					++n;
				}
				else
				{
					status |= DT_BUFFER_TOO_SMALL;
				}
				neighbourNode->flags = DT_NODE_OPEN;
				m_openList->push(neighbourNode);
			}
		}
	}

	*resultCount = n;

	return status;
}

/// @par
///
/// The order of the result set is from least to highest cost.
///
/// At least one result array must be provided.
///
/// A common use case for this method is to perform Dijkstra searches.
/// Candidate polygons are found by searching the graph beginning at the start
/// polygon.
///
/// The same intersection test restrictions that apply to findPolysAroundCircle()
/// method apply to this method.
///
/// The 3D centroid of the search polygon is used as the start position for cost
/// calculations.
///
/// Intersection tests occur in 2D. All polygons are projected onto the
/// xz-plane. So the y-values of the vertices do not affect intersection tests.
///
/// If the result arrays are too small to hold the entire result set, they will
/// be filled to capacity.
///
dtStatus dtNavMeshQuery::findPolysAroundShape(dtPolyRef startRef, const float* verts, const int nverts,
											  const dtQueryFilter* filter,
											  dtPolyRef* resultRef, dtPolyRef* resultParent, float* resultCost,
											  int* resultCount, const int maxResult) const
{
	dtAssert(m_nav);
	dtAssert(m_nodePool);
	dtAssert(m_openList);

	*resultCount = 0;

	// Validate input
	if (!startRef || !m_nav->isValidPolyRef(startRef))
		return DT_FAILURE | DT_INVALID_PARAM;

	m_nodePool->clear();
	m_openList->clear();

	// Centroid of the search shape; used as the cost-origin for the search.
	float centerPos[3] = {0,0,0};
	for (int i = 0; i < nverts; ++i)
		dtVadd(centerPos,centerPos,&verts[i*3]);
	dtVscale(centerPos,centerPos,1.0f/nverts);

	dtNode* startNode = m_nodePool->getNode(startRef);
	dtVcopy(startNode->pos, centerPos);
	startNode->pidx = 0;
	startNode->cost = 0;
	startNode->total = 0;
	startNode->id = startRef;
	startNode->flags = DT_NODE_OPEN;
	m_openList->push(startNode);

	dtStatus status = DT_SUCCESS;

	int n = 0;
	if (n < maxResult)
	{
		if (resultRef)
			resultRef[n] = startNode->id;
		if (resultParent)
			resultParent[n] = 0;
		if (resultCost)
			resultCost[n] = 0;
		++n;
	}
	else
	{
		status |= DT_BUFFER_TOO_SMALL;
	}

	while (!m_openList->empty())
	{
		dtNode* bestNode = m_openList->pop();
		bestNode->flags &= ~DT_NODE_OPEN;
		bestNode->flags |= DT_NODE_CLOSED;

		// Get poly and tile.
		// The API input has been checked already, skip checking internal data.
		const dtPolyRef bestRef = bestNode->id;
		const dtMeshTile* bestTile = 0;
		const dtPoly* bestPoly = 0;
		m_nav->getTileAndPolyByRefUnsafe(bestRef, &bestTile, &bestPoly);

		// Get parent poly and tile.
		dtPolyRef parentRef = 0;
		const dtMeshTile* parentTile = 0;
		const dtPoly* parentPoly = 0;
		if (bestNode->pidx)
			parentRef = m_nodePool->getNodeAtIdx(bestNode->pidx)->id;
		if (parentRef)
			m_nav->getTileAndPolyByRefUnsafe(parentRef, &parentTile, &parentPoly);

		for (unsigned int i = bestPoly->firstLink; i != DT_NULL_LINK; i = bestTile->links[i].next)
		{
			const dtLink* link = &bestTile->links[i];
			dtPolyRef neighbourRef = link->ref;
			// Skip invalid neighbours and do not follow back to parent.
			if (!neighbourRef || neighbourRef == parentRef)
				continue;

			// Expand to neighbour
			const dtMeshTile* neighbourTile = 0;
			const dtPoly* neighbourPoly = 0;
			m_nav->getTileAndPolyByRefUnsafe(neighbourRef, &neighbourTile, &neighbourPoly);

			// Do not advance if the polygon is excluded by the filter.
			if (!filter->passFilter(neighbourRef, neighbourTile, neighbourPoly))
				continue;

			// Find edge and calc distance to the edge.
			float va[3], vb[3];
			if (!getPortalPoints(bestRef, bestPoly, bestTile, neighbourRef, neighbourPoly, neighbourTile, va, vb))
				continue;

			// If the shape does not touch the portal edge to the next polygon,
			// skip the connection.
			float tmin, tmax;
			int segMin, segMax;
			if (!dtIntersectSegmentPoly2D(va, vb, verts, nverts, tmin, tmax, segMin, segMax))
				continue;
			if (tmin > 1.0f || tmax < 0.0f)
				continue;

			dtNode* neighbourNode = m_nodePool->getNode(neighbourRef);
			if (!neighbourNode)
			{
				status |= DT_OUT_OF_NODES;
				continue;
			}

			if (neighbourNode->flags & DT_NODE_CLOSED)
				continue;

			// Cost: on first visit (flags == 0) anchor the node at the
			// portal midpoint so edge distances have a stable basis.
			if (neighbourNode->flags == 0)
				dtVlerp(neighbourNode->pos, va, vb, 0.5f);

			const float total = bestNode->total + dtVdist(bestNode->pos, neighbourNode->pos);

			// The node is already in open list and the new result is worse, skip.
			if ((neighbourNode->flags & DT_NODE_OPEN) && total >= neighbourNode->total)
				continue;

			neighbourNode->id = neighbourRef;
			neighbourNode->flags = (neighbourNode->flags & ~DT_NODE_CLOSED);
			neighbourNode->pidx = m_nodePool->getNodeIdx(bestNode);
			neighbourNode->total = total;

			if (neighbourNode->flags & DT_NODE_OPEN)
			{
				m_openList->modify(neighbourNode);
			}
			else
			{
				// First time this polygon is reached: record it in the result
				// set before pushing it onto the open list.
				if (n < maxResult)
				{
					if (resultRef)
						resultRef[n] = neighbourNode->id;
					if (resultParent)
						resultParent[n] = m_nodePool->getNodeAtIdx(neighbourNode->pidx)->id;
					if (resultCost)
						resultCost[n] = neighbourNode->total;
					++n;
				}
				else
				{
					status |= DT_BUFFER_TOO_SMALL;
				}
				neighbourNode->flags = DT_NODE_OPEN;
				m_openList->push(neighbourNode);
			}
		}
	}

	*resultCount = n;

	return status;
}

/// @par
///
/// This method is optimized for a small search radius and small number of result
/// polygons.
///
/// Candidate polygons are found by searching the navigation graph beginning at
/// the start polygon.
///
/// The same intersection test restrictions that apply to the findPolysAroundCircle
/// method apply to this method.
///
/// The value of the center point is used as the start point for cost calculations.
/// It is not projected onto the surface of the mesh, so its y-value will affect
/// the costs.
///
/// Intersection tests occur in 2D. All polygons and the search circle are
/// projected onto the xz-plane. So the y-value of the center point does not
/// affect intersection tests.
///
/// If the result arrays are too small to hold the entire result set, they will
/// be filled to capacity.
///
dtStatus dtNavMeshQuery::findLocalNeighbourhood(dtPolyRef startRef, const float* centerPos, const float radius,
												const dtQueryFilter* filter,
												dtPolyRef* resultRef, dtPolyRef* resultParent,
												int* resultCount, const int maxResult) const
{
	dtAssert(m_nav);
	dtAssert(m_tinyNodePool);

	*resultCount = 0;

	// Validate input
	if (!startRef || !m_nav->isValidPolyRef(startRef))
		return DT_FAILURE | DT_INVALID_PARAM;

	// Fixed-size FIFO for the breadth-first walk; overflow simply prunes
	// the search (acceptable for the intended small radius).
	static const int MAX_STACK = 48;
	dtNode* stack[MAX_STACK];
	int nstack = 0;

	m_tinyNodePool->clear();

	dtNode* startNode = m_tinyNodePool->getNode(startRef);
	startNode->pidx = 0;
	startNode->id = startRef;
	startNode->flags = DT_NODE_CLOSED;
	stack[nstack++] = startNode;

	const float radiusSqr = dtSqr(radius);

	// Scratch vertex buffers for the 2D polygon-overlap tests below.
	float pa[DT_VERTS_PER_POLYGON*3];
	float pb[DT_VERTS_PER_POLYGON*3];

	dtStatus status = DT_SUCCESS;

	int n = 0;
	if (n < maxResult)
	{
		resultRef[n] = startNode->id;
		if (resultParent)
			resultParent[n] = 0;
		++n;
	}
	else
	{
		status |= DT_BUFFER_TOO_SMALL;
	}

	while (nstack)
	{
		// Pop front.
		dtNode* curNode = stack[0];
		for (int i = 0; i < nstack-1; ++i)
			stack[i] = stack[i+1];
		nstack--;

		// Get poly and tile.
		// The API input has been checked already, skip checking internal data.
		const dtPolyRef curRef = curNode->id;
		const dtMeshTile* curTile = 0;
		const dtPoly* curPoly = 0;
		m_nav->getTileAndPolyByRefUnsafe(curRef, &curTile, &curPoly);

		for (unsigned int i = curPoly->firstLink; i != DT_NULL_LINK; i = curTile->links[i].next)
		{
			const dtLink* link = &curTile->links[i];
			dtPolyRef neighbourRef = link->ref;
			// Skip invalid neighbours.
			if (!neighbourRef)
				continue;

			// Skip if cannot allocate more nodes.
			dtNode* neighbourNode = m_tinyNodePool->getNode(neighbourRef);
			if (!neighbourNode)
				continue;
			// Skip visited.
			if (neighbourNode->flags & DT_NODE_CLOSED)
				continue;

			// Expand to neighbour
			const dtMeshTile* neighbourTile = 0;
			const dtPoly* neighbourPoly = 0;
			m_nav->getTileAndPolyByRefUnsafe(neighbourRef, &neighbourTile, &neighbourPoly);

			// Skip off-mesh connections.
			if (neighbourPoly->getType() == DT_POLYTYPE_OFFMESH_CONNECTION)
				continue;

			// Do not advance if the polygon is excluded by the filter.
			if (!filter->passFilter(neighbourRef, neighbourTile, neighbourPoly))
				continue;

			// Find edge and calc distance to the edge.
			float va[3], vb[3];
			if (!getPortalPoints(curRef, curPoly, curTile, neighbourRef, neighbourPoly, neighbourTile, va, vb))
				continue;

			// If the circle is not touching the next polygon, skip it.
			float tseg;
			float distSqr = dtDistancePtSegSqr2D(centerPos, va, vb, tseg);
			if (distSqr > radiusSqr)
				continue;

			// Mark node visited, this is done before the overlap test so that
			// we will not visit the poly again if the test fails.
			neighbourNode->flags |= DT_NODE_CLOSED;
			neighbourNode->pidx = m_tinyNodePool->getNodeIdx(curNode);

			// Check that the polygon does not collide with existing polygons.

			// Collect vertices of the neighbour poly.
			const int npa = neighbourPoly->vertCount;
			for (int k = 0; k < npa; ++k)
				dtVcopy(&pa[k*3], &neighbourTile->verts[neighbourPoly->verts[k]*3]);

			bool overlap = false;
			for (int j = 0; j < n; ++j)
			{
				dtPolyRef pastRef = resultRef[j];

				// Connected polys do not overlap.
				bool connected = false;
				for (unsigned int k = curPoly->firstLink; k != DT_NULL_LINK; k = curTile->links[k].next)
				{
					if (curTile->links[k].ref == pastRef)
					{
						connected = true;
						break;
					}
				}
				if (connected)
					continue;

				// Potentially overlapping.
				const dtMeshTile* pastTile = 0;
				const dtPoly* pastPoly = 0;
				m_nav->getTileAndPolyByRefUnsafe(pastRef, &pastTile, &pastPoly);

				// Get vertices and test overlap
				const int npb = pastPoly->vertCount;
				for (int k = 0; k < npb; ++k)
					dtVcopy(&pb[k*3], &pastTile->verts[pastPoly->verts[k]*3]);

				if (dtOverlapPolyPoly2D(pa,npa, pb,npb))
				{
					overlap = true;
					break;
				}
			}
			if (overlap)
				continue;

			// This poly is fine, store and advance to the poly.
			if (n < maxResult)
			{
				resultRef[n] = neighbourRef;
				if (resultParent)
					resultParent[n] = curRef;
				++n;
			}
			else
			{
				status |= DT_BUFFER_TOO_SMALL;
			}

			if (nstack < MAX_STACK)
			{
				stack[nstack++] = neighbourNode;
			}
		}
	}

	*resultCount = n;

	return status;
}


// A passable sub-span [tmin,tmax] of a polygon edge (0..255 quantized),
// with the neighbouring polygon ref (0 for a wall sentinel).
struct dtSegInterval
{
	dtPolyRef ref;
	short tmin, tmax;
};

// Inserts an interval into 'ints' keeping the array sorted by tmin.
// Silently drops the interval when the array is full.
static void insertInterval(dtSegInterval* ints, int& nints, const int maxInts,
						   const short tmin, const short tmax, const dtPolyRef ref)
{
	if (nints+1 > maxInts) return;
	// Find insertion point.
	int idx = 0;
	while (idx < nints)
	{
		if (tmax <= ints[idx].tmin)
			break;
		idx++;
	}
	// Move current results.
	if (nints-idx)
		memmove(ints+idx+1, ints+idx, sizeof(dtSegInterval)*(nints-idx));
	// Store
	ints[idx].ref = ref;
	ints[idx].tmin = tmin;
	ints[idx].tmax = tmax;
	nints++;
}

/// @par
///
/// If the @p segmentRefs parameter is provided, then all polygon segments will be returned.
/// Otherwise only the wall segments are returned.
///
/// A segment that is normally a portal will be included in the result set as a
/// wall if the @p filter results in the neighbor polygon becoming impassable.
///
/// The @p segmentVerts and @p segmentRefs buffers should normally be sized for the
/// maximum segments per polygon of the source navigation mesh.
///
dtStatus dtNavMeshQuery::getPolyWallSegments(dtPolyRef ref, const dtQueryFilter* filter,
											 float* segmentVerts, dtPolyRef* segmentRefs, int* segmentCount,
											 const int maxSegments) const
{
	dtAssert(m_nav);

	*segmentCount = 0;

	const dtMeshTile* tile = 0;
	const dtPoly* poly = 0;
	if (dtStatusFailed(m_nav->getTileAndPolyByRef(ref, &tile, &poly)))
		return DT_FAILURE | DT_INVALID_PARAM;

	int n = 0;
	static const int MAX_INTERVAL = 16;
	dtSegInterval ints[MAX_INTERVAL];
	int nints;

	const bool storePortals = segmentRefs != 0;

	dtStatus status = DT_SUCCESS;

	for (int i = 0, j = (int)poly->vertCount-1; i < (int)poly->vertCount; j = i++)
	{
		// Skip non-solid edges.
		nints = 0;
		if (poly->neis[j] & DT_EXT_LINK)
		{
			// Tile border.
for (unsigned int k = poly->firstLink; k != DT_NULL_LINK; k = tile->links[k].next) { const dtLink* link = &tile->links[k]; if (link->edge == j) { if (link->ref != 0) { const dtMeshTile* neiTile = 0; const dtPoly* neiPoly = 0; m_nav->getTileAndPolyByRefUnsafe(link->ref, &neiTile, &neiPoly); if (filter->passFilter(link->ref, neiTile, neiPoly)) { insertInterval(ints, nints, MAX_INTERVAL, link->bmin, link->bmax, link->ref); } } } } } else { // Internal edge dtPolyRef neiRef = 0; if (poly->neis[j]) { const unsigned int idx = (unsigned int)(poly->neis[j]-1); neiRef = m_nav->getPolyRefBase(tile) | idx; if (!filter->passFilter(neiRef, tile, &tile->polys[idx])) neiRef = 0; } // If the edge leads to another polygon and portals are not stored, skip. if (neiRef != 0 && !storePortals) continue; if (n < maxSegments) { const float* vj = &tile->verts[poly->verts[j]*3]; const float* vi = &tile->verts[poly->verts[i]*3]; float* seg = &segmentVerts[n*6]; dtVcopy(seg+0, vj); dtVcopy(seg+3, vi); if (segmentRefs) segmentRefs[n] = neiRef; n++; } else { status |= DT_BUFFER_TOO_SMALL; } continue; } // Add sentinels insertInterval(ints, nints, MAX_INTERVAL, -1, 0, 0); insertInterval(ints, nints, MAX_INTERVAL, 255, 256, 0); // Store segments. const float* vj = &tile->verts[poly->verts[j]*3]; const float* vi = &tile->verts[poly->verts[i]*3]; for (int k = 1; k < nints; ++k) { // Portal segment. if (storePortals && ints[k].ref) { const float tmin = ints[k].tmin/255.0f; const float tmax = ints[k].tmax/255.0f; if (n < maxSegments) { float* seg = &segmentVerts[n*6]; dtVlerp(seg+0, vj,vi, tmin); dtVlerp(seg+3, vj,vi, tmax); if (segmentRefs) segmentRefs[n] = ints[k].ref; n++; } else { status |= DT_BUFFER_TOO_SMALL; } } // Wall segment. 
const int imin = ints[k-1].tmax; const int imax = ints[k].tmin; if (imin != imax) { const float tmin = imin/255.0f; const float tmax = imax/255.0f; if (n < maxSegments) { float* seg = &segmentVerts[n*6]; dtVlerp(seg+0, vj,vi, tmin); dtVlerp(seg+3, vj,vi, tmax); if (segmentRefs) segmentRefs[n] = 0; n++; } else { status |= DT_BUFFER_TOO_SMALL; } } } } *segmentCount = n; return status; } /// @par /// /// @p hitPos is not adjusted using the height detail data. /// /// @p hitDist will equal the search radius if there is no wall within the /// radius. In this case the values of @p hitPos and @p hitNormal are /// undefined. /// /// The normal will become unpredicable if @p hitDist is a very small number. /// dtStatus dtNavMeshQuery::findDistanceToWall(dtPolyRef startRef, const float* centerPos, const float maxRadius, const dtQueryFilter* filter, float* hitDist, float* hitPos, float* hitNormal) const { dtAssert(m_nav); dtAssert(m_nodePool); dtAssert(m_openList); // Validate input if (!startRef || !m_nav->isValidPolyRef(startRef)) return DT_FAILURE | DT_INVALID_PARAM; m_nodePool->clear(); m_openList->clear(); dtNode* startNode = m_nodePool->getNode(startRef); dtVcopy(startNode->pos, centerPos); startNode->pidx = 0; startNode->cost = 0; startNode->total = 0; startNode->id = startRef; startNode->flags = DT_NODE_OPEN; m_openList->push(startNode); float radiusSqr = dtSqr(maxRadius); dtStatus status = DT_SUCCESS; while (!m_openList->empty()) { dtNode* bestNode = m_openList->pop(); bestNode->flags &= ~DT_NODE_OPEN; bestNode->flags |= DT_NODE_CLOSED; // Get poly and tile. // The API input has been cheked already, skip checking internal data. const dtPolyRef bestRef = bestNode->id; const dtMeshTile* bestTile = 0; const dtPoly* bestPoly = 0; m_nav->getTileAndPolyByRefUnsafe(bestRef, &bestTile, &bestPoly); // Get parent poly and tile. 
dtPolyRef parentRef = 0; const dtMeshTile* parentTile = 0; const dtPoly* parentPoly = 0; if (bestNode->pidx) parentRef = m_nodePool->getNodeAtIdx(bestNode->pidx)->id; if (parentRef) m_nav->getTileAndPolyByRefUnsafe(parentRef, &parentTile, &parentPoly); // Hit test walls. for (int i = 0, j = (int)bestPoly->vertCount-1; i < (int)bestPoly->vertCount; j = i++) { // Skip non-solid edges. if (bestPoly->neis[j] & DT_EXT_LINK) { // Tile border. bool solid = true; for (unsigned int k = bestPoly->firstLink; k != DT_NULL_LINK; k = bestTile->links[k].next) { const dtLink* link = &bestTile->links[k]; if (link->edge == j) { if (link->ref != 0) { const dtMeshTile* neiTile = 0; const dtPoly* neiPoly = 0; m_nav->getTileAndPolyByRefUnsafe(link->ref, &neiTile, &neiPoly); if (filter->passFilter(link->ref, neiTile, neiPoly)) solid = false; } break; } } if (!solid) continue; } else if (bestPoly->neis[j]) { // Internal edge const unsigned int idx = (unsigned int)(bestPoly->neis[j]-1); const dtPolyRef ref = m_nav->getPolyRefBase(bestTile) | idx; if (filter->passFilter(ref, bestTile, &bestTile->polys[idx])) continue; } // Calc distance to the edge. const float* vj = &bestTile->verts[bestPoly->verts[j]*3]; const float* vi = &bestTile->verts[bestPoly->verts[i]*3]; float tseg; float distSqr = dtDistancePtSegSqr2D(centerPos, vj, vi, tseg); // Edge is too far, skip. if (distSqr > radiusSqr) continue; // Hit wall, update radius. radiusSqr = distSqr; // Calculate hit pos. hitPos[0] = vj[0] + (vi[0] - vj[0])*tseg; hitPos[1] = vj[1] + (vi[1] - vj[1])*tseg; hitPos[2] = vj[2] + (vi[2] - vj[2])*tseg; } for (unsigned int i = bestPoly->firstLink; i != DT_NULL_LINK; i = bestTile->links[i].next) { const dtLink* link = &bestTile->links[i]; dtPolyRef neighbourRef = link->ref; // Skip invalid neighbours and do not follow back to parent. if (!neighbourRef || neighbourRef == parentRef) continue; // Expand to neighbour. 
const dtMeshTile* neighbourTile = 0; const dtPoly* neighbourPoly = 0; m_nav->getTileAndPolyByRefUnsafe(neighbourRef, &neighbourTile, &neighbourPoly); // Skip off-mesh connections. if (neighbourPoly->getType() == DT_POLYTYPE_OFFMESH_CONNECTION) continue; // Calc distance to the edge. const float* va = &bestTile->verts[bestPoly->verts[link->edge]*3]; const float* vb = &bestTile->verts[bestPoly->verts[(link->edge+1) % bestPoly->vertCount]*3]; float tseg; float distSqr = dtDistancePtSegSqr2D(centerPos, va, vb, tseg); // If the circle is not touching the next polygon, skip it. if (distSqr > radiusSqr) continue; if (!filter->passFilter(neighbourRef, neighbourTile, neighbourPoly)) continue; dtNode* neighbourNode = m_nodePool->getNode(neighbourRef); if (!neighbourNode) { status |= DT_OUT_OF_NODES; continue; } if (neighbourNode->flags & DT_NODE_CLOSED) continue; // Cost if (neighbourNode->flags == 0) { getEdgeMidPoint(bestRef, bestPoly, bestTile, neighbourRef, neighbourPoly, neighbourTile, neighbourNode->pos); } const float total = bestNode->total + dtVdist(bestNode->pos, neighbourNode->pos); // The node is already in open list and the new result is worse, skip. if ((neighbourNode->flags & DT_NODE_OPEN) && total >= neighbourNode->total) continue; neighbourNode->id = neighbourRef; neighbourNode->flags = (neighbourNode->flags & ~DT_NODE_CLOSED); neighbourNode->pidx = m_nodePool->getNodeIdx(bestNode); neighbourNode->total = total; if (neighbourNode->flags & DT_NODE_OPEN) { m_openList->modify(neighbourNode); } else { neighbourNode->flags |= DT_NODE_OPEN; m_openList->push(neighbourNode); } } } // Calc hit normal. 
dtVsub(hitNormal, centerPos, hitPos); dtVnormalize(hitNormal); *hitDist = dtMathSqrtf(radiusSqr); return status; } bool dtNavMeshQuery::isValidPolyRef(dtPolyRef ref, const dtQueryFilter* filter) const { const dtMeshTile* tile = 0; const dtPoly* poly = 0; dtStatus status = m_nav->getTileAndPolyByRef(ref, &tile, &poly); // If cannot get polygon, assume it does not exists and boundary is invalid. if (dtStatusFailed(status)) return false; // If cannot pass filter, assume flags has changed and boundary is invalid. if (!filter->passFilter(ref, tile, poly)) return false; return true; } /// @par /// /// The closed list is the list of polygons that were fully evaluated during /// the last navigation graph search. (A* or Dijkstra) /// bool dtNavMeshQuery::isInClosedList(dtPolyRef ref) const { if (!m_nodePool) return false; dtNode* nodes[DT_MAX_STATES_PER_NODE]; int n= m_nodePool->findNodes(ref, nodes, DT_MAX_STATES_PER_NODE); for (int i=0; i<n; i++) { if (nodes[i]->flags & DT_NODE_CLOSED) return true; } return false; }
/*
 * [The "BSD license"]
 * Copyright (c) 2010 Terence Parr
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
package org.antlr.test;

import org.antlr.Tool;
import org.antlr.tool.*;
import org.junit.Test;

import java.io.File;

/**
 * Tests for composite grammars (grammar import / delegation): rule delegation,
 * rule overriding, shared token types across delegates, and error reporting
 * for invalid import combinations. Helpers such as tmpdir, writeFile, mkdir,
 * execParser, execLexer come from BaseTest.
 */
public class TestCompositeGrammars extends BaseTest {
	protected boolean debug = false;

	// A wildcard ('.') in an imported parser grammar must still parse as
	// "match any token", not as the start of a qualified rule reference.
	@Test public void testWildcardStillWorks() throws Exception {
		ErrorQueue equeue = new ErrorQueue();
		ErrorManager.setErrorListener(equeue);
		String grammar =
			"parser grammar S;\n" +
			"a : B . C ;\n"; // not qualified ID
		Grammar g = new Grammar(grammar);
		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
	}

	// Delegator M can invoke rule a inherited from delegate S.
	@Test public void testDelegatorInvokesDelegateRule() throws Exception {
		String slave =
			"parser grammar S;\n" +
			"a : B {System.out.println(\"S.a\");} ;\n";
		mkdir(tmpdir);
		writeFile(tmpdir, "S.g", slave);
		String master =
			"grammar M;\n" +
			"import S;\n" +
			"s : a ;\n" +
			"B : 'b' ;" + // defines B from inherited token space
			"WS : (' '|'\\n') {skip();} ;\n" ;
		String found = execParser("M.g", master, "MParser", "MLexer",
								  "s", "b", debug);
		assertEquals("S.a\n", found);
	}

	@Test public void testDelegatorInvokesDelegateRuleWithArgs() throws Exception {
		// must generate something like:
		// public int a(int x) throws RecognitionException { return gS.a(x); }
		// in M.
		String slave =
			"parser grammar S;\n" +
			"a[int x] returns [int y] : B {System.out.print(\"S.a\"); $y=1000;} ;\n";
		mkdir(tmpdir);
		writeFile(tmpdir, "S.g", slave);
		String master =
			"grammar M;\n" +
			"import S;\n" +
			"s : label=a[3] {System.out.println($label.y);} ;\n" +
			"B : 'b' ;" + // defines B from inherited token space
			"WS : (' '|'\\n') {skip();} ;\n" ;
		String found = execParser("M.g", master, "MParser", "MLexer",
								  "s", "b", debug);
		assertEquals("S.a1000\n", found);
	}

	@Test public void testDelegatorInvokesDelegateRuleWithReturnStruct() throws Exception {
		// must generate something like:
		// public int a(int x) throws RecognitionException { return gS.a(x); }
		// in M.
		String slave =
			"parser grammar S;\n" +
			"a : B {System.out.print(\"S.a\");} ;\n";
		mkdir(tmpdir);
		writeFile(tmpdir, "S.g", slave);
		String master =
			"grammar M;\n" +
			"import S;\n" +
			"s : a {System.out.println($a.text);} ;\n" +
			"B : 'b' ;" + // defines B from inherited token space
			"WS : (' '|'\\n') {skip();} ;\n" ;
		String found = execParser("M.g", master, "MParser", "MLexer",
								  "s", "b", debug);
		assertEquals("S.ab\n", found);
	}

	// Actions in M can reach @members defined in delegate S via the generated
	// delegate pointer field.
	@Test public void testDelegatorAccessesDelegateMembers() throws Exception {
		String slave =
			"parser grammar S;\n" +
			"@members {\n" +
			"  public void foo() {System.out.println(\"foo\");}\n" +
			"}\n" +
			"a : B ;\n";
		mkdir(tmpdir);
		writeFile(tmpdir, "S.g", slave);
		String master =
			"grammar M;\n" +		// uses no rules from the import
			"import S;\n" +
			"s : 'b' {gS.foo();} ;\n" + // gS is import pointer
			"WS : (' '|'\\n') {skip();} ;\n" ;
		String found = execParser("M.g", master, "MParser", "MLexer",
								  "s", "b", debug);
		assertEquals("foo\n", found);
	}

	// With import S,T and rule a in both, S's version wins (first import).
	@Test public void testDelegatorInvokesFirstVersionOfDelegateRule() throws Exception {
		String slave =
			"parser grammar S;\n" +
			"a : b {System.out.println(\"S.a\");} ;\n" +
			"b : B ;\n" ;
		mkdir(tmpdir);
		writeFile(tmpdir, "S.g", slave);
		String slave2 =
			"parser grammar T;\n" +
			"a : B {System.out.println(\"T.a\");} ;\n"; // hidden by S.a
		writeFile(tmpdir, "T.g", slave2);
		String master =
			"grammar M;\n" +
			"import S,T;\n" +
			"s : a ;\n" +
			"B : 'b' ;\n" +
			"WS : (' '|'\\n') {skip();} ;\n" ;
		String found = execParser("M.g", master, "MParser", "MLexer",
								  "s", "b", debug);
		assertEquals("S.a\n", found);
	}

	// Token types must agree across all delegates even when their tokens{}
	// sections list the same names in different orders.
	@Test public void testDelegatesSeeSameTokenType() throws Exception {
		String slave =
			"parser grammar S;\n" + // A, B, C token type order
			"tokens { A; B; C; }\n" +
			"x : A {System.out.println(\"S.x\");} ;\n";
		mkdir(tmpdir);
		writeFile(tmpdir, "S.g", slave);
		String slave2 =
			"parser grammar T;\n" +
			"tokens { C; B; A; }\n" + // reverse order
			"y : A {System.out.println(\"T.y\");} ;\n";
		mkdir(tmpdir);
		writeFile(tmpdir, "T.g", slave2);
		// The lexer will create rules to match letters a, b, c.
		// The associated token types A, B, C must have the same value
		// and all import'd parsers.  Since ANTLR regenerates all imports
		// for use with the delegator M, it can generate the same token type
		// mapping in each parser:
		// public static final int C=6;
		// public static final int EOF=-1;
		// public static final int B=5;
		// public static final int WS=7;
		// public static final int A=4;
		String master =
			"grammar M;\n" +
			"import S,T;\n" +
			"s : x y ;\n" + // matches AA, which should be "aa"
			"B : 'b' ;\n" + // another order: B, A, C
			"A : 'a' ;\n" +
			"C : 'c' ;\n" +
			"WS : (' '|'\\n') {skip();} ;\n" ;
		String found = execParser("M.g", master, "MParser", "MLexer",
								  "s", "aa", debug);
		assertEquals("S.x\n" +
					 "T.y\n", found);
	}

	// Same scenario as above, but verifies the composite token maps directly
	// instead of running the generated parser.
	@Test public void testDelegatesSeeSameTokenType2() throws Exception {
		ErrorQueue equeue = new ErrorQueue();
		ErrorManager.setErrorListener(equeue);
		String slave =
			"parser grammar S;\n" + // A, B, C token type order
			"tokens { A; B; C; }\n" +
			"x : A {System.out.println(\"S.x\");} ;\n";
		mkdir(tmpdir);
		writeFile(tmpdir, "S.g", slave);
		String slave2 =
			"parser grammar T;\n" +
			"tokens { C; B; A; }\n" + // reverse order
			"y : A {System.out.println(\"T.y\");} ;\n";
		mkdir(tmpdir);
		writeFile(tmpdir, "T.g", slave2);
		String master =
			"grammar M;\n" +
			"import S,T;\n" +
			"s : x y ;\n" + // matches AA, which should be "aa"
			"B : 'b' ;\n" + // another order: B, A, C
			"A : 'a' ;\n" +
			"C : 'c' ;\n" +
			"WS : (' '|'\\n') {skip();} ;\n" ;
		writeFile(tmpdir, "M.g", master);
		Tool antlr = newTool(new String[] {"-lib", tmpdir});
		CompositeGrammar composite = new CompositeGrammar();
		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
		composite.setDelegationRoot(g);
		g.parseAndBuildAST();
		g.composite.assignTokenTypes();
		String expectedTokenIDToTypeMap = "[A=4, B=5, C=6, WS=7]";
		String expectedStringLiteralToTypeMap = "{}";
		String expectedTypeToTokenList = "[A, B, C, WS]";
		assertEquals(expectedTokenIDToTypeMap,
					 realElements(g.composite.tokenIDToTypeMap).toString());
		assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
		assertEquals(expectedTypeToTokenList,
					 realElements(g.composite.typeToTokenList).toString());
		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
	}

	@Test public void testCombinedImportsCombined() throws Exception {
		// for now, we don't allow combined to import combined
		ErrorQueue equeue = new ErrorQueue();
		ErrorManager.setErrorListener(equeue);
		String slave =
			"grammar S;\n" + // A, B, C token type order
			"tokens { A; B; C; }\n" +
			"x : 'x' INT {System.out.println(\"S.x\");} ;\n" +
			"INT : '0'..'9'+ ;\n" +
			"WS : (' '|'\\n') {skip();} ;\n";
		mkdir(tmpdir);
		writeFile(tmpdir, "S.g", slave);
		String master =
			"grammar M;\n" +
			"import S;\n" +
			"s : x INT ;\n";
		writeFile(tmpdir, "M.g", master);
		Tool antlr = newTool(new String[] {"-lib", tmpdir});
		CompositeGrammar composite = new CompositeGrammar();
		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
		composite.setDelegationRoot(g);
		g.parseAndBuildAST();
		g.composite.assignTokenTypes();
		assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size());
		String expectedError =
			"error(161): "+tmpdir.toString().replaceFirst("\\-[0-9]+","")+"/M.g:2:8: combined grammar M cannot import combined grammar S";
		assertEquals("unexpected errors: "+equeue, expectedError, equeue.errors.get(0).toString().replaceFirst("\\-[0-9]+",""));
	}

	// Two delegates aliasing the same literal 'a' to different token names
	// must produce an alias-conflict error.
	@Test public void testSameStringTwoNames() throws Exception {
		ErrorQueue equeue = new ErrorQueue();
		ErrorManager.setErrorListener(equeue);
		String slave =
			"parser grammar S;\n" +
			"tokens { A='a'; }\n" +
			"x : A {System.out.println(\"S.x\");} ;\n";
		mkdir(tmpdir);
		writeFile(tmpdir, "S.g", slave);
		String slave2 =
			"parser grammar T;\n" +
			"tokens { X='a'; }\n" +
			"y : X {System.out.println(\"T.y\");} ;\n";
		mkdir(tmpdir);
		writeFile(tmpdir, "T.g", slave2);
		String master =
			"grammar M;\n" +
			"import S,T;\n" +
			"s : x y ;\n" +
			"WS : (' '|'\\n') {skip();} ;\n" ;
		writeFile(tmpdir, "M.g", master);
		Tool antlr = newTool(new String[] {"-lib", tmpdir});
		CompositeGrammar composite = new CompositeGrammar();
		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
		composite.setDelegationRoot(g);
		g.parseAndBuildAST();
		g.composite.assignTokenTypes();
		String expectedTokenIDToTypeMap = "[A=4, WS=5, X=6]";
		String expectedStringLiteralToTypeMap = "{'a'=4}";
		String expectedTypeToTokenList = "[A, WS, X]";
		assertEquals(expectedTokenIDToTypeMap,
					 realElements(g.composite.tokenIDToTypeMap).toString());
		assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
		assertEquals(expectedTypeToTokenList,
					 realElements(g.composite.typeToTokenList).toString());
		Object expectedArg = "X='a'";
		Object expectedArg2 = "A";
		int expectedMsgID = ErrorManager.MSG_TOKEN_ALIAS_CONFLICT;
		GrammarSemanticsMessage expectedMessage =
			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
		checkGrammarSemanticsError(equeue, expectedMessage);
		assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size());
		String expectedError =
			"error(158): T.g:2:10: cannot alias X='a'; string already assigned to A";
		assertEquals(expectedError, equeue.errors.get(0).toString());
	}

	// Two delegates aliasing the same token name A to different literals
	// must produce an alias-reassignment error.
	@Test public void testSameNameTwoStrings() throws Exception {
		ErrorQueue equeue = new ErrorQueue();
		ErrorManager.setErrorListener(equeue);
		String slave =
			"parser grammar S;\n" +
			"tokens { A='a'; }\n" +
			"x : A {System.out.println(\"S.x\");} ;\n";
		mkdir(tmpdir);
		writeFile(tmpdir, "S.g", slave);
		String slave2 =
			"parser grammar T;\n" +
			"tokens { A='x'; }\n" +
			"y : A {System.out.println(\"T.y\");} ;\n";
		writeFile(tmpdir, "T.g", slave2);
		String master =
			"grammar M;\n" +
			"import S,T;\n" +
			"s : x y ;\n" +
			"WS : (' '|'\\n') {skip();} ;\n" ;
		writeFile(tmpdir, "M.g", master);
		Tool antlr = newTool(new String[] {"-lib", tmpdir});
		CompositeGrammar composite = new CompositeGrammar();
		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
		composite.setDelegationRoot(g);
		g.parseAndBuildAST();
		g.composite.assignTokenTypes();
		String expectedTokenIDToTypeMap = "[A=4, T__6=6, WS=5]";
		String expectedStringLiteralToTypeMap = "{'a'=4, 'x'=6}";
		String expectedTypeToTokenList = "[A, WS, T__6]";
		assertEquals(expectedTokenIDToTypeMap,
					 realElements(g.composite.tokenIDToTypeMap).toString());
		assertEquals(expectedStringLiteralToTypeMap, sortMapToString(g.composite.stringLiteralToTypeMap));
		assertEquals(expectedTypeToTokenList,
					 realElements(g.composite.typeToTokenList).toString());
		Object expectedArg = "A='x'";
		Object expectedArg2 = "'a'";
		int expectedMsgID = ErrorManager.MSG_TOKEN_ALIAS_REASSIGNMENT;
		GrammarSemanticsMessage expectedMessage =
			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg, expectedArg2);
		checkGrammarSemanticsError(equeue, expectedMessage);
		assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size());
		String expectedError =
			"error(159): T.g:2:10: cannot alias A='x'; token name already assigned to 'a'";
		assertEquals(expectedError, equeue.errors.get(0).toString());
	}

	// A tokenVocab option inside an imported grammar is ignored (warning 160).
	@Test public void testImportedTokenVocabIgnoredWithWarning() throws Exception {
		ErrorQueue equeue = new ErrorQueue();
		ErrorManager.setErrorListener(equeue);
		String slave =
			"parser grammar S;\n" +
			"options {tokenVocab=whatever;}\n" +
			"tokens { A='a'; }\n" +
			"x : A {System.out.println(\"S.x\");} ;\n";
		mkdir(tmpdir);
		writeFile(tmpdir, "S.g", slave);
		String master =
			"grammar M;\n" +
			"import S;\n" +
			"s : x ;\n" +
			"WS : (' '|'\\n') {skip();} ;\n" ;
		writeFile(tmpdir, "M.g", master);
		Tool antlr = newTool(new String[] {"-lib", tmpdir});
		CompositeGrammar composite = new CompositeGrammar();
		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
		composite.setDelegationRoot(g);
		g.parseAndBuildAST();
		g.composite.assignTokenTypes();
		Object expectedArg = "S";
		int expectedMsgID = ErrorManager.MSG_TOKEN_VOCAB_IN_DELEGATE;
		GrammarSemanticsMessage expectedMessage =
			new GrammarSemanticsMessage(expectedMsgID, g, null, expectedArg);
		checkGrammarSemanticsWarning(equeue, expectedMessage);
		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
		assertEquals("unexpected errors: "+equeue, 1, equeue.warnings.size());
		String expectedError =
			"warning(160): S.g:2:10: tokenVocab option ignored in imported grammar S";
		assertEquals(expectedError, equeue.warnings.get(0).toString());
	}

	// tokenVocab on the ROOT grammar is honored: imported token A keeps the
	// type from Test.tokens (99) and new types continue from there.
	@Test public void testImportedTokenVocabWorksInRoot() throws Exception {
		ErrorQueue equeue = new ErrorQueue();
		ErrorManager.setErrorListener(equeue);
		String slave =
			"parser grammar S;\n" +
			"tokens { A='a'; }\n" +
			"x : A {System.out.println(\"S.x\");} ;\n";
		mkdir(tmpdir);
		writeFile(tmpdir, "S.g", slave);
		String tokens =
			"A=99\n";
		writeFile(tmpdir, "Test.tokens", tokens);
		String master =
			"grammar M;\n" +
			"options {tokenVocab=Test;}\n" +
			"import S;\n" +
			"s : x ;\n" +
			"WS : (' '|'\\n') {skip();} ;\n" ;
		writeFile(tmpdir, "M.g", master);
		Tool antlr = newTool(new String[] {"-lib", tmpdir});
		CompositeGrammar composite = new CompositeGrammar();
		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
		composite.setDelegationRoot(g);
		g.parseAndBuildAST();
		g.composite.assignTokenTypes();
		String expectedTokenIDToTypeMap = "[A=99, WS=101]";
		String expectedStringLiteralToTypeMap = "{'a'=100}";
		String expectedTypeToTokenList = "[A, 'a', WS]";
		assertEquals(expectedTokenIDToTypeMap,
					 realElements(g.composite.tokenIDToTypeMap).toString());
		assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
		assertEquals(expectedTypeToTokenList,
					 realElements(g.composite.typeToTokenList).toString());
		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
	}

	// Syntax errors in an imported grammar must surface through the
	// delegator's error queue, not be silently discarded.
	@Test public void testSyntaxErrorsInImportsNotThrownOut() throws Exception {
		ErrorQueue equeue = new ErrorQueue();
		ErrorManager.setErrorListener(equeue);
		String slave =
			"parser grammar S;\n" +
			"options {toke\n";
		mkdir(tmpdir);
		writeFile(tmpdir, "S.g", slave);
		String master =
			"grammar M;\n" +
			"import S;\n" +
			"s : x ;\n" +
			"WS : (' '|'\\n') {skip();} ;\n" ;
		writeFile(tmpdir, "M.g", master);
		Tool antlr = newTool(new String[] {"-lib", tmpdir});
		CompositeGrammar composite = new CompositeGrammar();
		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
		composite.setDelegationRoot(g);
		g.parseAndBuildAST();
		g.composite.assignTokenTypes();
		// whole bunch of errors from bad S.g file
		assertEquals("unexpected errors: "+equeue, 5, equeue.errors.size());
	}

	@Test public void testSyntaxErrorsInImportsNotThrownOut2() throws Exception {
		ErrorQueue equeue = new ErrorQueue();
		ErrorManager.setErrorListener(equeue);
		String slave =
			"parser grammar S;\n" +
			": A {System.out.println(\"S.x\");} ;\n";
		mkdir(tmpdir);
		writeFile(tmpdir, "S.g", slave);
		String master =
			"grammar M;\n" +
			"import S;\n" +
			"s : x ;\n" +
			"WS : (' '|'\\n') {skip();} ;\n" ;
		writeFile(tmpdir, "M.g", master);
		Tool antlr = newTool(new String[] {"-lib", tmpdir});
		CompositeGrammar composite = new CompositeGrammar();
		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
		composite.setDelegationRoot(g);
		g.parseAndBuildAST();
		g.composite.assignTokenTypes();
		// whole bunch of errors from bad S.g file
		assertEquals("unexpected errors: "+equeue, 3, equeue.errors.size());
	}

	// Rule b redefined in M overrides S.b even when invoked from S.a.
	@Test public void testDelegatorRuleOverridesDelegate() throws Exception {
		String slave =
			"parser grammar S;\n" +
			"a : b {System.out.println(\"S.a\");} ;\n" +
			"b : B ;\n" ;
		mkdir(tmpdir);
		writeFile(tmpdir, "S.g", slave);
		String master =
			"grammar M;\n" +
			"import S;\n" +
			"b : 'b'|'c' ;\n" +
			"WS : (' '|'\\n') {skip();} ;\n" ;
		String found = execParser("M.g", master, "MParser", "MLexer",
								  "a", "c", debug);
		assertEquals("S.a\n", found);
	}

	// Overriding a rule in M must also fix lookahead decisions computed
	// inside the delegate that reference the overridden rule.
	@Test public void testDelegatorRuleOverridesLookaheadInDelegate() throws Exception {
		String slave =
			"parser grammar JavaDecl;\n" +
			"type : 'int' ;\n" +
			"decl : type ID ';'\n" +
			"     | type ID init ';' {System.out.println(\"JavaDecl: \"+$decl.text);}\n" +
			"     ;\n" +
			"init : '=' INT ;\n" ;
		mkdir(tmpdir);
		writeFile(tmpdir, "JavaDecl.g", slave);
		String master =
			"grammar Java;\n" +
			"import JavaDecl;\n" +
			"prog : decl ;\n" +
			"type : 'int' | 'float' ;\n" +
			"\n" +
			"ID : 'a'..'z'+ ;\n" +
			"INT : '0'..'9'+ ;\n" +
			"WS : (' '|'\\n') {skip();} ;\n" ;
		// for float to work in decl, type must be overridden
		String found = execParser("Java.g", master, "JavaParser", "JavaLexer",
								  "prog", "float x = 3;", debug);
		assertEquals("JavaDecl: floatx=3;\n", found);
	}

	// A rule defined in M hides the same-named rule in every delegate.
	@Test public void testDelegatorRuleOverridesDelegates() throws Exception {
		String slave =
			"parser grammar S;\n" +
			"a : b {System.out.println(\"S.a\");} ;\n" +
			"b : B ;\n" ;
		mkdir(tmpdir);
		writeFile(tmpdir, "S.g", slave);
		String slave2 =
			"parser grammar T;\n" +
			"tokens { A='x'; }\n" +
			"b : B {System.out.println(\"T.b\");} ;\n";
		writeFile(tmpdir, "T.g", slave2);
		String master =
			"grammar M;\n" +
			"import S, T;\n" +
			"b : 'b'|'c' {System.out.println(\"M.b\");}|B|A ;\n" +
			"WS : (' '|'\\n') {skip();} ;\n" ;
		String found = execParser("M.g", master, "MParser", "MLexer",
								  "a", "c", debug);
		assertEquals("M.b\n" +
					 "S.a\n", found);
	}

	// LEXER INHERITANCE

	@Test public void testLexerDelegatorInvokesDelegateRule() throws Exception {
		String slave =
			"lexer grammar S;\n" +
			"A : 'a' {System.out.println(\"S.A\");} ;\n" +
			"C : 'c' ;\n";
		mkdir(tmpdir);
		writeFile(tmpdir, "S.g", slave);
		String master =
			"lexer grammar M;\n" +
			"import S;\n" +
			"B : 'b' ;\n" +
			"WS : (' '|'\\n') {skip();} ;\n" ;
		String found = execLexer("M.g", master, "M", "abc", debug);
		assertEquals("S.A\nabc\n", found);
	}

	@Test public void testLexerDelegatorRuleOverridesDelegate() throws Exception {
		String slave =
			"lexer grammar S;\n" +
			"A : 'a' {System.out.println(\"S.A\");} ;\n" +
			"B : 'b' {System.out.println(\"S.B\");} ;\n";
		mkdir(tmpdir);
		writeFile(tmpdir, "S.g", slave);
		String master =
			"lexer grammar M;\n" +
			"import S;\n" +
			"A : 'a' B {System.out.println(\"M.A\");} ;\n" +
			"WS : (' '|'\\n') {skip();} ;\n" ;
		String found = execLexer("M.g", master, "M", "ab", debug);
		assertEquals("S.B\n" +
					 "M.A\n" +
					 "ab\n", found);
	}

	@Test public void testLexerDelegatorRuleOverridesDelegateLeavingNoRules() throws Exception {
		// M.Tokens has nothing to predict tokens from S.  Should
		// not include S.Tokens alt in this case?
		String slave =
			"lexer grammar S;\n" +
			"A : 'a' {System.out.println(\"S.A\");} ;\n";
		mkdir(tmpdir);
		writeFile(tmpdir, "S.g", slave);
		String master =
			"lexer grammar M;\n" +
			"import S;\n" +
			"A : 'a' {System.out.println(\"M.A\");} ;\n" +
			"WS : (' '|'\\n') {skip();} ;\n" ;
		writeFile(tmpdir, "/M.g", master);

		ErrorQueue equeue = new ErrorQueue();
		ErrorManager.setErrorListener(equeue);
		Tool antlr = newTool(new String[] {"-lib", tmpdir});
		CompositeGrammar composite = new CompositeGrammar();
		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
		composite.setDelegationRoot(g);
		g.parseAndBuildAST();
		composite.assignTokenTypes();
		composite.defineGrammarSymbols();
		composite.createNFAs();
		g.createLookaheadDFAs(false);

		// predict only alts from M not S
		String expectingDFA =
			".s0-'a'->.s1\n" +
			".s0-{'\\n', ' '}->:s3=>2\n" +
			".s1-<EOT>->:s2=>1\n";
		org.antlr.analysis.DFA dfa = g.getLookaheadDFA(1);
		FASerializer serializer = new FASerializer(g);
		String result = serializer.serialize(dfa.startState);
		assertEquals(expectingDFA, result);

		// must not be a "unreachable alt: Tokens" error
		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
	}

	// A tree grammar may not import a lexer grammar (error 161).
	@Test public void testInvalidImportMechanism() throws Exception {
		// M.Tokens has nothing to predict tokens from S.  Should
		// not include S.Tokens alt in this case?
		String slave =
			"lexer grammar S;\n" +
			"A : 'a' {System.out.println(\"S.A\");} ;\n";
		mkdir(tmpdir);
		writeFile(tmpdir, "S.g", slave);
		String master =
			"tree grammar M;\n" +
			"import S;\n" +
			"a : A ;";
		writeFile(tmpdir, "/M.g", master);

		ErrorQueue equeue = new ErrorQueue();
		ErrorManager.setErrorListener(equeue);
		Tool antlr = newTool(new String[] {"-lib", tmpdir});
		CompositeGrammar composite = new CompositeGrammar();
		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
		composite.setDelegationRoot(g);
		g.parseAndBuildAST();

		assertEquals("unexpected errors: "+equeue, 1, equeue.errors.size());
		assertEquals("unexpected errors: "+equeue, 0, equeue.warnings.size());
		String expectedError =
			"error(161): "+tmpdir.toString().replaceFirst("\\-[0-9]+","")+"/M.g:2:8: tree grammar M cannot import lexer grammar S";
		assertEquals(expectedError, equeue.errors.get(0).toString().replaceFirst("\\-[0-9]+",""));
	}

	@Test public void testSyntacticPredicateRulesAreNotInherited() throws Exception {
		// if this compiles, it means that synpred1_S is defined in S.java
		// but not MParser.java.  MParser has its own synpred1_M which must
		// be separate to compile.
		String slave =
			"parser grammar S;\n" +
			"a : 'a' {System.out.println(\"S.a1\");}\n" +
			"  | 'a' {System.out.println(\"S.a2\");}\n" +
			"  ;\n" +
			"b : 'x' | 'y' {;} ;\n"; // preds generated but not needed in DFA here
		mkdir(tmpdir);
		writeFile(tmpdir, "S.g", slave);
		String master =
			"grammar M;\n" +
			"options {backtrack=true;}\n" +
			"import S;\n" +
			"start : a b ;\n" +
			"nonsense : 'q' | 'q' {;} ;" + // forces def of preds here in M
			"WS : (' '|'\\n') {skip();} ;\n" ;
		String found = execParser("M.g", master, "MParser", "MLexer",
								  "start", "ax", debug);
		assertEquals("S.a1\n", found);
	}

	// A keyword-style rule ('abc') in the delegate that overlaps ID must not
	// trigger spurious warnings when M imports the lexer.
	@Test public void testKeywordVSIDGivesNoWarning() throws Exception {
		ErrorQueue equeue = new ErrorQueue();
		ErrorManager.setErrorListener(equeue);
		String slave =
			"lexer grammar S;\n" +
			"A : 'abc' {System.out.println(\"S.A\");} ;\n" +
			"ID : 'a'..'z'+ ;\n";
		mkdir(tmpdir);
		writeFile(tmpdir, "S.g", slave);
		String master =
			"grammar M;\n" +
			"import S;\n" +
			"a : A {System.out.println(\"M.a\");} ;\n" +
			"WS : (' '|'\\n') {skip();} ;\n" ;
		String found = execParser("M.g", master, "MParser", "MLexer",
								  "a", "abc", debug);

		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
		assertEquals("unexpected warnings: "+equeue, 0, equeue.warnings.size());

		assertEquals("S.A\nM.a\n", found);
	}

	@Test public void testWarningForUndefinedToken() throws Exception {
		ErrorQueue equeue = new ErrorQueue();
		ErrorManager.setErrorListener(equeue);
		String slave =
			"lexer grammar S;\n" +
			"A : 'abc' {System.out.println(\"S.A\");} ;\n";
		mkdir(tmpdir);
		writeFile(tmpdir, "S.g", slave);
		String master =
			"grammar M;\n" +
			"import S;\n" +
			"a : ABC A {System.out.println(\"M.a\");} ;\n" +
			"WS : (' '|'\\n') {skip();} ;\n" ;
		// A is defined in S but M should still see it and not give warning.
		// only problem is ABC.

		rawGenerateAndBuildRecognizer("M.g", master, "MParser", "MLexer", debug);

		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());
		assertEquals("unexpected warnings: "+equeue, 1, equeue.warnings.size());

		String expectedError =
			"warning(105): "+tmpdir.toString().replaceFirst("\\-[0-9]+","")+File.separator+"M.g:3:5: no lexer rule corresponding to token: ABC";
		assertEquals(expectedError, equeue.warnings.get(0).toString().replaceFirst("\\-[0-9]+",""));
	}

	/** Make sure that M can import S that imports T. */
	@Test public void test3LevelImport() throws Exception {
		ErrorQueue equeue = new ErrorQueue();
		ErrorManager.setErrorListener(equeue);
		String slave =
			"parser grammar T;\n" +
			"a : T ;\n" ;
		mkdir(tmpdir);
		writeFile(tmpdir, "T.g", slave);
		String slave2 =
			"parser grammar S;\n" + // A, B, C token type order
			"import T;\n" +
			"a : S ;\n" ;
		mkdir(tmpdir);
		writeFile(tmpdir, "S.g", slave2);
		String master =
			"grammar M;\n" +
			"import S;\n" +
			"a : M ;\n" ;
		writeFile(tmpdir, "M.g", master);
		Tool antlr = newTool(new String[] {"-lib", tmpdir});
		CompositeGrammar composite = new CompositeGrammar();
		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
		composite.setDelegationRoot(g);
		g.parseAndBuildAST();
		g.composite.assignTokenTypes();
		g.composite.defineGrammarSymbols();

		String expectedTokenIDToTypeMap = "[M=4, S=5, T=6]";
		String expectedStringLiteralToTypeMap = "{}";
		String expectedTypeToTokenList = "[M, S, T]";

		assertEquals(expectedTokenIDToTypeMap,
					 realElements(g.composite.tokenIDToTypeMap).toString());
		assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
		assertEquals(expectedTypeToTokenList,
					 realElements(g.composite.typeToTokenList).toString());

		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());

		boolean ok =
			rawGenerateAndBuildRecognizer("M.g", master, "MParser", null, false);
		boolean expecting = true; // should be ok
		assertEquals(expecting, ok);
	}

	// A diamond/tree of imports (M -> S -> T, M -> A -> B,C) must assign one
	// consistent token-type space across all six grammars.
	@Test public void testBigTreeOfImports() throws Exception {
		ErrorQueue equeue = new ErrorQueue();
		ErrorManager.setErrorListener(equeue);
		String slave =
			"parser grammar T;\n" +
			"x : T ;\n" ;
		mkdir(tmpdir);
		writeFile(tmpdir, "T.g", slave);
		slave =
			"parser grammar S;\n" +
			"import T;\n" +
			"y : S ;\n" ;
		mkdir(tmpdir);
		writeFile(tmpdir, "S.g", slave);

		slave =
			"parser grammar C;\n" +
			"i : C ;\n" ;
		mkdir(tmpdir);
		writeFile(tmpdir, "C.g", slave);
		slave =
			"parser grammar B;\n" +
			"j : B ;\n" ;
		mkdir(tmpdir);
		writeFile(tmpdir, "B.g", slave);
		slave =
			"parser grammar A;\n" +
			"import B,C;\n" +
			"k : A ;\n" ;
		mkdir(tmpdir);
		writeFile(tmpdir, "A.g", slave);

		String master =
			"grammar M;\n" +
			"import S,A;\n" +
			"a : M ;\n" ;
		writeFile(tmpdir, "M.g", master);
		Tool antlr = newTool(new String[] {"-lib", tmpdir});
		CompositeGrammar composite = new CompositeGrammar();
		Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite);
		composite.setDelegationRoot(g);
		g.parseAndBuildAST();
		g.composite.assignTokenTypes();
		g.composite.defineGrammarSymbols();

		String expectedTokenIDToTypeMap = "[A=4, B=5, C=6, M=7, S=8, T=9]";
		String expectedStringLiteralToTypeMap = "{}";
		String expectedTypeToTokenList = "[A, B, C, M, S, T]";

		assertEquals(expectedTokenIDToTypeMap,
					 realElements(g.composite.tokenIDToTypeMap).toString());
		assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString());
		assertEquals(expectedTypeToTokenList,
					 realElements(g.composite.typeToTokenList).toString());

		assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size());

		boolean ok =
			rawGenerateAndBuildRecognizer("M.g", master, "MParser", null, false);
		boolean expecting = true; // should be ok
		assertEquals(expecting, ok);
	}

	// NOTE(review): this method continues past the visible end of this chunk;
	// only its opening statements appear here.
	@Test public void testRulesVisibleThroughMultilevelImport() throws Exception {
		ErrorQueue equeue = new ErrorQueue();
		ErrorManager.setErrorListener(equeue);
		String slave =
			"parser grammar T;\n" +
			"x : T ;\n" ;
		mkdir(tmpdir);
		writeFile(tmpdir, "T.g", slave);
		String slave2 =
			"parser grammar S;\n" + // A, B, C token type order
			"import T;\n" +
			"a : S ;\n"
; mkdir(tmpdir); writeFile(tmpdir, "S.g", slave2); String master = "grammar M;\n" + "import S;\n" + "a : M x ;\n" ; // x MUST BE VISIBLE TO M writeFile(tmpdir, "M.g", master); Tool antlr = newTool(new String[] {"-lib", tmpdir}); CompositeGrammar composite = new CompositeGrammar(); Grammar g = new Grammar(antlr,tmpdir+"/M.g",composite); composite.setDelegationRoot(g); g.parseAndBuildAST(); g.composite.assignTokenTypes(); g.composite.defineGrammarSymbols(); String expectedTokenIDToTypeMap = "[M=4, S=5, T=6]"; String expectedStringLiteralToTypeMap = "{}"; String expectedTypeToTokenList = "[M, S, T]"; assertEquals(expectedTokenIDToTypeMap, realElements(g.composite.tokenIDToTypeMap).toString()); assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString()); assertEquals(expectedTypeToTokenList, realElements(g.composite.typeToTokenList).toString()); assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size()); } @Test public void testNestedComposite() throws Exception { // Wasn't compiling. 
http://www.antlr.org/jira/browse/ANTLR-438 ErrorQueue equeue = new ErrorQueue(); ErrorManager.setErrorListener(equeue); String gstr = "lexer grammar L;\n" + "T1: '1';\n" + "T2: '2';\n" + "T3: '3';\n" + "T4: '4';\n" ; mkdir(tmpdir); writeFile(tmpdir, "L.g", gstr); gstr = "parser grammar G1;\n" + "s: a | b;\n" + "a: T1;\n" + "b: T2;\n" ; mkdir(tmpdir); writeFile(tmpdir, "G1.g", gstr); gstr = "parser grammar G2;\n" + "import G1;\n" + "a: T3;\n" ; mkdir(tmpdir); writeFile(tmpdir, "G2.g", gstr); String G3str = "grammar G3;\n" + "import G2;\n" + "b: T4;\n" ; mkdir(tmpdir); writeFile(tmpdir, "G3.g", G3str); Tool antlr = newTool(new String[] {"-lib", tmpdir}); CompositeGrammar composite = new CompositeGrammar(); Grammar g = new Grammar(antlr,tmpdir+"/G3.g",composite); composite.setDelegationRoot(g); g.parseAndBuildAST(); g.composite.assignTokenTypes(); g.composite.defineGrammarSymbols(); String expectedTokenIDToTypeMap = "[T1=4, T2=5, T3=6, T4=7]"; String expectedStringLiteralToTypeMap = "{}"; String expectedTypeToTokenList = "[T1, T2, T3, T4]"; assertEquals(expectedTokenIDToTypeMap, realElements(g.composite.tokenIDToTypeMap).toString()); assertEquals(expectedStringLiteralToTypeMap, g.composite.stringLiteralToTypeMap.toString()); assertEquals(expectedTypeToTokenList, realElements(g.composite.typeToTokenList).toString()); assertEquals("unexpected errors: "+equeue, 0, equeue.errors.size()); boolean ok = rawGenerateAndBuildRecognizer("G3.g", G3str, "G3Parser", null, false); boolean expecting = true; // should be ok assertEquals(expecting, ok); } @Test public void testHeadersPropogatedCorrectlyToImportedGrammars() throws Exception { String slave = "parser grammar S;\n" + "a : B {System.out.print(\"S.a\");} ;\n"; mkdir(tmpdir); writeFile(tmpdir, "S.g", slave); String master = "grammar M;\n" + "import S;\n" + "@header{package mypackage;}\n" + "@lexer::header{package mypackage;}\n" + "s : a ;\n" + "B : 'b' ;" + // defines B from inherited token space "WS : (' '|'\\n') {skip();} 
;\n" ; boolean ok = antlr("M.g", "M.g", master, debug); boolean expecting = true; // should be ok assertEquals(expecting, ok); } }
<?php
/*
 +--------------------------------------------------------------------+
 | CiviCRM version 4.4                                                |
 +--------------------------------------------------------------------+
 | Copyright CiviCRM LLC (c) 2004-2013                                |
 +--------------------------------------------------------------------+
 | This file is a part of CiviCRM.                                    |
 |                                                                    |
 | CiviCRM is free software; you can copy, modify, and distribute it  |
 | under the terms of the GNU Affero General Public License           |
 | Version 3, 19 November 2007 and the CiviCRM Licensing Exception.   |
 |                                                                    |
 | CiviCRM is distributed in the hope that it will be useful, but     |
 | WITHOUT ANY WARRANTY; without even the implied warranty of         |
 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.               |
 | See the GNU Affero General Public License for more details.        |
 |                                                                    |
 | You should have received a copy of the GNU Affero General Public   |
 | License and the CiviCRM Licensing Exception along                  |
 | with this program; if not, contact CiviCRM LLC                     |
 | at info[AT]civicrm[DOT]org. If you have questions about the        |
 | GNU Affero General Public License or the licensing of CiviCRM,     |
 | see the CiviCRM license FAQ at http://civicrm.org/licensing        |
 +--------------------------------------------------------------------+
*/
/**
 * DAO (data access object) for the civicrm_payment_processor table.
 *
 * @package CRM
 * @copyright CiviCRM LLC (c) 2004-2013
 *
 * Generated from xml/schema/CRM/Financial/PaymentProcessor.xml
 * DO NOT EDIT.  Generated by GenCode.php
 *
 * NOTE(review): the "clas" -> "class" title typo fixed below should also be
 * fixed in xml/schema/CRM/Financial/PaymentProcessor.xml, otherwise the next
 * GenCode run will regenerate the typo.
 */
require_once 'CRM/Core/DAO.php';
require_once 'CRM/Utils/Type.php';
class CRM_Financial_DAO_PaymentProcessor extends CRM_Core_DAO
{
  /**
   * static instance to hold the table name
   *
   * @var string
   * @static
   */
  static $_tableName = 'civicrm_payment_processor';
  /**
   * static instance to hold the field values
   *
   * @var array
   * @static
   */
  static $_fields = null;
  /**
   * static instance to hold the keys used in $_fields for each field.
   *
   * @var array
   * @static
   */
  static $_fieldKeys = null;
  /**
   * static instance to hold the FK relationships
   *
   * @var string
   * @static
   */
  static $_links = null;
  /**
   * static instance to hold the values that can
   * be imported
   *
   * @var array
   * @static
   */
  static $_import = null;
  /**
   * static instance to hold the values that can
   * be exported
   *
   * @var array
   * @static
   */
  static $_export = null;
  /**
   * static value to see if we should log any modifications to
   * this table in the civicrm_log table
   *
   * @var boolean
   * @static
   */
  static $_log = false;
  /**
   * Payment Processor ID
   *
   * @var int unsigned
   */
  public $id;
  /**
   * Which Domain is this match entry for
   *
   * @var int unsigned
   */
  public $domain_id;
  /**
   * Payment Processor Name.
   *
   * @var string
   */
  public $name;
  /**
   * Payment Processor Description.
   *
   * @var string
   */
  public $description;
  /**
   * FK to civicrm_payment_processor_type.
   *
   * @var int unsigned
   */
  public $payment_processor_type_id;
  /**
   * Is this processor active?
   *
   * @var boolean
   */
  public $is_active;
  /**
   * Is this processor the default?
   *
   * @var boolean
   */
  public $is_default;
  /**
   * Is this processor for a test site?
   *
   * @var boolean
   */
  public $is_test;
  /**
   * @var string
   */
  public $user_name;
  /**
   * @var string
   */
  public $password;
  /**
   * @var string
   */
  public $signature;
  /**
   * @var string
   */
  public $url_site;
  /**
   * @var string
   */
  public $url_api;
  /**
   * @var string
   */
  public $url_recur;
  /**
   * @var string
   */
  public $url_button;
  /**
   * @var string
   */
  public $subject;
  /**
   * @var string
   */
  public $class_name;
  /**
   * Billing Mode
   *
   * @var int unsigned
   */
  public $billing_mode;
  /**
   * Can process recurring contributions
   *
   * @var boolean
   */
  public $is_recur;
  /**
   * Payment Type: Credit or Debit
   *
   * @var int unsigned
   */
  public $payment_type;
  /**
   * class constructor
   *
   * @access public
   * @return civicrm_payment_processor
   */
  function __construct()
  {
    $this->__table = 'civicrm_payment_processor';
    parent::__construct();
  }
  /**
   * return foreign keys and entity references
   *
   * @static
   * @access public
   * @return array of CRM_Core_EntityReference
   */
  static function getReferenceColumns()
  {
    if (!self::$_links) {
      self::$_links = array(
        new CRM_Core_EntityReference(self::getTableName(), 'domain_id', 'civicrm_domain', 'id'),
        new CRM_Core_EntityReference(self::getTableName(), 'payment_processor_type_id', 'civicrm_payment_processor_type', 'id'),
      );
    }
    return self::$_links;
  }
  /**
   * returns all the column names of this table
   *
   * @access public
   * @return array
   */
  static function &fields()
  {
    if (!(self::$_fields)) {
      self::$_fields = array(
        'id' => array(
          'name' => 'id',
          'type' => CRM_Utils_Type::T_INT,
          'required' => true,
        ),
        'domain_id' => array(
          'name' => 'domain_id',
          'type' => CRM_Utils_Type::T_INT,
          'required' => true,
          'FKClassName' => 'CRM_Core_DAO_Domain',
        ),
        'name' => array(
          'name' => 'name',
          'type' => CRM_Utils_Type::T_STRING,
          'title' => ts('Payment Processor'),
          'maxlength' => 64,
          'size' => CRM_Utils_Type::BIG,
        ),
        'description' => array(
          'name' => 'description',
          'type' => CRM_Utils_Type::T_STRING,
          'title' => ts('Description'),
          'maxlength' => 255,
          'size' => CRM_Utils_Type::HUGE,
        ),
        'payment_processor_type_id' => array(
          'name' => 'payment_processor_type_id',
          'type' => CRM_Utils_Type::T_INT,
          'FKClassName' => 'CRM_Financial_DAO_PaymentProcessorType',
        ),
        'is_active' => array(
          'name' => 'is_active',
          'type' => CRM_Utils_Type::T_BOOLEAN,
        ),
        'is_default' => array(
          'name' => 'is_default',
          'type' => CRM_Utils_Type::T_BOOLEAN,
        ),
        'is_test' => array(
          'name' => 'is_test',
          'type' => CRM_Utils_Type::T_BOOLEAN,
        ),
        'user_name' => array(
          'name' => 'user_name',
          'type' => CRM_Utils_Type::T_STRING,
          'title' => ts('User Name'),
          'maxlength' => 255,
          'size' => CRM_Utils_Type::HUGE,
        ),
        'password' => array(
          'name' => 'password',
          'type' => CRM_Utils_Type::T_STRING,
          'title' => ts('Password'),
          'maxlength' => 255,
          'size' => CRM_Utils_Type::HUGE,
        ),
        'signature' => array(
          'name' => 'signature',
          'type' => CRM_Utils_Type::T_STRING,
          'title' => ts('Signature'),
          'maxlength' => 255,
          'size' => CRM_Utils_Type::HUGE,
        ),
        'url_site' => array(
          'name' => 'url_site',
          'type' => CRM_Utils_Type::T_STRING,
          'title' => ts('Site URL'),
          'maxlength' => 255,
          'size' => CRM_Utils_Type::HUGE,
        ),
        'url_api' => array(
          'name' => 'url_api',
          'type' => CRM_Utils_Type::T_STRING,
          'title' => ts('API URL'),
          'maxlength' => 255,
          'size' => CRM_Utils_Type::HUGE,
        ),
        'url_recur' => array(
          'name' => 'url_recur',
          'type' => CRM_Utils_Type::T_STRING,
          'title' => ts('Recurring Payments URL'),
          'maxlength' => 255,
          'size' => CRM_Utils_Type::HUGE,
        ),
        'url_button' => array(
          'name' => 'url_button',
          'type' => CRM_Utils_Type::T_STRING,
          'title' => ts('Button URL'),
          'maxlength' => 255,
          'size' => CRM_Utils_Type::HUGE,
        ),
        'subject' => array(
          'name' => 'subject',
          'type' => CRM_Utils_Type::T_STRING,
          'title' => ts('Subject'),
          'maxlength' => 255,
          'size' => CRM_Utils_Type::HUGE,
        ),
        'class_name' => array(
          'name' => 'class_name',
          'type' => CRM_Utils_Type::T_STRING,
          // typo fix: was ts('Suffix for PHP clas name implementation')
          'title' => ts('Suffix for PHP class name implementation'),
          'maxlength' => 255,
          'size' => CRM_Utils_Type::HUGE,
        ),
        'billing_mode' => array(
          'name' => 'billing_mode',
          'type' => CRM_Utils_Type::T_INT,
          'title' => ts('Billing Mode'),
          'required' => true,
        ),
        'is_recur' => array(
          'name' => 'is_recur',
          'type' => CRM_Utils_Type::T_BOOLEAN,
        ),
        'payment_type' => array(
          'name' => 'payment_type',
          'type' => CRM_Utils_Type::T_INT,
          'title' => ts('Payment Type'),
          'default' => '1',
        ),
      );
    }
    return self::$_fields;
  }
  /**
   * Returns an array containing, for each field, the array key used for that
   * field in self::$_fields.
   *
   * @access public
   * @return array
   */
  static function &fieldKeys()
  {
    if (!(self::$_fieldKeys)) {
      self::$_fieldKeys = array(
        'id' => 'id',
        'domain_id' => 'domain_id',
        'name' => 'name',
        'description' => 'description',
        'payment_processor_type_id' => 'payment_processor_type_id',
        'is_active' => 'is_active',
        'is_default' => 'is_default',
        'is_test' => 'is_test',
        'user_name' => 'user_name',
        'password' => 'password',
        'signature' => 'signature',
        'url_site' => 'url_site',
        'url_api' => 'url_api',
        'url_recur' => 'url_recur',
        'url_button' => 'url_button',
        'subject' => 'subject',
        'class_name' => 'class_name',
        'billing_mode' => 'billing_mode',
        'is_recur' => 'is_recur',
        'payment_type' => 'payment_type',
      );
    }
    return self::$_fieldKeys;
  }
  /**
   * returns the name of this table
   *
   * @access public
   * @static
   * @return string
   */
  static function getTableName()
  {
    return self::$_tableName;
  }
  /**
   * returns if this table needs to be logged
   *
   * @access public
   * @return boolean
   */
  function getLog()
  {
    return self::$_log;
  }
  /**
   * returns the list of fields that can be imported
   *
   * @access public
   * @return array
   * @static
   */
  static function &import($prefix = false)
  {
    if (!(self::$_import)) {
      self::$_import = array();
      $fields = self::fields();
      foreach ($fields as $name => $field) {
        if (CRM_Utils_Array::value('import', $field)) {
          if ($prefix) {
            self::$_import['payment_processor'] = &$fields[$name];
          } else {
            self::$_import[$name] = &$fields[$name];
          }
        }
      }
    }
    return self::$_import;
  }
  /**
   * returns the list of fields that can be exported
   *
   * @access public
   * @return array
   * @static
   */
  static function &export($prefix = false)
  {
    if (!(self::$_export)) {
      self::$_export = array();
      $fields = self::fields();
      foreach ($fields as $name => $field) {
        if (CRM_Utils_Array::value('export', $field)) {
          if ($prefix) {
            self::$_export['payment_processor'] = &$fields[$name];
          } else {
            self::$_export[$name] = &$fields[$name];
          }
        }
      }
    }
    return self::$_export;
  }
}
<?php
/*
 * This file is part of EC-CUBE
 *
 * Copyright(c) 2000-2014 LOCKON CO.,LTD. All Rights Reserved.
 *
 * http://www.lockon.co.jp/
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */

// Front-controller script: boots the framework and dispatches the
// member-registration "kiyaku" (terms-of-agreement) page.
// It delegates all real work to the LC_Page_Entry_Kiyaku_Ex page class.
require_once '../require.php';
require_once CLASS_EX_REALDIR . 'page_extends/entry/LC_Page_Entry_Kiyaku_Ex.php';

// Standard EC-CUBE page lifecycle: construct, init, then process the request.
$objPage = new LC_Page_Entry_Kiyaku_Ex();
$objPage->init();
$objPage->process();
/* Copyright_License {

  XCSoar Glide Computer - http://www.xcsoar.org/
  Copyright (C) 2000-2015 The XCSoar Project
  A detailed list of copyright holders can be found in the file "AUTHORS".

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License
  as published by the Free Software Foundation; either version 2
  of the License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
}
*/

#include "NMEA/Acceleration.hpp"

void
AccelerationState::Complement(const AccelerationState &add)
{
  /* nothing to merge when the other state carries no data */
  if (!add.available)
    return;

  /* keep our current data unless we have none at all, or the other
     state is "real" (measured) while ours is not */
  if (available && (real || !add.real))
    return;

  real = add.real;
  g_load = add.g_load;
  available = add.available;
}
## 2.2.4
* Fix `prepend()` on empty `Root`.

## 2.2.3
* Allow to use object shortcut in `use()` with functions like `autoprefixer`.

## 2.2.2
* Add shortcut to set processors in `use()` via object with `.postcss` property.

## 2.2.1
* Send `opts` from `Processor#process(css, opts)` to processors.

## 2.2 “Marquis Cimeies”
* Use GNU style syntax error messages.
* Add `Node#replace` method.
* Add `CssSyntaxError#reason` property.

## 2.1.2
* Fix UTF-8 support in inline source map.
* Fix source map `sourcesContent` if there is no `from` and `to` options.

## 2.1.1
* Allow to miss `to` and `from` options for inline source maps.
* Add `Node#source.id` if file name is unknown.
* Better detect splitter between rules in CSS concatenation tools.
* Automatically clone node in insert methods.

## 2.1 “King Amdusias”
* Change Traceur ES6 compiler to ES6 Transpiler.
* Show broken CSS line in syntax error.

## 2.0 “King Belial”
* Project was rewritten from CoffeeScript to ES6.
* Add Safe Mode to work with live input or with hacks from legacy code.
* Safer parser to pass all hacks from Browserhacks.com.
* Use real properties instead of magic getter/setter for raw properties.

## 1.0 “Marquis Decarabia”
* Save previous source map for each node to support CSS concatenation
  with multiple previous maps.
* Add `map.sourcesContent` option to add origin content to `sourcesContent`
  inside map.
* Allow to set different place of output map in annotation comment.
* Allow to use arrays and `Root` in `Container#append` and same methods.
* Add `Root#prevMap` with information about previous map.
* Allow to use latest PostCSS from GitHub by npm.
* `Result` now is lazy and it will stringify output CSS only if you use
  `css` or `map` property.
* Use separated `map.prev` option to set previous map.
* Rename `inlineMap` option to `map.inline`.
* Rename `mapAnnotation` option to `map.annotation`.
* `Result#map` now returns `SourceMapGenerator` object, instead of string.
* Run previous map autodetect only if input CSS contains annotation comment.
* Add `map: 'inline'` shortcut for `map: { inline: true }` option.
* `Node#source.file` now will contain absolute path.
* Clean `Declaration#between` style on node clone.

## 0.3.5
* Allow to use `Root` or `Result` as first argument in `process()`.
* Save parsed AST to `Result#root`.

## 0.3.4
* Better space symbol detection to read UTF-8 BOM correctly.

## 0.3.3
* Remove source map hacks by using new Mozilla’s `source-map` (by Simon Lydell).

## 0.3.2
* Add URI encoding support for inline source maps.

## 0.3.1
* Fix relative paths from previous source map.
* Safer space split in `Rule#selectors` (by Simon Lydell).

## 0.3 “Prince Seere”
* Add `Comment` node for comments between declarations or rules.
* Add source map annotation comment to output CSS.
* Allow to inline source map to annotation comment by data:uri.
* Fix source maps on Windows.
* Fix source maps for styles in subdirectory (by @nDmitry and @lydell).
* Autodetect previous source map.
* Add `first` and `last` shortcuts to container nodes.
* Parse `!important` to separated property in `Declaration`.
* Allow to break iteration by returning `false`.
* Copy code style to new nodes.
* Add `eachInside` method to recursively iterate all nodes.
* Add `selectors` shortcut to get selectors array.
* Add `toResult` method to `Rule` to simplify work with several input files.
* Clean declaration’s `value`, rule’s `selector` and at-rule’s `params`
  by storing spaces in `between` property.

## 0.2 “Duke Dantalion”
* Add source map support.
* Add shortcuts to create nodes.
* Method `process()` now returns object with `css` and `map` keys.
* Origin CSS file option was renamed from `file` to `from`.
* Rename `Node#remove()` method to `removeSelf()` to fix name conflict.
* Node source was moved to `source` property with origin file
  and node end position.
* You can set own stringify function.

## 0.1 “Count Andromalius”
* Initial release.
<?php /* * main class of User Role Editor WordPress plugin * Author: Vladimir Garagulya * Author email: vladimir@shinephp.com * Author URI: http://shinephp.com * License: GPL v2+ * */ class User_Role_Editor { // common code staff, including options data processor protected $lib = null; // plugin's Settings page reference, we've got it from add_options_pages() call protected $setting_page_hook = null; // URE's key capability public $key_capability = 'not allowed'; /** * class constructor */ function __construct($library) { // activation action register_activation_hook(URE_PLUGIN_FULL_PATH, array($this, 'setup')); // deactivation action register_deactivation_hook(URE_PLUGIN_FULL_PATH, array($this, 'cleanup')); // get plugin specific library object $this->lib = $library; // Who may use this plugin $this->key_capability = $this->lib->get_key_capability(); if ($this->lib->multisite) { // new blog may be registered not at admin back-end only but automatically after new user registration, e.g. // Gravity Forms User Registration Addon does add_action( 'wpmu_new_blog', array($this, 'duplicate_roles_for_new_blog'), 10, 2); } if (!is_admin()) { return; } add_action('admin_init', array($this, 'plugin_init'), 1); // Add the translation function after the plugins loaded hook. 
add_action('plugins_loaded', array($this, 'load_translation')); // add own submenu add_action('admin_menu', array($this, 'plugin_menu')); if ($this->lib->multisite) { // add own submenu add_action('network_admin_menu', array($this, 'network_plugin_menu')); } // add a Settings link in the installed plugins page add_filter('plugin_action_links', array($this, 'plugin_action_links'), 10, 2); add_filter('plugin_row_meta', array($this, 'plugin_row_meta'), 10, 2); } // end of __construct() /** * Plugin initialization * */ public function plugin_init() { global $current_user; if (!empty($current_user->ID)) { $user_id = $current_user->ID; } else { $user_id = 0; } // these filters and actions should prevent editing users with administrator role // by other users with 'edit_users' capability if (!$this->lib->user_is_admin($user_id)) { // Exclude administrator role from edit list. add_filter('editable_roles', array($this, 'exclude_admin_role' ) ); // prohibit any actions with user who has Administrator role add_filter('user_has_cap', array($this, 'not_edit_admin' ), 10, 3); // exclude users with 'Administrator' role from users list add_action('pre_user_query', array($this, 'exclude_administrators' ) ); // do not show 'Administrator (s)' view above users list add_filter('views_users', array($this, 'exclude_admins_view' ) ); } add_action( 'admin_enqueue_scripts', array($this, 'admin_load_js' ) ); add_action( 'user_row_actions', array($this, 'user_row'), 10, 2 ); add_action( 'edit_user_profile', array($this, 'edit_user_profile'), 10, 2 ); add_filter( 'manage_users_columns', array($this, 'user_role_column'), 10, 1 ); add_filter( 'manage_users_custom_column', array($this, 'user_role_row'), 10, 3 ); add_action( 'profile_update', array($this, 'user_profile_update'), 10 ); add_filter( 'all_plugins', array($this, 'exclude_from_plugins_list' ) ); if ($this->lib->multisite) { add_action( 'wpmu_activate_user', array($this, 'add_other_default_roles'), 10, 1 ); 
$allow_edit_users_to_not_super_admin = $this->lib->get_option('allow_edit_users_to_not_super_admin', 0); if ($allow_edit_users_to_not_super_admin) { add_filter( 'map_meta_cap', array($this, 'restore_users_edit_caps'), 1, 4 ); remove_all_filters( 'enable_edit_any_user_configuration' ); add_filter( 'enable_edit_any_user_configuration', '__return_true'); add_filter( 'admin_head', array($this, 'edit_user_permission_check'), 1, 4 ); } } else { add_action( 'user_register', array($this, 'add_other_default_roles'), 10, 1 ); $count_users_without_role = $this->lib->get_option('count_users_without_role', 0); if ($count_users_without_role) { add_action( 'restrict_manage_users', array($this, 'move_users_from_no_role_button') ); add_action( 'admin_init', array($this, 'add_css_to_users_page')); add_action( 'admin_footer', array($this, 'add_js_to_users_page') ); } } add_action('wp_ajax_ure_ajax', array($this, 'ure_ajax')); } // end of plugin_init() public function move_users_from_no_role_button() { global $wpdb; if ( stripos($_SERVER['REQUEST_URI'], 'wp-admin/users.php')===false ) { return; } $id = get_current_blog_id(); $blog_prefix = $wpdb->get_blog_prefix($id); $query = "select count(ID) from {$wpdb->users} users where not exists (select user_id from {$wpdb->usermeta} where user_id=users.ID and meta_key='{$blog_prefix}capabilities') or exists (select user_id from wp_usermeta where user_id=users.ID and meta_key='{$blog_prefix}capabilities' and meta_value='a:0:{}') ;"; $users_count = $wpdb->get_var($query); if ($users_count>0) { ?> &nbsp;&nbsp;<input type="button" name="move_from_no_role" id="move_from_no_role" class="button" value="Without role (<?php echo $users_count;?>)" onclick="ure_move_users_from_no_role_dialog()"> <div id="move_from_no_role_dialog" class="ure-dialog"> <div id="move_from_no_role_content" style="padding: 10px;"> To: <select name="ure_new_role" id="ure_new_role"> <option value="no_rights">No rights</option> </select><br> </div> </div> <?php } } // end of 
move_users_from_no_role() public function add_css_to_users_page() { if ( stripos($_SERVER['REQUEST_URI'], 'wp-admin/users.php')===false ) { return; } wp_enqueue_style('wp-jquery-ui-dialog'); wp_enqueue_style('ure-admin-css', URE_PLUGIN_URL . 'css/ure-admin.css', array(), false, 'screen'); } public function add_js_to_users_page() { if ( stripos($_SERVER['REQUEST_URI'], 'wp-admin/users.php')===false ) { return; } wp_enqueue_script('jquery-ui-dialog', false, array('jquery-ui-core','jquery-ui-button', 'jquery') ); wp_register_script( 'ure-users-js', plugins_url( '/js/ure-users.js', URE_PLUGIN_FULL_PATH ) ); wp_enqueue_script ( 'ure-users-js' ); wp_localize_script( 'ure-users-js', 'ure_users_data', array( 'wp_nonce' => wp_create_nonce('user-role-editor'), 'move_from_no_role_title' => esc_html__('Change role for users without role', 'ure'), 'no_rights_caption' => esc_html__('No rights', 'ure'), 'provide_new_role_caption' => esc_html__('Provide new role', 'ure') )); } // end of add_js_to_users_page() public function add_other_default_roles($user_id) { if (empty($user_id)) { return; } $user = get_user_by('id', $user_id); if (empty($user->ID)) { return; } $other_default_roles = $this->lib->get_option('other_default_roles', array()); if (count($other_default_roles)==0) { return; } foreach($other_default_roles as $role) { $user->add_role($role); } } // end of add_other_default_roles() /** * restore edit_users, delete_users, create_users capabilities for non-superadmin users under multisite * (code is provided by http://wordpress.org/support/profile/sjobidoo) * * @param type $caps * @param type $cap * @param type $user_id * @param type $args * @return type */ public function restore_users_edit_caps($caps, $cap, $user_id, $args) { foreach ($caps as $key => $capability) { if ($capability != 'do_not_allow') continue; switch ($cap) { case 'edit_user': case 'edit_users': $caps[$key] = 'edit_users'; break; case 'delete_user': case 'delete_users': $caps[$key] = 'delete_users'; break; 
case 'create_users': $caps[$key] = $cap; break; } } return $caps; } // end of restore_user_edit_caps() /** * Checks that both the editing user and the user being edited are * members of the blog and prevents the super admin being edited. * (code is provided by http://wordpress.org/support/profile/sjobidoo) * */ function edit_user_permission_check() { global $current_user, $profileuser; if (is_super_admin()) { // Superadmin may do all return; } $screen = get_current_screen(); get_currentuserinfo(); if ($screen->base == 'user-edit' || $screen->base == 'user-edit-network') { // editing a user profile if (!is_super_admin($current_user->ID) && is_super_admin($profileuser->ID)) { // trying to edit a superadmin while himself is less than a superadmin wp_die(esc_html__('You do not have permission to edit this user.')); } elseif (!( is_user_member_of_blog($profileuser->ID, get_current_blog_id()) && is_user_member_of_blog($current_user->ID, get_current_blog_id()) )) { // editing user and edited user aren't members of the same blog wp_die(esc_html__('You do not have permission to edit this user.')); } } } // end of edit_user_permission_check() /** * exclude administrator role from the roles list * * @param string $roles * @return array */ public function exclude_admin_role($roles) { if (isset($roles['administrator'])) { unset($roles['administrator']); } return $roles; } // end of exclude_admin_role() /** * We have two vulnerable queries with user id at admin interface, which should be processed * 1st: http://blogdomain.com/wp-admin/user-edit.php?user_id=ID&wp_http_referer=%2Fwp-admin%2Fusers.php * 2nd: http://blogdomain.com/wp-admin/users.php?action=delete&user=ID&_wpnonce=ab34225a78 * If put Administrator user ID into such request, user with lower capabilities (if he has 'edit_users') * can edit, delete admin record * This function removes 'edit_users' capability from current user capabilities * if request has admin user ID in it * * @param array $allcaps * @param type $caps 
* @param string $name * @return array */ public function not_edit_admin($allcaps, $caps, $name) { $user_keys = array('user_id', 'user'); foreach ($user_keys as $user_key) { $access_deny = false; $user_id = $this->lib->get_request_var($user_key, 'get'); if (!empty($user_id)) { if ($user_id == 1) { // built-in WordPress Admin $access_deny = true; } else { if (!isset($this->lib->user_to_check[$user_id])) { // check if user_id has Administrator role $access_deny = $this->lib->has_administrator_role($user_id); } else { // user_id was checked already, get result from cash $access_deny = $this->lib->user_to_check[$user_id]; } } if ($access_deny) { unset($allcaps['edit_users']); } break; } } return $allcaps; } // end of not_edit_admin() /** * add where criteria to exclude users with 'Administrator' role from users list * * @global wpdb $wpdb * @param type $user_query */ public function exclude_administrators($user_query) { global $wpdb; $result = false; $links_to_block = array('profile.php', 'users.php'); foreach ( $links_to_block as $key => $value ) { $result = stripos($_SERVER['REQUEST_URI'], $value); if ( $result !== false ) { break; } } if ( $result===false ) { // block the user edit stuff only return; } // get user_id of users with 'Administrator' role $tableName = (!$this->lib->multisite && defined('CUSTOM_USER_META_TABLE')) ? CUSTOM_USER_META_TABLE : $wpdb->usermeta; $meta_key = $wpdb->prefix . 
'capabilities'; $admin_role_key = '%"administrator"%'; $query = "select user_id from $tableName where meta_key='$meta_key' and meta_value like '$admin_role_key'"; $ids_arr = $wpdb->get_col($query); if (is_array($ids_arr) && count($ids_arr) > 0) { $ids = implode(',', $ids_arr); $user_query->query_where .= " AND ( $wpdb->users.ID NOT IN ( $ids ) )"; } } // end of exclude_administrators() /* * Exclude view of users with Administrator role * */ public function exclude_admins_view($views) { unset($views['administrator']); return $views; } // end of exclude_admins_view() /** * Add/hide edit actions for every user row at the users list * * @global type $pagenow * @global type $current_user * @param string $actions * @param type $user * @return string */ public function user_row($actions, $user) { global $pagenow, $current_user; if ($pagenow == 'users.php') { if ($current_user->has_cap($this->key_capability)) { $actions['capabilities'] = '<a href="' . wp_nonce_url("users.php?page=users-".URE_PLUGIN_FILE."&object=user&amp;user_id={$user->ID}", "ure_user_{$user->ID}") . '">' . esc_html__('Capabilities', 'ure') . 
'</a>'; } } return $actions; } // end of user_row() /** * every time when new blog created - duplicate to it roles from the main blog (1) * @global wpdb $wpdb * @global WP_Roles $wp_roles * @param int $blog_id * @param int $user_id * */ public function duplicate_roles_for_new_blog($blog_id) { global $wpdb, $wp_roles; // get Id of 1st (main) blog $main_blog_id = $this->lib->get_main_blog_id(); if ( empty($main_blog_id) ) { return; } $current_blog = $wpdb->blogid; switch_to_blog( $main_blog_id ); $main_roles = new WP_Roles(); // get roles from primary blog $default_role = get_option('default_role'); // get default role from primary blog switch_to_blog($blog_id); // switch to the new created blog $main_roles->use_db = false; // do not touch DB $main_roles->add_cap('administrator', 'dummy_123456'); // just to save current roles into new blog $main_roles->role_key = $wp_roles->role_key; $main_roles->use_db = true; // save roles into new blog DB $main_roles->remove_cap('administrator', 'dummy_123456'); // remove unneeded dummy capability update_option('default_role', $default_role); // set default role for new blog as it set for primary one switch_to_blog($current_blog); // return to blog where we were at the begin } // end of duplicate_roles_for_new_blog() /** * Filter out URE plugin from not superadmin users * @param type array $plugins plugins list * @return type array $plugins updated plugins list */ public function exclude_from_plugins_list($plugins) { global $current_user; $ure_key_capability = $this->lib->get_key_capability(); // if multi-site, then allow plugin activation for network superadmins and, if that's specially defined, - for single site administrators too if ($this->lib->user_has_capability($current_user, $ure_key_capability)) { return $plugins; } // exclude URE from plugins list foreach ($plugins as $key => $value) { if ($key == 'user-role-editor/' . 
URE_PLUGIN_FILE) { unset($plugins[$key]); break; } } return $plugins; } // end of exclude_from_plugins_list() /** * Load plugin translation files - linked to the 'plugins_loaded' action * */ function load_translation() { load_plugin_textdomain('ure', '', dirname( plugin_basename( URE_PLUGIN_FULL_PATH ) ) .'/lang'); } // end of ure_load_translation() /** * Modify plugin actions link * * @param array $links * @param string $file * @return array */ public function plugin_action_links($links, $file) { if ($file == plugin_basename(dirname(URE_PLUGIN_FULL_PATH).'/'.URE_PLUGIN_FILE)) { $settings_link = "<a href='options-general.php?page=settings-".URE_PLUGIN_FILE."'>" . esc_html__('Settings', 'ure') . "</a>"; array_unshift($links, $settings_link); } return $links; } // end of plugin_action_links() public function plugin_row_meta($links, $file) { if ($file == plugin_basename(dirname(URE_PLUGIN_FULL_PATH) .'/'.URE_PLUGIN_FILE)) { $links[] = '<a target="_blank" href="http://role-editor.com/changelog">' . esc_html__('Changelog', 'ure') . '</a>'; } return $links; } // end of plugin_row_meta public function settings_screen_configure() { $settings_page_hook = $this->settings_page_hook; if (is_multisite()) { $settings_page_hook .= '-network'; } $screen = get_current_screen(); // Check if current screen is URE's settings page if ($screen->id != $settings_page_hook) { return; } $screen_help = new Ure_Screen_Help(); $screen->add_help_tab( array( 'id' => 'overview', 'title' => esc_html__('Overview'), 'content' => $screen_help->get_settings_help('overview') )); } // end of settings_screen_configure() public function plugin_menu() { $translated_title = esc_html__('User Role Editor', 'ure'); if (function_exists('add_submenu_page')) { $ure_page = add_submenu_page( 'users.php', $translated_title, $translated_title, $this->key_capability, 'users-' . 
URE_PLUGIN_FILE, array($this, 'edit_roles')); add_action("admin_print_styles-$ure_page", array($this, 'admin_css_action')); } if (!$this->lib->multisite) { $this->settings_page_hook = add_options_page( $translated_title, $translated_title, $this->key_capability, 'settings-' . URE_PLUGIN_FILE, array($this, 'settings')); add_action( 'load-'.$this->settings_page_hook, array($this,'settings_screen_configure') ); add_action("admin_print_styles-{$this->settings_page_hook}", array($this, 'admin_css_action')); } } // end of plugin_menu() public function network_plugin_menu() { if (is_multisite()) { $translated_title = esc_html__('User Role Editor', 'ure'); $this->settings_page_hook = add_submenu_page( 'settings.php', $translated_title, $translated_title, $this->key_capability, 'settings-' . URE_PLUGIN_FILE, array(&$this, 'settings')); add_action( 'load-'.$this->settings_page_hook, array($this,'settings_screen_configure') ); add_action("admin_print_styles-{$this->settings_page_hook}", array($this, 'admin_css_action')); } } // end of network_plugin_menu() protected function get_settings_action() { $action = 'show'; $update_buttons = array('ure_settings_update', 'ure_addons_settings_update', 'ure_settings_ms_update', 'ure_default_roles_update'); foreach($update_buttons as $update_button) { if (!isset($_POST[$update_button])) { continue; } if (!wp_verify_nonce($_POST['_wpnonce'], 'user-role-editor')) { wp_die('Security check failed'); } $action = $update_button; break; } return $action; } // end of get_settings_action() /** * Update General Options tab */ protected function update_general_options() { if (defined('URE_SHOW_ADMIN_ROLE') && (URE_SHOW_ADMIN_ROLE == 1)) { $show_admin_role = 1; } else { $show_admin_role = $this->lib->get_request_var('show_admin_role', 'checkbox'); } $this->lib->put_option('show_admin_role', $show_admin_role); $caps_readable = $this->lib->get_request_var('caps_readable', 'checkbox'); $this->lib->put_option('ure_caps_readable', $caps_readable); 
$show_deprecated_caps = $this->lib->get_request_var('show_deprecated_caps', 'checkbox'); $this->lib->put_option('ure_show_deprecated_caps', $show_deprecated_caps); do_action('ure_settings_update1'); $this->lib->flush_options(); $this->lib->show_message(esc_html__('User Role Editor options are updated', 'ure')); } // end of update_general_options() /** * Update Additional Modules Options tab */ protected function update_addons_options() { if (!$this->lib->multisite) { $count_users_without_role = $this->lib->get_request_var('count_users_without_role', 'checkbox'); $this->lib->put_option('count_users_without_role', $count_users_without_role); } do_action('ure_settings_update2'); $this->lib->flush_options(); $this->lib->show_message(esc_html__('User Role Editor options are updated', 'ure')); } // end of update_addons_options() protected function update_default_roles() { global $wp_roles; // Primary default role $primary_default_role = $this->lib->get_request_var('default_user_role', 'post'); if (!empty($primary_default_role) && isset($wp_roles->role_objects[$primary_default_role]) && $primary_default_role !== 'administrator') { update_option('default_role', $primary_default_role); } // Other default roles $other_default_roles = array(); foreach($_POST as $key=>$value) { $prefix = substr($key, 0, 8); if ($prefix!=='wp_role_') { continue; } $role_id = substr($key, 8); if ($role_id!=='administrator' && isset($wp_roles->role_objects[$role_id])) { $other_default_roles[] = $role_id; } } // foreach() $this->lib->put_option('other_default_roles', $other_default_roles, true); $this->lib->show_message(esc_html__('Default Roles are updated', 'ure')); } // end of update_default_roles() protected function update_multisite_options() { if (!$this->lib->multisite) { return; } $allow_edit_users_to_not_super_admin = $this->lib->get_request_var('allow_edit_users_to_not_super_admin', 'checkbox'); $this->lib->put_option('allow_edit_users_to_not_super_admin', 
$allow_edit_users_to_not_super_admin); do_action('ure_settings_ms_update'); $this->lib->flush_options(); $this->lib->show_message(esc_html__('User Role Editor options are updated', 'ure')); } // end of update_multisite_options() public function settings() { if (!current_user_can($this->key_capability)) { esc_html__( 'You do not have sufficient permissions to manage options for User Role Editor.', 'ure' ); } $action = $this->get_settings_action(); switch ($action) { case 'ure_settings_update': $this->update_general_options(); break; case 'ure_addons_settings_update': $this->update_addons_options(); break; case 'ure_settings_ms_update': $this->update_multisite_options(); break; case 'ure_default_roles_update': $this->update_default_roles(); case 'show': default: ; } // switch() if (defined('URE_SHOW_ADMIN_ROLE') && (URE_SHOW_ADMIN_ROLE == 1)) { $show_admin_role = 1; } else { $show_admin_role = $this->lib->get_option('show_admin_role', 0); } $caps_readable = $this->lib->get_option('ure_caps_readable', 0); $show_deprecated_caps = $this->lib->get_option('ure_show_deprecated_caps', 0); if ($this->lib->multisite) { $allow_edit_users_to_not_super_admin = $this->lib->get_option('allow_edit_users_to_not_super_admin', 0); } else { $count_users_without_role = $this->lib->get_option('count_users_without_role', 0); } $this->lib->get_default_role(); $this->lib->editor_init1(); $this->lib->role_edit_prepare_html(0); $ure_tab_idx = $this->lib->get_request_var('ure_tab_idx', 'int'); do_action('ure_settings_load'); if ($this->lib->multisite) { $link = 'settings.php'; } else { $link = 'options-general.php'; } require_once(URE_PLUGIN_DIR . 'includes/settings-template.php'); } // end of settings() public function admin_css_action() { wp_enqueue_style('wp-jquery-ui-dialog'); if (stripos($_SERVER['REQUEST_URI'], 'settings-user-role-editor')!==false) { wp_enqueue_style('ure-jquery-ui-tabs', URE_PLUGIN_URL . 
'css/jquery-ui-1.10.4.custom.min.css', array(), false, 'screen'); } wp_enqueue_style('ure-admin-css', URE_PLUGIN_URL . 'css/ure-admin.css', array(), false, 'screen'); } // end of admin_css_action() // call roles editor page public function edit_roles() { global $current_user; if (!empty($current_user)) { $user_id = $current_user->ID; } else { $user_id = false; } $ure_key_capability = $this->lib->get_key_capability(); if (!$this->lib->user_has_capability($current_user, $ure_key_capability)) { die(esc_html__('Insufficient permissions to work with User Role Editor', 'ure')); } $this->lib->editor(); } // end of edit_roles() // move old version option to the new storage 'user_role_editor' option, array, containing all URE options private function convert_option($option_name) { $option_value = get_option($option_name, 0); delete_option($option_name); $this->lib->put_option( $option_name, $option_value ); } /** * execute on plugin activation */ function setup() { $this->convert_option('ure_caps_readable'); $this->convert_option('ure_show_deprecated_caps'); $this->convert_option('ure_hide_pro_banner'); $this->lib->flush_options(); $this->lib->make_roles_backup(); } // end of setup() /** * Load plugin javascript stuff * * @param string $hook_suffix */ public function admin_load_js($hook_suffix){ if (class_exists('User_Role_Editor_Pro')) { $ure_hook_suffixes = array('settings_page_settings-user-role-editor-pro', 'users_page_users-user-role-editor-pro'); } else { $ure_hook_suffixes = array('settings_page_settings-user-role-editor', 'users_page_users-user-role-editor'); } if (in_array($hook_suffix, $ure_hook_suffixes)) { wp_enqueue_script('jquery-ui-dialog', false, array('jquery-ui-core','jquery-ui-button', 'jquery') ); wp_enqueue_script('jquery-ui-tabs', false, array('jquery-ui-core', 'jquery') ); wp_register_script( 'ure-js', plugins_url( '/js/ure-js.js', URE_PLUGIN_FULL_PATH ) ); wp_enqueue_script ( 'ure-js' ); wp_localize_script( 'ure-js', 'ure_data', array( 'wp_nonce' => 
wp_create_nonce('user-role-editor'), 'page_url' => URE_WP_ADMIN_URL . URE_PARENT .'?page=users-'.URE_PLUGIN_FILE, 'is_multisite' => is_multisite() ? 1 : 0, 'select_all' => esc_html__('Select All', 'ure'), 'unselect_all' => esc_html__('Unselect All', 'ure'), 'reverse' => esc_html__('Reverse', 'ure'), 'update' => esc_html__('Update', 'ure'), 'confirm_submit' => esc_html__('Please confirm permissions update', 'ure'), 'add_new_role_title' => esc_html__('Add New Role', 'ure'), 'role_name_required' => esc_html__(' Role name (ID) can not be empty!', 'ure'), 'role_name_valid_chars' => esc_html__(' Role name (ID) must contain latin characters, digits, hyphens or underscore only!', 'ure'), 'numeric_role_name_prohibited' => esc_html__(' WordPress does not support numeric Role name (ID). Add latin characters to it.', 'ure'), 'add_role' => esc_html__('Add Role', 'ure'), 'delete_role' => esc_html__('Delete Role', 'ure'), 'cancel' => esc_html__('Cancel', 'ure'), 'add_capability' => esc_html__('Add Capability', 'ure'), 'delete_capability' => esc_html__('Delete Capability', 'ure'), 'reset' => esc_html__('Reset', 'ure'), 'reset_warning' => esc_html__('DANGER! Resetting will restore default settings from WordPress Core.','ure')."\n\n". esc_html__('If any plugins have changed capabilities in any way upon installation (such as S2Member, WooCommerce, and many more), those capabilities will be DELETED!', 'ure')."\n\n" . esc_html__('For more information on how to undo changes and restore plugin capabilities go to', 'ure')."\n". 'http://role-editor.com/how-to-restore-deleted-wordpress-user-roles/'."\n\n". esc_html__('Continue?', 'ure'), 'default_role' => esc_html__('Default Role', 'ure'), 'set_new_default_role' => esc_html__('Set New Default Role', 'ure'), 'delete_capability' => esc_html__('Delete Capability', 'ure'), 'delete_capability_warning' => esc_html__('Warning! 
Be careful - removing critical capability could crash some plugin or other custom code', 'ure'), 'capability_name_required' => esc_html__(' Capability name (ID) can not be empty!', 'ure'), 'capability_name_valid_chars' => esc_html__(' Capability name (ID) must contain latin characters, digits, hyphens or underscore only!', 'ure'), ) ); // load additional JS stuff for Pro version, if exists do_action('ure_load_js'); } } // end of admin_load_js() protected function is_user_profile_extention_allowed() { // Check if we are not at the network admin center $result = stripos($_SERVER['REQUEST_URI'], 'network/user-edit.php') == false; return $result; } // end of is_user_profile_extention_allowed() public function edit_user_profile($user) { global $current_user; if (!$this->is_user_profile_extention_allowed()) { return; } if (!$this->lib->user_is_admin($current_user->ID)) { return; } ?> <h3><?php _e('User Role Editor', 'ure'); ?></h3> <table class="form-table"> <tr> <th scope="row"><?php _e('Other Roles', 'ure'); ?></th> <td> <?php $roles = $this->lib->other_user_roles($user); if (is_array($roles) && count($roles) > 0) { foreach ($roles as $role) { echo '<input type="hidden" name="ure_other_roles[]" value="' . $role . '" />'; } } $output = $this->lib->roles_text($roles); echo $output . '&nbsp;&nbsp;&gt;&gt;&nbsp;<a href="' . wp_nonce_url("users.php?page=users-".URE_PLUGIN_FILE."&object=user&amp;user_id={$user->ID}", "ure_user_{$user->ID}") . '">' . esc_html__('Edit', 'ure') . 
'</a>'; ?> </td> </tr> </table> <?php } // end of edit_user_profile() /** * add 'Other Roles' column to WordPress users list table * * @param array $columns WordPress users list table columns list * @return array */ public function user_role_column($columns = array()) { $columns['ure_roles'] = esc_html__('Other Roles', 'ure'); return $columns; } // end of user_role_column() /** * Return user's roles list for display in the WordPress Users list table * * @param string $retval * @param string $column_name * @param int $user_id * * @return string all user roles */ public function user_role_row($retval = '', $column_name = '', $user_id = 0) { // Only looking for User Role Editor other user roles column if ('ure_roles' == $column_name) { $user = get_userdata($user_id); // Get the users roles $roles = $this->lib->other_user_roles($user); $retval = $this->lib->roles_text($roles); } // Pass retval through return $retval; } // end of user_role_row() // save additional user roles when user profile is updated, as WordPress itself doesn't know about them public function user_profile_update($user_id) { if (!current_user_can('edit_user', $user_id)) { return; } $user = get_userdata($user_id); if (isset($_POST['ure_other_roles'])) { $new_roles = array_intersect($user->roles, $_POST['ure_other_roles']); $skip_roles = array(); foreach ($new_roles as $role) { $skip_roles['$role'] = 1; } unset($new_roles); foreach ($_POST['ure_other_roles'] as $role) { if (!isset($skip_roles[$role])) { $user->add_role($role); } } } } // update_user_profile() public function ure_ajax() { require_once(URE_PLUGIN_DIR . 'includes/class-ajax-processor.php'); $ajax_processor = new URE_Ajax_Processor($this->lib); $ajax_processor->dispatch(); } // end of ure_ajax() // execute on plugin deactivation function cleanup() { } // end of setup() } // end of User_Role_Editor
<?php
/**
 * ICE API: widget extensions, debugger template file
 *
 * @author Marshall Sorenson <marshall@presscrew.com>
 * @link http://infinity.presscrew.com/
 * @copyright Copyright (C) 2010-2011 Marshall Sorenson
 * @license http://www.gnu.org/licenses/gpl.html GPLv2 or later
 * @package ICE-extensions
 * @subpackage widgets
 * @since 1.0
 */

/* @var $this ICE_Widget_Renderer */

/*
 * Template layout: an outer widget <div> carrying the renderer's
 * 'ui-widget' attributes, containing a header (title) and a content
 * area (component items). The trailing script initializes the content
 * area as a jsTree instance using the html_data and themeroller
 * plugins, with animation disabled.
 *
 * NOTE(review): this template assumes it is included by the renderer so
 * that $this is bound; it is not executable standalone.
 */
?>
<div <?php $this->render_attrs( 'ui-widget' ) ?>>
	<div class="ui-widget-header">
		<?php $this->render_title() ?>
	</div>
	<div class="ui-widget-content">
		<?php $this->component()->render_items() ?>
	</div>
</div>
<script type="text/javascript">
	jQuery('div#<?php $this->render_id() ?> div.ui-widget-content')
		.jstree({
			'plugins': ['html_data','themeroller'],
			'core': {'animation': 0}
		});
</script>
#!/bin/bash
#***********************************************************
# Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#***********************************************************/
#
# install test data into the fosstester account for use in testing
#
# NOTE: assumes being executed from the sources and that the fosstester
# account already exists!
#
# best if run as fosstester, root will work ok too.
#
# NOTE: This script should be run as either the fosstester or root user
#
# FIX(review): the original used "let $error_cnt += 1", which expands to
# "let 0 += 1" and fails, so error_cnt never changed and the script always
# exited 0 even after reported errors. Replaced with arithmetic expansion.
# Also removed a doubled "export" keyword and corrected the licenses error
# message (it said "license" while the test checks "licenses").
#

thisdir=`pwd`
error_cnt=0
filelist='.bash_aliases .bashrc .subversion .svn'

# Copy the fosstester shell environment into the account's home directory.
if [ -r ./TestData/fosstester/ReadMe ]
then
   for file in $filelist
   do
      cp -r ./TestData/fosstester/$file /home/fosstester/ > /dev/null 2>&1
   done
else
   echo "ERROR! fosstester environment could not be found in $thisdir/TestData/fosstester/"
   error_cnt=$((error_cnt + 1))
fi

if [ -d ./TestData/archives ]
then
   # need to suppress .svn and .subversion errors as we are copying from source
   cp -R ./TestData/archives ~fosstester > /dev/null 2>&1
else
   echo "ERROR! no $thisdir/TestData/archives directory found, could not install archives for testing"
   error_cnt=$((error_cnt + 1))
fi

if [ -d ./TestData/licenses ]
then
   cp -R ./TestData/licenses ~fosstester > /dev/null 2>&1
else
   echo "ERROR! no $thisdir/TestData/licenses directory found, could not install licenses for testing"
   error_cnt=$((error_cnt + 1))
fi

#
# copy selected archives to other places for other tests
#
mkdir -p ~fosstester/public_html
if [ "$?" -ne 0 ]
then
   echo "ERROR!, could not create ~fosstester/public_html"
   error_cnt=$((error_cnt + 1))
fi
cp ~fosstester/archives/fossDirsOnly.tar.bz2 ~fosstester/public_html
if [ "$?" -ne 0 ]
then
   echo "ERROR!, could not copy fossDirsOnly.tar.bz2 to fosstester/public_html"
   error_cnt=$((error_cnt + 1))
fi

mkdir -p ~fosstester/eddy
if [ "$?" -ne 0 ]
then
   echo "ERROR!, could not create ~fosstester/eddy"
   error_cnt=$((error_cnt + 1))
fi
cd ~fosstester/eddy
tar -xf ~fosstester/archives/eddyData.tar.bz2
if [ "$?" -ne 0 ]
then
   echo "ERROR!, tar returned an error unpacking ~fosstester/archives/eddyData.tar.bz2"
   error_cnt=$((error_cnt + 1))
fi

#
# now make a test dir in licenses for server upload testing
#
cd ~fosstester/licenses
mkdir -p Tdir
cp BSD_style_* Tdir

#
# download simpletest into ~fosstester/archives, don't depend on the user
# to have set a proxy. Just set it.
#
cd /home/fosstester/archives
if [ -e 'simpletest_1.0.1.tar.gz' ]
then
   echo "NOTE: simpletest already downloaded, skipping"
else
   export https_proxy='http://lart.fc.hp.com:3128/'
   export http_proxy=http://lart.fc.hp.com:3128/
   export ftp_proxy=http://lart.fc.hp.com:3128/
   echo "downloading simpletest"
   sh -c "wget -q 'http://downloads.sourceforge.net/simpletest/simpletest_1.0.1.tar.gz'"
fi

#
# make test automation reporting directories under public_html
#
echo "making reporting directories under ~fosstester/public_html"
LPath='/home/fosstester/public_html/TestResults/Data/Latest'
mkdir -p $LPath
if [ "$?" -ne 0 ]
then
   echo "ERROR when creating $LPath"
   exit 1
fi
mkdir -p '/home/fosstester/public_html/unitTests'
Path='/home/fosstester/public_html/TestResults/Data'
mdirs='01 02 03 04 05 06 07 08 09 10 11 12 2008'
for dir in $mdirs
do
   mkdir -p "$Path/$dir"
done

#
# make sure fosstester owns things and folks can read them.
#
cd ~fosstester
chown -R fosstester:fosstester archives licenses public_html
chmod -R a+rwx archives licenses public_html

# With the counter fixed, failures above now correctly yield exit 1.
if [ $error_cnt -ne 0 ]
then
   echo "There were previous errors, will exit with 1 (Fail)"
   exit 1
fi
exit 0
<?php

/**
 * @file
 * Definition of Drupal\aggregator\Tests\UpdateFeedTest.
 */

namespace Drupal\aggregator\Tests;

/**
 * Tests functionality of updating the feed in the Aggregator module.
 */
class UpdateFeedTest extends AggregatorTestBase {

  /**
   * Describes this test to the test runner.
   */
  public static function getInfo() {
    return array(
      'name' => 'Update feed functionality',
      'description' => 'Update feed test.',
      'group' => 'Aggregator'
    );
  }

  /**
   * Creates a feed and attempts to update it.
   *
   * Runs three variants: once re-submitting the same title, once the same
   * URL, and once (the '' entry) with neither field copied — the isset()
   * guard below is false for the empty field name, so every field comes
   * from the fresh edit array in that iteration.
   */
  function testUpdateFeed() {
    $remaining_fields = array('title[0][value]', 'url[0][value]', '');
    foreach ($remaining_fields as $same_field) {
      $feed = $this->createFeed();

      // Get new feed data array and modify newly created feed.
      $edit = $this->getFeedEditArray();
      $edit['refresh'] = 1800; // Change refresh value.
      // Keep one field identical to the existing feed (skipped when
      // $same_field is the empty string).
      if (isset($feed->{$same_field}->value)) {
        $edit[$same_field] = $feed->{$same_field}->value;
      }
      $this->drupalPostForm('aggregator/sources/' . $feed->id() . '/configure', $edit, t('Save'));
      $this->assertRaw(t('The feed %name has been updated.', array('%name' => $edit['title[0][value]'])), format_string('The feed %name has been updated.', array('%name' => $edit['title[0][value]'])));

      // Check feed data: saving should land back on the feed's source page.
      $this->assertEqual($this->getUrl(), url('aggregator/sources/' . $feed->id(), array('absolute' => TRUE)));
      $this->assertTrue($this->uniqueFeed($edit['title[0][value]'], $edit['url[0][value]']), 'The feed is unique.');

      // Check feed source.
      $this->drupalGet('aggregator/sources/' . $feed->id());
      $this->assertResponse(200, 'Feed source exists.');
      $this->assertText($edit['title[0][value]'], 'Page title');

      // Delete feed.
      $feed->title = $edit['title[0][value]']; // Set correct title so deleteFeed() will work.
      $this->deleteFeed($feed);
    }
  }
}
// Test fixture: Inner deliberately extends a type that is never imported or
// defined, so compilation is expected to fail with the diagnostic named in
// the expected-error marker below. Do NOT "fix" the missing type — the
// marker line's position relative to the faulty declaration is significant
// to the test harness.
class UnimportedExtends2 {
    //:: error: cannot find symbol
    class Inner extends UnimportedClass {}
}
/*
 * Copyright (C) 2008-2016 TrinityCore <http://www.trinitycore.org/>
 * Copyright (C) 2005-2009 MaNGOS <http://getmangos.com/>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __TRINITY_VEHICLE_H
#define __TRINITY_VEHICLE_H

#include "ObjectDefines.h"
#include "Object.h"
#include "VehicleDefines.h"
#include "Unit.h"
#include <list>

struct VehicleEntry;
class Unit;
class VehicleJoinEvent;

/// Vehicle kit attached to a Unit: manages seats, passengers and pending
/// join events. Construction/destruction is restricted to the Unit friend
/// methods below.
class TC_GAME_API Vehicle : public TransportBase
{
    protected:
        // Only Unit::CreateVehicleKit may construct a Vehicle.
        friend bool Unit::CreateVehicleKit(uint32 id, uint32 creatureEntry);
        Vehicle(Unit* unit, VehicleEntry const* vehInfo, uint32 creatureEntry);

        // Only Unit::RemoveVehicleKit may destroy a Vehicle.
        friend void Unit::RemoveVehicleKit();
        ~Vehicle();

    public:
        void Install();
        void Uninstall();
        void Reset(bool evading = false);
        void InstallAllAccessories(bool evading);
        void ApplyAllImmunities();
        void InstallAccessory(uint32 entry, int8 seatId, bool minion, uint8 type, uint32 summonTime);   //! May be called from scripts

        /// The underlying unit carrying this vehicle kit.
        Unit* GetBase() const { return _me; }
        VehicleEntry const* GetVehicleInfo() const { return _vehicleInfo; }
        uint32 GetCreatureEntry() const { return _creatureEntry; }

        bool HasEmptySeat(int8 seatId) const;
        Unit* GetPassenger(int8 seatId) const;
        SeatMap::const_iterator GetNextEmptySeat(int8 seatId, bool next) const;
        uint8 GetAvailableSeatCount() const;

        bool AddPassenger(Unit* passenger, int8 seatId = -1);
        void EjectPassenger(Unit* passenger, Unit* controller);
        Vehicle* RemovePassenger(Unit* passenger);
        void RelocatePassengers();
        void RemoveAllPassengers();
        bool IsVehicleInUse() const;

        void SetLastShootPos(Position const& pos) { _lastShootPos.Relocate(pos); }
        Position const& GetLastShootPos() const { return _lastShootPos; }

        SeatMap Seats;                                      ///< The collection of all seats on the vehicle. Including vacant ones.

        VehicleSeatEntry const* GetSeatForPassenger(Unit const* passenger) const;

        void RemovePendingEventsForPassenger(Unit* passenger);

    protected:
        friend class VehicleJoinEvent;
        uint32 UsableSeatNum;                               ///< Number of seats that match VehicleSeatEntry::UsableByPlayer, used for proper display flags

    private:
        /// Lifecycle state used for internal sanity checks (see _status).
        enum Status
        {
            STATUS_NONE,
            STATUS_INSTALLED,
            STATUS_UNINSTALLING,
        };

        SeatMap::iterator GetSeatIteratorForPassenger(Unit* passenger);
        void InitMovementInfoForBase();

        /// This method transforms supplied transport offsets into global coordinates
        void CalculatePassengerPosition(float& x, float& y, float& z, float* o /*= NULL*/) const override
        {
            TransportBase::CalculatePassengerPosition(x, y, z, o,
                GetBase()->GetPositionX(), GetBase()->GetPositionY(),
                GetBase()->GetPositionZ(), GetBase()->GetOrientation());
        }

        /// This method transforms supplied global coordinates into local offsets
        void CalculatePassengerOffset(float& x, float& y, float& z, float* o /*= NULL*/) const override
        {
            TransportBase::CalculatePassengerOffset(x, y, z, o,
                GetBase()->GetPositionX(), GetBase()->GetPositionY(),
                GetBase()->GetPositionZ(), GetBase()->GetOrientation());
        }

        void RemovePendingEvent(VehicleJoinEvent* e);
        void RemovePendingEventsForSeat(int8 seatId);

    private:
        Unit* _me;                                          ///< The underlying unit with the vehicle kit. Can be player or creature.
        VehicleEntry const* _vehicleInfo;                   ///< DBC data for vehicle
        GuidSet vehiclePlayers;

        uint32 _creatureEntry;                              ///< Can be different than the entry of _me in case of players
        Status _status;                                     ///< Internal variable for sanity checks
        Position _lastShootPos;

        typedef std::list<VehicleJoinEvent*> PendingJoinEventContainer;
        PendingJoinEventContainer _pendingJoinEvents;       ///< Collection of delayed join events for prospective passengers
};

/// Delayed event that seats a prospective passenger on its target vehicle.
/// Only Vehicle may create/inspect these (friend below).
class TC_GAME_API VehicleJoinEvent : public BasicEvent
{
    friend class Vehicle;

    protected:
        VehicleJoinEvent(Vehicle* v, Unit* u) : Target(v), Passenger(u), Seat(Target->Seats.end()) { }
        ~VehicleJoinEvent();

        bool Execute(uint64, uint32) override;
        void Abort(uint64) override;

        Vehicle* Target;
        Unit* Passenger;
        SeatMap::iterator Seat;
};
#endif
package com.baidu.disconf.client.core.processor.impl;

import java.lang.reflect.Field;
import java.lang.reflect.Modifier;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.baidu.disconf.client.common.model.DisConfCommonModel;
import com.baidu.disconf.client.common.model.DisconfCenterItem;
import com.baidu.disconf.client.config.DisClientConfig;
import com.baidu.disconf.client.core.processor.DisconfCoreProcessor;
import com.baidu.disconf.client.fetcher.FetcherMgr;
import com.baidu.disconf.client.store.DisconfStoreProcessor;
import com.baidu.disconf.client.store.DisconfStoreProcessorFactory;
import com.baidu.disconf.client.store.processor.model.DisconfValue;
import com.baidu.disconf.client.watch.WatchMgr;
import com.baidu.disconf.core.common.constants.DisConfigTypeEnum;

/**
 * Config-item processor implementation: downloads item values, stores them
 * in the local repository, registers change watches and injects values into
 * the owning instances.
 *
 * @author liaoqiqi
 * @version 2014-8-4
 */
public class DisconfItemCoreProcessorImpl implements DisconfCoreProcessor {

    protected static final Logger LOGGER = LoggerFactory.getLogger(DisconfItemCoreProcessorImpl.class);

    // Watch manager (registers change-notification watches)
    private WatchMgr watchMgr = null;

    // Fetcher manager (downloads values from the remote server)
    private FetcherMgr fetcherMgr = null;

    // Store processor (local repository of config items)
    private DisconfStoreProcessor disconfStoreProcessor = DisconfStoreProcessorFactory.getDisconfStoreItemProcessor();

    public DisconfItemCoreProcessorImpl(WatchMgr watchMgr, FetcherMgr fetcherMgr) {

        this.fetcherMgr = fetcherMgr;
        this.watchMgr = watchMgr;
    }

    /**
     * Process every config item known to the store.
     */
    @Override
    public void processAllItems() {

        /**
         * Walk the full config-item key list.
         */
        for (String key : disconfStoreProcessor.getConfKeySet()) {
            processOneItem(key);
        }
    }

    @Override
    public void processOneItem(String key) {

        LOGGER.debug("==============\tstart to process disconf item: " + key + "\t=============================");

        DisconfCenterItem disconfCenterItem = (DisconfCenterItem) disconfStoreProcessor.getConfData(key);

        if (disconfCenterItem != null) {
            try {
                updateOneConfItem(key, disconfCenterItem);
            } catch (Exception e) {
                // Log and continue; one failing item must not abort processing.
                LOGGER.error(e.toString(), e);
            }
        }
    }

    /**
     * Update one config item: refresh the store, then re-inject the value
     * into the owning instance.
     */
    private void updateOneConf(String keyName) throws Exception {

        DisconfCenterItem disconfCenterItem = (DisconfCenterItem) disconfStoreProcessor.getConfData(keyName);

        if (disconfCenterItem != null) {

            // refresh the store
            updateOneConfItem(keyName, disconfCenterItem);

            // re-inject into the instance
            inject2OneConf(keyName, disconfCenterItem);
        }
    }

    /**
     * Download (when disconf is enabled) and store one config item, then
     * register a watch for future changes.
     */
    private void updateOneConfItem(String keyName, DisconfCenterItem disconfCenterItem) throws Exception {

        if (disconfCenterItem == null) {
            throw new Exception("cannot find disconfCenterItem " + keyName);
        }

        String value = null;

        //
        // Remote download only when disconf is enabled; otherwise the
        // local/default value is used (value stays null).
        //
        if (DisClientConfig.getInstance().ENABLE_DISCONF) {

            //
            // download the config value
            //
            try {

                String url = disconfCenterItem.getRemoteServerUrl();
                value = fetcherMgr.getValueFromServer(url);
                if (value != null) {
                    LOGGER.debug("value: " + value);
                }

            } catch (Exception e) {
                // Fall back to the local value on any download failure.
                LOGGER.error("cannot use remote configuration: " + keyName, e);
                LOGGER.info("using local variable: " + keyName);
            }

            LOGGER.debug("download ok.");
        }

        //
        // inject into the store
        //
        disconfStoreProcessor.inject2Store(keyName, new DisconfValue(value, null));
        LOGGER.debug("inject ok.");

        //
        // Watch
        //
        if (DisClientConfig.getInstance().ENABLE_DISCONF) {
            if (watchMgr != null) {

                DisConfCommonModel disConfCommonModel = disconfStoreProcessor.getCommonModel(keyName);
                watchMgr.watchPath(this, disConfCommonModel, keyName, DisConfigTypeEnum.ITEM, value);
                LOGGER.debug("watch ok.");

            } else {
                LOGGER.warn("cannot monitor {} because watch mgr is null", keyName);
            }
        }
    }

    /**
     * Change notification: refresh one config item and fire its callback.
     */
    @Override
    public void updateOneConfAndCallback(String key) throws Exception {

        // refresh the config item
        updateOneConf(key);

        // callback
        DisconfCoreProcessUtils.callOneConf(disconfStoreProcessor, key);
    }

    /**
     * Inject one config item into its owning instance.
     */
    private void inject2OneConf(String key, DisconfCenterItem disconfCenterItem) {

        if (disconfCenterItem == null) {
            return;
        }

        try {

            Object object = null;
            Field field = disconfCenterItem.getField();

            //
            // Static vs. instance field: a Spring bean is only needed for
            // instance fields; static fields are injected with object == null.
            //
            if (!Modifier.isStatic(field.getModifiers())) {
                object = DisconfCoreProcessUtils.getSpringBean(field.getDeclaringClass());
            }

            disconfStoreProcessor.inject2Instance(object, key);

        } catch (Exception e) {
            LOGGER.warn(e.toString(), e);
        }
    }

    /**
     * Inject values for every known config item into their instances.
     */
    @Override
    public void inject2Conf() {

        /**
         * Walk the full config-item key list.
         */
        for (String key : disconfStoreProcessor.getConfKeySet()) {

            LOGGER.debug("==============\tstart to inject value to disconf item instance: " + key +
                    "\t=============================");

            DisconfCenterItem disconfCenterItem = (DisconfCenterItem) disconfStoreProcessor.getConfData(key);
            inject2OneConf(key, disconfCenterItem);
        }
    }

}
/*
 * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

package nsk.jdi.ShortValue.compareTo;

import nsk.share.*;
import nsk.share.jpda.*;
import nsk.share.jdi.*;

/**
 * The debugged application of the test.
 *
 * Runs in the debuggee VM: it publishes a set of static fields of every
 * primitive type (plus Object) whose values the debugger side mirrors
 * through JDI, then synchronizes with the debugger over an IOPipe
 * (READY -> QUIT handshake) and exits.
 *
 * NOTE(review): the strings in {@code testedFields} must stay identical to
 * the static field names below — the debugger side looks the fields up by
 * these names via JDI/reflection. Do not rename either list independently.
 */
public class compareto001a {

    //----------------------------------------------------- immutable common fields

    private static int exitStatus;          // exit code reported to the harness
    private static ArgumentHandler argHandler;
    private static Log log;
    private static IOPipe pipe;             // synchronization channel to the debugger

    //---------------------------------------------------------- immutable common methods

    // Writes an informational line, prefixed so the harness can attribute it
    // to the debuggee process.
    static void display(String msg) {
        log.display("debuggee > " + msg);
    }

    // Writes a failure line with the debuggee prefix.
    static void complain(String msg) {
        log.complain("debuggee FAILURE > " + msg);
    }

    /**
     * Blocks until the debugger sends a line over the pipe and checks that it
     * equals the expected signal; any other line aborts the run.
     *
     * @param signal the exact signal string expected from the debugger
     * @throws Failure if an unexpected signal is received
     */
    public static void receiveSignal(String signal) {
        String line = pipe.readln();
        if ( !line.equals(signal) )
            throw new Failure("UNEXPECTED debugger's signal " + line);
        // NOTE(review): "debuger's" typo is preserved — the log text may be
        // matched by harness tooling; confirm before correcting.
        display("debuger's <" + signal + "> signal received.");
    }

    //------------------------------------------------------ mutable common fields

    //------------------------------------------------------ test specific fields

    // Instance whose short fields the debugger compares via ShortValue.compareTo().
    static compareto001aClassToCheck testedObj = new compareto001aClassToCheck();

    // Names of the static fields below, in the order the debugger inspects them.
    static String[] testedFields = {
                    "cmpObjNULL",
                    "cmpObject",
                    "cmpBoolMAX",
                    "cmpBoolMIN",
                    "cmpByteMAX",
                    "cmpByte1",
                    "cmpByte0",
                    "cmpByte_1",
                    "cmpByteMIN",
                    "cmpCharMAX",
                    "cmpCharMIN",
                    "cmpDoubleMAX",
                    "cmpDouble1",
                    "cmpDouble0",
                    "cmpDouble_1",
                    "cmpDoubleMIN",
                    "cmpFloatMAX",
                    "cmpFloat1",
                    "cmpFloat0",
                    "cmpFloat_1",
                    "cmpFloatMIN",
                    "cmpIntMAX",
                    "cmpInt1",
                    "cmpInt0",
                    "cmpInt_1",
                    "cmpIntMIN",
                    "cmpLongMAX",
                    "cmpLong1",
                    "cmpLong0",
                    "cmpLong_1",
                    "cmpLongMIN",
                    "cmpShortMAX",
                    "cmpShort1",
                    "cmpShort0",
                    "cmpShort_1",
                    "cmpShortMIN"
    };

    // Boundary and sign-representative values of every type; the debugger
    // mirrors these to exercise comparisons against all Value subtypes.
    static Object    cmpObjNULL   = null;
    static Object    cmpObject    = new Object();
    static boolean   cmpBoolMAX   = true;
    static boolean   cmpBoolMIN   = false;
    static byte      cmpByteMAX   = Byte.MAX_VALUE;
    static byte      cmpByte1     = 1;
    static byte      cmpByte0     = 0;
    static byte      cmpByte_1    = -1;
    static byte      cmpByteMIN   = Byte.MIN_VALUE;
    static char      cmpCharMAX   = Character.MAX_VALUE;
    static char      cmpCharMIN   = Character.MIN_VALUE;
    static double    cmpDoubleMAX = Double.MAX_VALUE;
    static double    cmpDouble1   = 1;
    static double    cmpDouble0   = 0;
    static double    cmpDouble_1  = -1;
    static double    cmpDoubleMIN = Double.MIN_VALUE;
    static float     cmpFloatMAX  = Float.MAX_VALUE;
    static float     cmpFloat1    = 1;
    static float     cmpFloat0    = 0;
    static float     cmpFloat_1   = -1;
    static float     cmpFloatMIN  = Float.MIN_VALUE;
    static int       cmpIntMAX    = Integer.MAX_VALUE;
    static int       cmpInt1      = 1;
    static int       cmpInt0      = 0;
    static int       cmpInt_1     = -1;
    static int       cmpIntMIN    = Integer.MIN_VALUE;
    static long      cmpLongMAX   = Long.MAX_VALUE;
    static long      cmpLong1     = 1;
    static long      cmpLong0     = 0;
    static long      cmpLong_1    = -1;
    static long      cmpLongMIN   = Long.MIN_VALUE;
    static short     cmpShortMAX  = Short.MAX_VALUE;
    static short     cmpShort1    = 1;
    static short     cmpShort0    = 0;
    static short     cmpShort_1   = -1;
    static short     cmpShortMIN  = Short.MIN_VALUE;

    //------------------------------------------------------ mutable common method

    /**
     * Debuggee entry point: sets up logging and the IOPipe, announces READY,
     * waits for the debugger's QUIT, and exits with the JCK-adjusted status.
     */
    public static void main (String argv[]) {
        exitStatus = Consts.TEST_FAILED;  // pessimistic default until QUIT arrives
        argHandler = new ArgumentHandler(argv);
        log = argHandler.createDebugeeLog();
        pipe = argHandler.createDebugeeIOPipe(log);
        try {
            pipe.println(compareto001.SIGNAL_READY);
//            receiveSignal(compareto001.SIGNAL_GO);
            receiveSignal(compareto001.SIGNAL_QUIT);
            // NOTE(review): "succesfully" typo preserved — log text may be
            // compared by tooling.
            display("completed succesfully.");
            System.exit(Consts.TEST_PASSED + Consts.JCK_STATUS_BASE);
        } catch (Failure e) {
            log.complain(e.getMessage());
            System.exit(Consts.TEST_FAILED + Consts.JCK_STATUS_BASE);
        }
    }

    //--------------------------------------------------------- test specific methods

}

//--------------------------------------------------------- test specific classes

// Holds the short-valued fields the debugger fetches and compares through
// JDI ShortValue.compareTo(); field names are looked up by the debugger.
class compareto001aClassToCheck {
    public short shortMAX = Short.MAX_VALUE;
    public short short1   = 1;
    public short short0   = 0;
    public short short_1  = -1;
    public short shortMIN = Short.MIN_VALUE;
}
/**@license boxplus image transition engine
 * @author  Levente Hunyadi
 * @version 1.4.2
 * @remarks Copyright (C) 2009-2010 Levente Hunyadi
 * @remarks Licensed under GNU/GPLv3, see http://www.gnu.org/licenses/gpl-3.0.html
 * @see http://hunyadi.info.hu/projects/boxplus
 **/

/*
 * boxplus: a lightweight pop-up window engine shipped with sigplus
 * Copyright 2009-2010 Levente Hunyadi
 *
 * boxplus is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * boxplus is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with boxplus.  If not, see <http://www.gnu.org/licenses/>.
 */

// Alias jQuery so the plugin also works when a host page re-binds `$`.
if (typeof(__jQuery__) == 'undefined') {
    var __jQuery__ = jQuery;
}

(function ($) {

var CLASS_DISABLED = 'boxplus-disabled';

// cached Math shortcuts
var max = Math.max;
var floor = Math.floor;
var ceil = Math.ceil;

/**
* Maximum computed width of matched elements including margin, border and padding.
*/
$.fn.maxWidth = function () {
    var width = 0;
    this.each( function(index, el) {
        width = max(width, $(el).safeWidth());
    });
    return width;
}

/**
* Maximum computed height of matched elements including margin, border and padding.
*/
$.fn.maxHeight = function () {
    var height = 0;
    this.each( function(index, el) {
        height = max(height, $(el).safeHeight());
    });
    return height;
}

/**
* "Safe" dimension of an element.
* Some browsers give invalid values with .width() but others give the meaningless,
* value "auto" with .css('width'), this function bridges the differences.
*/
function _safeDimension(obj, dim) {
    var cssvalue = parseInt(obj.css(dim));
    // fall back to the jQuery getter when the CSS value is not numeric ("auto")
    return isNaN(cssvalue) ? obj[dim]() : cssvalue;
}

$.fn.safeWidth = function () {
    return _safeDimension(this, 'width');
}

$.fn.safeHeight = function () {
    return _safeDimension(this, 'height');
}

/**
* Creates a new image slider from a collection of images.
* The method should be called on a ul or ol element that wraps a set of li elements.
*/
$.fn.boxplusTransition = function (settings) {
    // default configuration properties
    var defaults = {
        navigation: 'horizontal',  // orientation of navigation buttons, or do not show navigation buttons at all ['horizontal'|'vertical'|false]
        loop: true,                // whether the image sequence loops such that the first image follows the last [true|false]
        contextmenu: true,         // whether the context menu appears when right-clicking an image [true|false]
        orientation: 'vertical',   // alignment of bars used in transition ['vertical'|'horizontal']
        slices: 15,                // number of bars to use in transition animation
        effect: 'fade',            // image transition effect ['fade'|'bars'|'bars+fade'|'shutter'|'shutter+fade']
        easing: 'swing',
        duration: 500,             // duration for transition animation [ms]
        delay: 4000                // delay between successive animation steps [ms]
    };
    // NOTE(review): $.extend(defaults, settings) mutates `defaults`; harmless
    // here because `defaults` is a fresh object per call.
    settings = $.extend(defaults, settings);

    var lists = this.filter('ul, ol');  // filter elements that are not lists

    // iterate over elements if invoked on an element collection
    lists.each(function () {
        // short-hand access to settings
        var isNavigationVertical = settings.navigation == 'vertical';
        var isOrientationHorizontal = settings.orientation == 'horizontal';
        var sliceCount = settings.slices;
        var duration = settings.duration;
        var delay = settings.delay;

        // status information
        var sliderIndexPosition = 0;  // index of item currently shown
        var animation = false;        // true if an animation is in progress

        // DOM elements
        var list = $(this).wrap('<div />').before('<div />').addClass('boxplus-hidden');
        var wrapper = list.parent().addClass('boxplus-wrapper');
        var items = $('li', list).css({ position: 'absolute', left: 0, top: 0 }).find('img:first');

        // forces following an anchor (in a cancellable way) even when click event is triggered with jQuery
        items.parent('a').click(function (event) {
            if (!event.isDefaultPrevented()) {
                location.href = this.href;
            }
        });
        var container = list.prev().addClass('boxplus-transition').addClass(CLASS_DISABLED).click(function () {
            items.eq(sliderIndexPosition).parent('a').click();  // when an image is clicked, the anchor wrapping the original image (if any) should be followed
        });

        // get maximum width and height of image slider items
        var itemCount = items.length;
        var itemWidth = items.maxWidth();
        var itemHeight = items.maxHeight();

        // set width and height of image container
        wrapper.add(container).css({ width: itemWidth, height: itemHeight });

        switch (settings.navigation) {
            case 'horizontal':
            case 'vertical':
                var cls = 'boxplus-' + settings.navigation;
                container.addClass(cls);

                // setup overlay navigation controls
                function _addButton(cls) {
                    return '<div class="boxplus-' + cls + '" />';
                }
                container.prepend(
                    $(_addButton('prev') + _addButton('next')).addClass(cls).addClass(
                        (isNavigationVertical ? itemWidth : itemHeight) < 120 ? 'boxplus-small' : 'boxplus-large'
                    )
                );

                // bind events for navigation controls
                $('.boxplus-prev', container).click(scrollPrevious);
                $('.boxplus-next', container).click(scrollNext);
        }

        if (!settings.contextmenu) {
            $(document).bind('contextmenu', function (event) {  // subscribe to right-click event
                return !container.children().add(container).filter(event.target).size();  // prevent right-click on image
            });
        }

        // add bars to container for animation
        var sliceDim = (isOrientationHorizontal ? itemHeight : itemWidth) / sliceCount;
        for (var sliceIndex = 0; sliceIndex < sliceCount; sliceIndex++) {
            var sliceOffset = floor(sliceIndex*sliceDim);
            $('<div class="boxplus-transition-bars" />').css({
                left: isOrientationHorizontal ? 0 : sliceOffset,
                top: isOrientationHorizontal ? sliceOffset : 0,
                height: isOrientationHorizontal ? sliceDim : itemHeight,
                width: isOrientationHorizontal ? itemWidth : sliceDim,
                visibility: 'hidden'
            }).appendTo(container);
        }

        // update visibility of navigation controls
        _updatePaging();
        container.removeClass(CLASS_DISABLED);
        scrollFirst();

        // slider animation
        if (delay > 0) {
            // ensure the inter-slide delay always exceeds the transition length
            delay = max(delay, duration + 500);
            var intervalID = window.setInterval(scrollNext, delay);

            // stop animation when mouse moves over an image
            container.mouseover(function () {
                window.clearInterval(intervalID);
            }).mouseout(function () {
                intervalID = window.setInterval(scrollNext, delay);
            });
        }

        //
        // Callback functions
        //

        function scrollFirst() {
            return scroll('first');
        }

        function scrollPrevious() {
            return scroll('prev');
        }

        function scrollNext() {
            return scroll('next');
        }

        function scrollLast() {
            return scroll('last');
        }

        /**
        * Sets the image shown as the background image of elements.
        * @param e The element whose background-image property to set.
        * @param x Horizontal offset of the element within the container.
        * @param y Vertical offset of the element within the container.
        */
        function _setImage(e, x, y) {
            var item = items.eq(sliderIndexPosition);  // item to be shown
            e.css({
                backgroundImage: 'url("' + item.attr('src') + '")',
                backgroundPosition: ((itemWidth - item.safeWidth()) / 2 - x) + 'px ' + ((itemHeight - item.safeHeight()) / 2 - y) + 'px'
            });
        }

        /**
        * Preloads an image for later display.
        * @param item The element to use to acquire the URL of the image.
        */
        function _preloadImage(item) {
            var longdesc = item.attr('longdesc');
            if (longdesc) {  // higher-resolution image is available
                item.attr('src', longdesc).attr('longdesc', '');
            }
        }

        // Preload the current, previous and next slides.
        // NOTE(review): (sliderIndexPosition - 1) % itemCount is negative at
        // index 0 in JavaScript; jQuery .eq(-1) selects from the end, so this
        // wraps to the last item by that behavior rather than by arithmetic.
        function _preloadImages() {
            _preloadImage(items.eq(sliderIndexPosition));
            _preloadImage(items.eq((sliderIndexPosition - 1) % itemCount));
            _preloadImage(items.eq((sliderIndexPosition + 1) % itemCount));
        }

        /**
        * Execute image transition.
        * @param dir One of 'first' | 'prev' | 'next' | 'last'.
        * @return false to stop event propagation, or undefined on bad input.
        */
        function scroll(dir) {
            var bars = $('.boxplus-transition-bars', container);

            if (animation) {  // clear ongoing transitions
                _setImage(container, 0, 0);
                bars.clearQueue().stop().css('visibility', 'hidden');
            }
            animation = true;  // indicate an ongoing transition

            // NOTE(review): 'prev' at index 0 and 'next' at the last index rely
            // on jQuery's negative/modular .eq() indexing for wrap-around.
            switch (dir) {
                case 'first':
                    sliderIndexPosition = 0; break;
                case 'prev':
                    sliderIndexPosition = (sliderIndexPosition - 1) % itemCount; break;
                case 'next':
                    sliderIndexPosition = (sliderIndexPosition + 1) % itemCount; break;
                case 'last':
                    sliderIndexPosition = itemCount - 1; break;
                default:
                    return;
            };
            _updatePaging();
            _preloadImages();

            bars.css({  // reset bars background image, height, width, opacity, etc.
                opacity: 1
            }).each(function (index) {  // set the image shown as the background image of bars with computing offset position
                var bar = $(this);
                // distribute rounding error so slice edges stay contiguous
                var dim = ceil(index*sliceDim+sliceDim) - floor(index*sliceDim);
                bar.css({
                    height: isOrientationHorizontal ? dim : itemHeight,
                    width: isOrientationHorizontal ? itemWidth : dim
                });
                var position = bar.position();
                _setImage(bar, position.left, position.top);
            });

            // Each _transition* helper puts the bars into their animation start
            // state and returns the CSS target for $.animate().
            function _transitionFade() {
                bars.css('opacity', 0).show();
                return {opacity: 1};
            }

            function _transitionBars() {
                bars.css(isOrientationHorizontal ? 'width' : 'height', 0);
                if (isOrientationHorizontal) {
                    return {width: itemWidth};
                } else {
                    return {height: itemHeight};
                }
            }

            function _transitionShutter() {
                bars.css(isOrientationHorizontal ? 'height' : 'width', 0);
                if (isOrientationHorizontal) {
                    return {height: ceil(sliceDim)};
                } else {
                    return {width: ceil(sliceDim)};
                }
            }

            var target;
            switch (settings.effect) {
                case 'fade':
                    target = _transitionFade(); break;
                case 'bars':
                    target = _transitionBars(); break;
                case 'bars+fade':
                    target = $.extend(_transitionBars(), _transitionFade()); break;
                case 'shutter':
                    target = _transitionShutter(); break;
                case 'shutter+fade':
                    target = $.extend(_transitionShutter(), _transitionFade()); break;
            }
            bars.css('visibility', 'visible');

            // function to arrange bars in a specific order
            var ordfun = function (index) { return index; };
            switch (dir) {
                case 'first':
                case 'prev':
                    // animate right-to-left / bottom-to-top when going backwards
                    ordfun = function (index) { return sliceCount-1-index; };
                    break;
            }

            // register animation events for bars
            bars.each(function (index) {
                var k = ordfun(index);
                // NOTE(review): duration is hard-coded to 500 here, not
                // settings.duration — confirm whether intentional.
                var options = {
                    duration: 500,
                    easing: settings.easing
                };
                if (k == sliceCount-1) {  // last bar to finish unlocks the slider
                    $.extend(options, {
                        complete: function () {
                            animation = false;
                            _setImage(container, 0, 0);
                            bars.css('visibility', 'hidden');
                        }
                    });
                }
                // fire animation after an initial delay
                $(this).delay(k * duration / sliceCount).animate(target, options);
            });

            return false;  // prevent event propagation
        }

        /**
        * Update which navigation links are enabled.
        */
        function _updatePaging() {
            if (!settings.loop) {
                $('.boxplus-prev', container).toggleClass(CLASS_DISABLED, sliderIndexPosition <= 0);
                $('.boxplus-next', container).toggleClass(CLASS_DISABLED, sliderIndexPosition >= itemCount-1);
            }
        }
    });
    return this;  // support chaining
}

})(__jQuery__);
/* ----------------------------------------------------------------------
   LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
   http://lammps.sandia.gov, Sandia National Laboratories
   Steve Plimpton, sjplimp@sandia.gov

   Copyright (2003) Sandia Corporation.  Under the terms of Contract
   DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
   certain rights in this software.  This software is distributed under
   the GNU General Public License.

   See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */

#include <string.h>
#include <stdlib.h>
#include <math.h>
#include "fix_temp_rescale.h"
#include "atom.h"
#include "force.h"
#include "group.h"
#include "update.h"
#include "domain.h"
#include "region.h"
#include "comm.h"
#include "input.h"
#include "variable.h"
#include "modify.h"
#include "compute.h"
#include "error.h"

using namespace LAMMPS_NS;
using namespace FixConst;

enum{NOBIAS,BIAS};       // whether the temperature compute removes a bias
enum{CONSTANT,EQUAL};    // target temperature: fixed ramp vs equal-style variable

/* ----------------------------------------------------------------------
   Parse: fix ID group temp/rescale N Tstart Tstop window fraction
   Tstart may be an equal-style variable ("v_name"); a private temperature
   compute named <fix-ID>_temp is created for the fix group.
------------------------------------------------------------------------- */

FixTempRescale::FixTempRescale(LAMMPS *lmp, int narg, char **arg) :
  Fix(lmp, narg, arg),
  tstr(NULL), id_temp(NULL), tflag(0)
{
  if (narg < 8) error->all(FLERR,"Illegal fix temp/rescale command");

  nevery = force->inumeric(FLERR,arg[3]);
  if (nevery <= 0) error->all(FLERR,"Illegal fix temp/rescale command");

  // this fix reports a global scalar (accumulated energy), extensive
  scalar_flag = 1;
  global_freq = nevery;
  extscalar = 1;

  tstr = NULL;
  if (strstr(arg[4],"v_") == arg[4]) {
    // "v_name" prefix: target temperature comes from an equal-style variable
    int n = strlen(&arg[4][2]) + 1;
    tstr = new char[n];
    strcpy(tstr,&arg[4][2]);
    tstyle = EQUAL;
  } else {
    t_start = force->numeric(FLERR,arg[4]);
    t_target = t_start;
    tstyle = CONSTANT;
  }

  t_stop = force->numeric(FLERR,arg[5]);
  t_window = force->numeric(FLERR,arg[6]);
  fraction = force->numeric(FLERR,arg[7]);

  // create a new compute temp
  // id = fix-ID + temp, compute group = fix group

  int n = strlen(id) + 6;
  id_temp = new char[n];
  strcpy(id_temp,id);
  strcat(id_temp,"_temp");

  // NOTE(review): 6 slots allocated but only 3 used/passed to add_compute —
  // harmless over-allocation.
  char **newarg = new char*[6];
  newarg[0] = id_temp;
  newarg[1] = group->names[igroup];
  newarg[2] = (char *) "temp";
  modify->add_compute(3,newarg);
  delete [] newarg;
  tflag = 1;   // we own the compute and must delete it in the destructor

  energy = 0.0;
}

/* ---------------------------------------------------------------------- */

FixTempRescale::~FixTempRescale()
{
  delete [] tstr;

  // delete temperature if fix created it

  if (tflag) modify->delete_compute(id_temp);
  delete [] id_temp;
}

/* ---------------------------------------------------------------------- */

int FixTempRescale::setmask()
{
  int mask = 0;
  mask |= END_OF_STEP;      // rescaling runs after the timestep completes
  mask |= THERMO_ENERGY;    // contributes to thermodynamic energy output
  return mask;
}

/* ----------------------------------------------------------------------
   Resolve the temperature variable (if any) and the temperature compute;
   decide whether bias removal is needed when rescaling velocities.
------------------------------------------------------------------------- */

void FixTempRescale::init()
{
  // check variable

  if (tstr) {
    tvar = input->variable->find(tstr);
    if (tvar < 0)
      error->all(FLERR,"Variable name for fix temp/rescale does not exist");
    if (input->variable->equalstyle(tvar)) tstyle = EQUAL;
    else error->all(FLERR,"Variable for fix temp/rescale is invalid style");
  }

  int icompute = modify->find_compute(id_temp);
  if (icompute < 0)
    error->all(FLERR,"Temperature ID for fix temp/rescale does not exist");
  temperature = modify->compute[icompute];

  if (temperature->tempbias) which = BIAS;
  else which = NOBIAS;
}

/* ----------------------------------------------------------------------
   Every nevery steps: if current T deviates from target by more than
   t_window, scale velocities toward target by `fraction` and accumulate
   the removed kinetic energy in `energy`.
------------------------------------------------------------------------- */

void FixTempRescale::end_of_step()
{
  double t_current = temperature->compute_scalar();

  // there is nothing to do, if there are no degrees of freedom

  if (temperature->dof < 1) return;

  // protect against division by zero

  if (t_current == 0.0)
    error->all(FLERR,"Computed temperature for fix temp/rescale cannot be 0.0");

  // fractional progress through the run, for the Tstart -> Tstop ramp
  double delta = update->ntimestep - update->beginstep;
  if (delta != 0.0) delta /= update->endstep - update->beginstep;

  // set current t_target
  // if variable temp, evaluate variable, wrap with clear/add

  if (tstyle == CONSTANT)
    t_target = t_start + delta * (t_stop-t_start);
  else {
    modify->clearstep_compute();
    t_target = input->variable->compute_equal(tvar);
    if (t_target < 0.0)
      error->one(FLERR,
                 "Fix temp/rescale variable returned negative temperature");
    modify->addstep_compute(update->ntimestep + nevery);
  }

  // rescale velocity of appropriate atoms if outside window
  // for BIAS:
  //   temperature is current, so do not need to re-compute
  //   OK to not test returned v = 0, since factor is multiplied by v

  if (fabs(t_current-t_target) > t_window) {
    // move only `fraction` of the way toward the target temperature
    t_target = t_current - fraction*(t_current-t_target);
    double factor = sqrt(t_target/t_current);
    double efactor = 0.5 * force->boltz * temperature->dof;

    double **v = atom->v;
    int *mask = atom->mask;
    int nlocal = atom->nlocal;

    energy += (t_current-t_target) * efactor;

    if (which == NOBIAS) {
      for (int i = 0; i < nlocal; i++) {
        if (mask[i] & groupbit) {
          v[i][0] *= factor;
          v[i][1] *= factor;
          v[i][2] *= factor;
        }
      }
    } else {
      // remove the bias (e.g. streaming velocity), scale the thermal part,
      // then restore the bias
      for (int i = 0; i < nlocal; i++) {
        if (mask[i] & groupbit) {
          temperature->remove_bias(i,v[i]);
          v[i][0] *= factor;
          v[i][1] *= factor;
          v[i][2] *= factor;
          temperature->restore_bias(i,v[i]);
        }
      }
    }
  }
}

/* ----------------------------------------------------------------------
   fix_modify temp <ID>: swap in a user-provided temperature compute,
   releasing ownership of the private one.
------------------------------------------------------------------------- */

int FixTempRescale::modify_param(int narg, char **arg)
{
  if (strcmp(arg[0],"temp") == 0) {
    if (narg < 2) error->all(FLERR,"Illegal fix_modify command");
    if (tflag) {
      modify->delete_compute(id_temp);
      tflag = 0;
    }
    delete [] id_temp;
    int n = strlen(arg[1]) + 1;
    id_temp = new char[n];
    strcpy(id_temp,arg[1]);

    int icompute = modify->find_compute(id_temp);
    if (icompute < 0)
      error->all(FLERR,"Could not find fix_modify temperature ID");
    temperature = modify->compute[icompute];

    if (temperature->tempflag == 0)
      error->all(FLERR,
                 "Fix_modify temperature ID does not compute temperature");
    if (temperature->igroup != igroup && comm->me == 0)
      error->warning(FLERR,"Group for fix_modify temp != fix group");
    return 2;   // two arguments consumed
  }
  return 0;
}

/* ---------------------------------------------------------------------- */

void FixTempRescale::reset_target(double t_new)
{
  // collapse the ramp onto a single externally-imposed temperature
  t_target = t_start = t_stop = t_new;
}

/* ----------------------------------------------------------------------
   energy removed from the system so far by rescaling
------------------------------------------------------------------------- */

double FixTempRescale::compute_scalar()
{
  return energy;
}

/* ----------------------------------------------------------------------
   extract thermostat properties
------------------------------------------------------------------------- */

void *FixTempRescale::extract(const char *str, int &dim)
{
  if (strcmp(str,"t_target") == 0) {
    dim = 0;
    return &t_target;
  }
  return NULL;
}
class CBA_Extended_EventHandlers; class CfgVehicles { // Static weapons class LandVehicle; class StaticWeapon: LandVehicle { GVAR(canCarry) = 1; GVAR(carryPosition)[] = {0,1.2,0}; GVAR(carryDirection) = 0; GVAR(canDrag) = 1; GVAR(dragPosition)[] = {0,1.2,0}; GVAR(dragDirection) = 0; }; class StaticCannon: StaticWeapon { GVAR(canCarry) = 0; GVAR(canDrag) = 0; }; class StaticMortar; class Mortar_01_base_F: StaticMortar { GVAR(canCarry) = 1; GVAR(carryPosition)[] = {0,1.2,0}; GVAR(carryDirection) = 0; GVAR(canDrag) = 1; GVAR(dragPosition)[] = {0,1.2,0}; GVAR(dragDirection) = 0; }; // ammo boxes class ThingX; class Items_base_F; class ReammoBox_F: ThingX { GVAR(canCarry) = 0; GVAR(carryPosition)[] = {0,1,1}; GVAR(carryDirection) = 0; GVAR(canDrag) = 0; GVAR(dragPosition)[] = {0,1.2,0}; GVAR(dragDirection) = 0; }; class Slingload_base_F: ReammoBox_F { GVAR(canCarry) = 0; GVAR(canDrag) = 0; }; //remove actions from Taru Pods class Pod_Heli_Transport_04_base_F: Slingload_base_F { GVAR(canCarry) = 0; GVAR(canDrag) = 0; }; class EAST_Box_Base: ReammoBox_F { GVAR(canCarry) = 1; GVAR(canDrag) = 1; }; class IND_Box_Base: ReammoBox_F { GVAR(canCarry) = 1; GVAR(canDrag) = 1; }; /*class FIA_Box_Base_F: ReammoBox_F { GVAR(canCarry) = 1; GVAR(canDrag) = 1; };*/ class NATO_Box_Base: ReammoBox_F { GVAR(canCarry) = 1; GVAR(canDrag) = 1; }; class Box_Syndicate_Ammo_F: ReammoBox_F { GVAR(canCarry) = 1; GVAR(canDrag) = 1; }; class Box_IED_Exp_F: ReammoBox_F { GVAR(canCarry) = 1; GVAR(canDrag) = 1; }; class Box_Syndicate_Wps_F: ReammoBox_F { GVAR(canCarry) = 1; GVAR(canDrag) = 1; }; class Box_Syndicate_WpsLaunch_F: ReammoBox_F { GVAR(canCarry) = 1; GVAR(canDrag) = 1; }; class Box_NATO_Equip_F: ReammoBox_F { GVAR(canCarry) = 1; GVAR(carryDirection) = 270; GVAR(canDrag) = 1; }; class Box_NATO_Uniforms_F: ReammoBox_F { GVAR(canCarry) = 1; GVAR(carryDirection) = 270; GVAR(canDrag) = 1; }; // Remove Larger crate dragging support. 
// Would be better to allow some sort of joint push/drag functionality // Requiring 2 units to access the larger crates and attaching them together (a crappy method of doing it) // in order to move the bigger ones. Currently simply remove support. // I believe these crates are currently broken (hitbox doesn't work or something) in 1.22 (2014-07-04) class Box_East_AmmoVeh_F: EAST_Box_Base { GVAR(canCarry) = 0; GVAR(canDrag) = 0; }; class Box_NATO_AmmoVeh_F: NATO_Box_Base { GVAR(canCarry) = 0; GVAR(canDrag) = 0; }; class Box_IND_AmmoVeh_F: IND_Box_Base { GVAR(canCarry) = 0; GVAR(canDrag) = 0; }; //Plastic and metal case class PlasticCase_01_base_F: Items_base_F { class EventHandlers { class CBA_Extended_EventHandlers: CBA_Extended_EventHandlers {}; }; GVAR(canCarry) = 1; GVAR(carryPosition[]) = {0,1,1}; GVAR(carryDirection) = 270; GVAR(canDrag) = 1; GVAR(dragPosition[]) = {0,1.2,0}; GVAR(dragDirection) = 0; }; class MetalCase_01_base_F: Items_base_F { class EventHandlers { class CBA_Extended_EventHandlers: CBA_Extended_EventHandlers {}; }; GVAR(canCarry) = 1; GVAR(carryPosition[]) = {0,1,1}; GVAR(carryDirection) = 270; GVAR(canDrag) = 1; GVAR(dragPosition[]) = {0,1.2,0}; GVAR(dragDirection) = 0; }; // Barrier class RoadCone_F: ThingX { class EventHandlers { class CBA_Extended_EventHandlers: CBA_Extended_EventHandlers {}; }; GVAR(canCarry) = 1; GVAR(carryPosition)[] = {0,1,1}; GVAR(carryDirection) = 0; GVAR(canDrag) = 1; GVAR(dragPosition)[] = {0,1.2,0}; GVAR(dragDirection) = 0; }; class RoadBarrier_F: RoadCone_F { GVAR(carryPosition)[] = {0,1,0.300671}; }; // Misc crates class Constructions_base_F; class Land_WoodenBox_F: Constructions_base_F { class EventHandlers { class CBA_Extended_EventHandlers: CBA_Extended_EventHandlers {}; }; GVAR(canCarry) = 1; GVAR(carryPosition[]) = {0,1,1}; GVAR(carryDirection) = 270; GVAR(canDrag) = 1; GVAR(dragPosition[]) = {0,1.4,0}; GVAR(dragDirection) = 0; }; class Land_WoodenCrate_01_F: ThingX { class EventHandlers { class 
CBA_Extended_EventHandlers: CBA_Extended_EventHandlers {}; }; GVAR(canCarry) = 1; GVAR(carryPosition[]) = {0,1,1}; GVAR(carryDirection) = 270; GVAR(canDrag) = 1; GVAR(dragPosition[]) = {0,1.5,0}; GVAR(dragDirection) = 90; }; class ACE_RepairItem_Base: ThingX {}; class ACE_Track: ACE_RepairItem_Base { GVAR(canCarry) = 1; GVAR(carryPosition)[] = {0,1,1}; GVAR(carryDirection) = 0; }; class ACE_Wheel: ACE_RepairItem_Base { GVAR(canCarry) = 1; GVAR(carryPosition)[] = {0,1,1}; GVAR(carryDirection) = 0; }; class Lamps_base_F; class Land_PortableLight_single_F: Lamps_base_F { GVAR(canCarry) = 1; GVAR(carryPosition)[] = {0,1.2,0}; GVAR(carryDirection) = 180; GVAR(canDrag) = 1; GVAR(dragPosition)[] = {0,1.2,0}; GVAR(dragDirection) = 180; }; };
/* Copyright (C) 1995, 1996, 1997, 1998 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with the GNU C Library; see the file COPYING.LIB. If not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <errno.h> #include <sys/types.h> #include <sys/ptrace.h> #include <stdarg.h> #include <sysdep.h> #include <sys/syscall.h> extern long int __syscall_ptrace (int, pid_t, void *, void *); long int ptrace (enum __ptrace_request request, ...) { long int res, ret; va_list ap; pid_t pid; void *addr, *data; va_start (ap, request); pid = va_arg (ap, pid_t); addr = va_arg (ap, void *); data = va_arg (ap, void *); va_end (ap); if (request > 0 && request < 4) data = &ret; res = INLINE_SYSCALL (ptrace, 4, request, pid, addr, data); if (res >= 0 && request > 0 && request < 4) { __set_errno (0); return ret; } return res; }
<html lang="en"> <head> <title>Canonical format - Untitled</title> <meta http-equiv="Content-Type" content="text/html"> <meta name="description" content="Untitled"> <meta name="generator" content="makeinfo 4.13"> <link title="Top" rel="start" href="index.html#Top"> <link rel="up" href="BFD-outline.html#BFD-outline" title="BFD outline"> <link rel="prev" href="BFD-information-loss.html#BFD-information-loss" title="BFD information loss"> <link href="http://www.gnu.org/software/texinfo/" rel="generator-home" title="Texinfo Homepage"> <!-- This file documents the GNU linker LD (Sourcery CodeBench Lite 2011.09-69) version 2.21.53. Copyright (C) 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc. Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.3 or any later version published by the Free Software Foundation; with no Invariant Sections, with no Front-Cover Texts, and with no Back-Cover Texts. 
A copy of the license is included in the section entitled ``GNU Free Documentation License''.--> <meta http-equiv="Content-Style-Type" content="text/css"> <style type="text/css"><!-- pre.display { font-family:inherit } pre.format { font-family:inherit } pre.smalldisplay { font-family:inherit; font-size:smaller } pre.smallformat { font-family:inherit; font-size:smaller } pre.smallexample { font-size:smaller } pre.smalllisp { font-size:smaller } span.sc { font-variant:small-caps } span.roman { font-family:serif; font-weight:normal; } span.sansserif { font-family:sans-serif; font-weight:normal; } --></style> <link rel="stylesheet" type="text/css" href="../cs.css"> </head> <body> <div class="node"> <a name="Canonical-format"></a> <p> Previous:&nbsp;<a rel="previous" accesskey="p" href="BFD-information-loss.html#BFD-information-loss">BFD information loss</a>, Up:&nbsp;<a rel="up" accesskey="u" href="BFD-outline.html#BFD-outline">BFD outline</a> <hr> </div> <h4 class="subsection">5.1.2 The BFD canonical object-file format</h4> <p>The greatest potential for loss of information occurs when there is the least overlap between the information provided by the source format, that stored by the canonical format, and that needed by the destination format. A brief description of the canonical form may help you understand which kinds of data you can count on preserving across conversions. <a name="index-BFD-canonical-format-699"></a><a name="index-internal-object_002dfile-format-700"></a> <dl> <dt><em>files</em><dd>Information stored on a per-file basis includes target machine architecture, particular implementation format type, a demand pageable bit, and a write protected bit. Information like Unix magic numbers is not stored here&mdash;only the magic numbers' meaning, so a <code>ZMAGIC</code> file would have both the demand pageable bit and the write protected text bit set. 
The byte order of the target is stored on a per-file basis, so that big- and little-endian object files may be used with one another. <br><dt><em>sections</em><dd>Each section in the input file contains the name of the section, the section's original address in the object file, size and alignment information, various flags, and pointers into other BFD data structures. <br><dt><em>symbols</em><dd>Each symbol contains a pointer to the information for the object file which originally defined it, its name, its value, and various flag bits. When a BFD back end reads in a symbol table, it relocates all symbols to make them relative to the base of the section where they were defined. Doing this ensures that each symbol points to its containing section. Each symbol also has a varying amount of hidden private data for the BFD back end. Since the symbol points to the original file, the private data format for that symbol is accessible. <code>ld</code> can operate on a collection of symbols of wildly different formats without problems. <p>Normal global and simple local symbols are maintained on output, so an output file (no matter its format) will retain symbols pointing to functions and to global, static, and common variables. Some symbol information is not worth retaining; in <code>a.out</code>, type information is stored in the symbol table as long symbol names. This information would be useless to most COFF debuggers; the linker has command line switches to allow users to throw it away. <p>There is one word of type information within the symbol, so if the format supports symbol type information within symbols (for example, COFF, IEEE, Oasys) and the type is simple enough to fit within one word (nearly everything but aggregates), the information will be preserved. 
<br><dt><em>relocation level</em><dd>Each canonical BFD relocation record contains a pointer to the symbol to relocate to, the offset of the data to relocate, the section the data is in, and a pointer to a relocation type descriptor. Relocation is performed by passing messages through the relocation type descriptor and the symbol pointer. Therefore, relocations can be performed on output data using a relocation method that is only available in one of the input formats. For instance, Oasys provides a byte relocation format. A relocation record requesting this relocation type would point indirectly to a routine to perform this, so the relocation may be performed on a byte being written to a 68k COFF file, even though 68k COFF has no such relocation type. <br><dt><em>line numbers</em><dd>Object formats can contain, for debugging purposes, some form of mapping between symbols, source line numbers, and addresses in the output file. These addresses have to be relocated along with the symbol information. Each symbol with an associated list of line number records points to the first record of the list. The head of a line number list consists of a pointer to the symbol, which allows finding out the address of the function whose line number is being described. The rest of the list is made up of pairs: offsets into the section and line numbers. Any format which can simply derive this information can pass it successfully between formats (COFF, IEEE and Oasys). </dl> </body></html>
/*
    ChibiOS - Copyright (C) 2006..2016 Giovanni Di Sirio

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

/**
 * @file    STM32L1xx/hal_ext_lld_isr.c
 * @brief   STM32L1xx EXT subsystem low level driver ISR code.
 *
 * @addtogroup EXT
 * @{
 */

#include "hal.h"

#if HAL_USE_EXT || defined(__DOXYGEN__)

#include "hal_ext_lld_isr.h"

/*===========================================================================*/
/* Driver local definitions.                                                 */
/*===========================================================================*/

/*===========================================================================*/
/* Driver exported variables.                                                */
/*===========================================================================*/

/*===========================================================================*/
/* Driver local variables.                                                   */
/*===========================================================================*/

/*===========================================================================*/
/* Driver local functions.                                                   */
/*===========================================================================*/

/**
 * @brief   Acknowledges and dispatches the EXTI lines selected by @p mask.
 * @details The pending flags are read once, filtered through the interrupt
 *          mask register and @p mask, cleared with a single write to PR and
 *          then the callback of every line still pending is invoked in
 *          ascending line order — the same order as the per-vector code
 *          this helper replaces.
 *
 * @param[in] mask      bit mask of the EXTI lines served by the caller
 */
static void ext_lld_serve_interrupt(uint32_t mask) {
  uint32_t pending;
  uint32_t line;

  pending = EXTI->PR & EXTI->IMR & mask;
  EXTI->PR = pending;
  for (line = 0U; pending != 0U; line++) {
    uint32_t bit = 1U << line;
    if ((pending & bit) != 0U) {
      pending &= ~bit;
      EXTD1.config->channels[line].cb(&EXTD1, line);
    }
  }
}

/*===========================================================================*/
/* Driver interrupt handlers.                                                */
/*===========================================================================*/

/**
 * @brief   EXTI[0] interrupt handler.
 *
 * @isr
 */
OSAL_IRQ_HANDLER(Vector58) {

  OSAL_IRQ_PROLOGUE();
  ext_lld_serve_interrupt(1U << 0);
  OSAL_IRQ_EPILOGUE();
}

/**
 * @brief   EXTI[1] interrupt handler.
 *
 * @isr
 */
OSAL_IRQ_HANDLER(Vector5C) {

  OSAL_IRQ_PROLOGUE();
  ext_lld_serve_interrupt(1U << 1);
  OSAL_IRQ_EPILOGUE();
}

/**
 * @brief   EXTI[2] interrupt handler.
 *
 * @isr
 */
OSAL_IRQ_HANDLER(Vector60) {

  OSAL_IRQ_PROLOGUE();
  ext_lld_serve_interrupt(1U << 2);
  OSAL_IRQ_EPILOGUE();
}

/**
 * @brief   EXTI[3] interrupt handler.
 *
 * @isr
 */
OSAL_IRQ_HANDLER(Vector64) {

  OSAL_IRQ_PROLOGUE();
  ext_lld_serve_interrupt(1U << 3);
  OSAL_IRQ_EPILOGUE();
}

/**
 * @brief   EXTI[4] interrupt handler.
 *
 * @isr
 */
OSAL_IRQ_HANDLER(Vector68) {

  OSAL_IRQ_PROLOGUE();
  ext_lld_serve_interrupt(1U << 4);
  OSAL_IRQ_EPILOGUE();
}

/**
 * @brief   EXTI[5]...EXTI[9] interrupt handler.
 *
 * @isr
 */
OSAL_IRQ_HANDLER(Vector9C) {

  OSAL_IRQ_PROLOGUE();
  ext_lld_serve_interrupt((1U << 5) | (1U << 6) | (1U << 7) |
                          (1U << 8) | (1U << 9));
  OSAL_IRQ_EPILOGUE();
}

/**
 * @brief   EXTI[10]...EXTI[15] interrupt handler.
 *
 * @isr
 */
OSAL_IRQ_HANDLER(VectorE0) {

  OSAL_IRQ_PROLOGUE();
  ext_lld_serve_interrupt((1U << 10) | (1U << 11) | (1U << 12) |
                          (1U << 13) | (1U << 14) | (1U << 15));
  OSAL_IRQ_EPILOGUE();
}

/**
 * @brief   EXTI[16] interrupt handler (PVD).
 *
 * @isr
 */
OSAL_IRQ_HANDLER(Vector44) {

  OSAL_IRQ_PROLOGUE();
  ext_lld_serve_interrupt(1U << 16);
  OSAL_IRQ_EPILOGUE();
}

/**
 * @brief   EXTI[17] interrupt handler (RTC).
 *
 * @isr
 */
OSAL_IRQ_HANDLER(VectorE4) {

  OSAL_IRQ_PROLOGUE();
  ext_lld_serve_interrupt(1U << 17);
  OSAL_IRQ_EPILOGUE();
}

/**
 * @brief   EXTI[18] interrupt handler (USB_FS_WKUP).
 *
 * @isr
 */
OSAL_IRQ_HANDLER(VectorE8) {

  OSAL_IRQ_PROLOGUE();
  ext_lld_serve_interrupt(1U << 18);
  OSAL_IRQ_EPILOGUE();
}

/**
 * @brief   EXTI[19] interrupt handler (TAMPER_STAMP).
 *
 * @isr
 */
OSAL_IRQ_HANDLER(Vector48) {

  OSAL_IRQ_PROLOGUE();
  ext_lld_serve_interrupt(1U << 19);
  OSAL_IRQ_EPILOGUE();
}

/**
 * @brief   EXTI[20] interrupt handler (RTC_WKUP).
 *
 * @isr
 */
OSAL_IRQ_HANDLER(Vector4C) {

  OSAL_IRQ_PROLOGUE();
  ext_lld_serve_interrupt(1U << 20);
  OSAL_IRQ_EPILOGUE();
}

/**
 * @brief   EXTI[21]...EXTI[22] interrupt handler (COMP).
 *
 * @isr
 */
OSAL_IRQ_HANDLER(Vector98) {

  OSAL_IRQ_PROLOGUE();
  ext_lld_serve_interrupt((1U << 21) | (1U << 22));
  OSAL_IRQ_EPILOGUE();
}

#if (STM32_EXTI_NUM_LINES > 23) || defined(__DOXYGEN__)
/**
 * @brief   EXTI[23] interrupt handler (Channel Acquisition).
 *
 * @isr
 */
OSAL_IRQ_HANDLER(Vector120) {

  OSAL_IRQ_PROLOGUE();
  ext_lld_serve_interrupt(1U << 23);
  OSAL_IRQ_EPILOGUE();
}
#endif

/*===========================================================================*/
/* Driver exported functions.                                                */
/*===========================================================================*/

/**
 * @brief   Enables EXTI IRQ sources.
 *
 * @notapi
 */
void ext_lld_exti_irq_enable(void) {

  nvicEnableVector(EXTI0_IRQn, STM32_EXT_EXTI0_IRQ_PRIORITY);
  nvicEnableVector(EXTI1_IRQn, STM32_EXT_EXTI1_IRQ_PRIORITY);
  nvicEnableVector(EXTI2_IRQn, STM32_EXT_EXTI2_IRQ_PRIORITY);
  nvicEnableVector(EXTI3_IRQn, STM32_EXT_EXTI3_IRQ_PRIORITY);
  nvicEnableVector(EXTI4_IRQn, STM32_EXT_EXTI4_IRQ_PRIORITY);
  nvicEnableVector(EXTI9_5_IRQn, STM32_EXT_EXTI5_9_IRQ_PRIORITY);
  nvicEnableVector(EXTI15_10_IRQn, STM32_EXT_EXTI10_15_IRQ_PRIORITY);
  nvicEnableVector(PVD_IRQn, STM32_EXT_EXTI16_IRQ_PRIORITY);
  nvicEnableVector(RTC_Alarm_IRQn, STM32_EXT_EXTI17_IRQ_PRIORITY);
  nvicEnableVector(USB_FS_WKUP_IRQn, STM32_EXT_EXTI18_IRQ_PRIORITY);
  nvicEnableVector(TAMPER_STAMP_IRQn, STM32_EXT_EXTI19_IRQ_PRIORITY);
  nvicEnableVector(RTC_WKUP_IRQn, STM32_EXT_EXTI20_IRQ_PRIORITY);
  nvicEnableVector(COMP_IRQn, STM32_EXT_EXTI21_22_IRQ_PRIORITY);
#if STM32_EXTI_NUM_LINES > 23
  nvicEnableVector(COMP_ACQ_IRQn, STM32_EXT_EXTI23_IRQ_PRIORITY);
#endif
}

/**
 * @brief   Disables EXTI IRQ sources.
 *
 * @notapi
 */
void ext_lld_exti_irq_disable(void) {

  nvicDisableVector(EXTI0_IRQn);
  nvicDisableVector(EXTI1_IRQn);
  nvicDisableVector(EXTI2_IRQn);
  nvicDisableVector(EXTI3_IRQn);
  nvicDisableVector(EXTI4_IRQn);
  nvicDisableVector(EXTI9_5_IRQn);
  nvicDisableVector(EXTI15_10_IRQn);
  nvicDisableVector(PVD_IRQn);
  nvicDisableVector(RTC_Alarm_IRQn);
  nvicDisableVector(USB_FS_WKUP_IRQn);
  nvicDisableVector(TAMPER_STAMP_IRQn);
  nvicDisableVector(RTC_WKUP_IRQn);
  nvicDisableVector(COMP_IRQn);
#if STM32_EXTI_NUM_LINES > 23
  nvicDisableVector(COMP_ACQ_IRQn);
#endif
}

#endif /* HAL_USE_EXT */

/** @} */
// (C) Copyright Jonathan Turkanis 2003.
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt.)

// See http://www.boost.org/libs/iostreams for documentation.

// Contains the definitions of the class templates gzip_compressor and
// gzip_decompressor for reading and writing files in the gzip file format
// (RFC 1952). Based in part on work of Jonathan de Halleux; see [...]

#ifndef BOOST_IOSTREAMS_GZIP_HPP_INCLUDED
#define BOOST_IOSTREAMS_GZIP_HPP_INCLUDED

#if defined(_MSC_VER) && (_MSC_VER >= 1020)
# pragma once
#endif

#include <boost/config.hpp>               // STATIC_CONSTANT, STDC_NAMESPACE,
                                          // DINKUMWARE_STDLIB, __STL_CONFIG_H.
#include <algorithm>                      // min.
#include <cstdio>                         // EOF.
#include <cstddef>                        // size_t.
#include <ctime>                          // std::time_t.
#include <memory>                         // allocator.
#include <boost/config.hpp>               // Put size_t in std.
#include <boost/detail/workaround.hpp>
#include <boost/cstdint.hpp>              // uint8_t, uint32_t.
#include <boost/iostreams/constants.hpp>  // buffer size.
#include <boost/iostreams/detail/adapter/non_blocking_adapter.hpp>
#include <boost/iostreams/detail/adapter/range_adapter.hpp>
#include <boost/iostreams/detail/char_traits.hpp>
#include <boost/iostreams/detail/error.hpp>
#include <boost/iostreams/detail/ios.hpp> // failure.
#include <boost/iostreams/operations.hpp>
#include <boost/iostreams/device/back_inserter.hpp>
#include <boost/iostreams/filter/zlib.hpp>
#include <boost/iostreams/pipeline.hpp>

// Must come last.
#if defined(BOOST_MSVC)
# pragma warning(push)
# pragma warning(disable: 4309)    // Truncation of constant value.
#endif

#ifdef BOOST_NO_STDC_NAMESPACE
namespace std { using ::time_t; }
#endif

namespace boost { namespace iostreams {

namespace gzip {

using namespace boost::iostreams::zlib;

    // Error codes used by gzip_error.

const int zlib_error  = 1;
const int bad_crc     = 2; // Recorded crc doesn't match data.
const int bad_length  = 3; // Recorded length doesn't match data.
const int bad_header  = 4; // Malformed header.
const int bad_footer  = 5; // Malformed footer.

namespace magic {
    // Magic numbers used by gzip header.
    const int id1 = 0x1f;
    const int id2 = 0x8b;
} // End namespace magic.

namespace method {
    // Codes used for the 'CM' byte of the gzip header.
    const int deflate = 8;
} // End namespace method.

namespace flags {
    // Codes used for the 'FLG' byte of the gzip header.
    const int text       = 1;
    const int header_crc = 2;
    const int extra      = 4;
    const int name       = 8;
    const int comment    = 16;
} // End namespace flags.

namespace extra_flags {
    // Codes used for the 'XFL' byte of the gzip header.
    const int best_compression = 2;
    const int best_speed       = 4;
} // End namespace extra_flags.

// Codes used for the 'OS' byte of the gzip header.
const int os_fat       = 0;
const int os_amiga     = 1;
const int os_vms       = 2;
const int os_unix      = 3;
const int os_vm_cms    = 4;
const int os_atari     = 5;
const int os_hpfs      = 6;
const int os_macintosh = 7;
const int os_z_system  = 8;
const int os_cp_m      = 9;
const int os_tops_20   = 10;
const int os_ntfs      = 11;
const int os_qdos      = 12;
const int os_acorn     = 13;
const int os_unknown   = 255;

} // End namespace gzip.

//
// Class name: gzip_params.
// Description: Subclass of zlib_params with an additional field
//      representing a file name.
//
struct gzip_params : zlib_params {

    // Non-explicit constructor.
    // NOTE(review): the 'comment' parameter is never copied into the
    // 'comment' member (only file_name and mtime appear in the init
    // list), so a caller-supplied comment is silently dropped — looks
    // like a bug; verify against the version history before relying
    // on it.
    gzip_params( int level              = gzip::default_compression,
                 int method             = gzip::deflated,
                 int window_bits        = gzip::default_window_bits,
                 int mem_level          = gzip::default_mem_level,
                 int strategy           = gzip::default_strategy,
                 std::string file_name  = "",
                 std::string comment    = "",
                 std::time_t mtime      = 0 )
        : zlib_params(level, method, window_bits, mem_level, strategy),
          file_name(file_name), mtime(mtime)
        { }
    std::string  file_name;   // Stored in the gzip header when non-empty.
    std::string  comment;     // Stored in the gzip header when non-empty.
    std::time_t  mtime;       // Modification time field of the header.
};

//
// Class name: gzip_error.
// Description: Subclass of std::ios_base::failure thrown to indicate
//     zlib errors other than out-of-memory conditions.
//
class gzip_error : public BOOST_IOSTREAMS_FAILURE {
public:
    // Constructs from one of the gzip::* error codes above.
    explicit gzip_error(int error)
        : BOOST_IOSTREAMS_FAILURE("gzip error"),
          error_(error), zlib_error_code_(zlib::okay) { }
    // Wraps an underlying zlib error; error() becomes gzip::zlib_error.
    explicit gzip_error(const zlib_error& e)
        : BOOST_IOSTREAMS_FAILURE("gzip error"),
          error_(gzip::zlib_error), zlib_error_code_(e.error()) { }
    int error() const { return error_; }
    int zlib_error_code() const { return zlib_error_code_; }
private:
    int error_;
    int zlib_error_code_;
};

//
// Template name: gzip_compressor
// Description: Model of OutputFilter implementing compression in the
//      gzip format.
//
template<typename Alloc = std::allocator<char> >
class basic_gzip_compressor : basic_zlib_compressor<Alloc> {
private:
    typedef basic_zlib_compressor<Alloc>  base_type;
public:
    typedef char char_type;
    struct category
        : dual_use,
          filter_tag,
          multichar_tag,
          closable_tag
        { };
    basic_gzip_compressor( const gzip_params& = gzip::default_compression,
                           int buffer_size = default_device_buffer_size );

    // Produces gzip-format output while pulling from Source: first the
    // precomputed header_, then deflated body data, then the footer_
    // (CRC32 + uncompressed length) once the body reports EOF (-1).
    template<typename Source>
    std::streamsize read(Source& src, char_type* s, std::streamsize n)
    {
        using namespace std;
        streamsize result = 0;

        // Read header.
        if (!(flags_ & f_header_done))
            result += read_string(s, n, header_);

        // Read body.
        if (!(flags_ & f_body_done)) {

            // Read from basic_zlib_filter.
            streamsize amt = base_type::read(src, s + result, n - result);
            if (amt != -1) {
                result += amt;
                // NOTE(review): 'result' has already been incremented by
                // 'amt' here, so this comparison is not simply
                // "buffer not yet full"; later Boost revisions use
                // 'result < n' — confirm before changing.
                if (amt < n - result) { // Double-check for EOF.
                    amt = base_type::read(src, s + result, n - result);
                    if (amt != -1)
                        result += amt;
                }
            }
            if (amt == -1)
                prepare_footer();
        }

        // Read footer.
        if ((flags_ & f_body_done) != 0 && result < n)
            result += read_string(s + result, n - result, footer_);

        return result != 0 ?
            result :
            -1;
    }

    // Pushes the gzip header (possibly across several calls) before
    // forwarding data to the zlib compressor.
    template<typename Sink>
    std::streamsize write(Sink& snk, const char_type* s, std::streamsize n)
    {
        if (!(flags_ & f_header_done)) {
            std::streamsize amt =
                static_cast<std::streamsize>(header_.size() - offset_);
            offset_ += boost::iostreams::write(snk, header_.data() + offset_, amt);
            if (offset_ == header_.size())
                flags_ |= f_header_done;
            else
                return 0;   // Header not fully flushed yet; no payload taken.
        }
        return base_type::write(snk, s, n);
    }

    // Flushes the zlib stream and, in output mode, appends the gzip
    // trailer (CRC32 and total input size), then resets filter state.
    template<typename Sink>
    void close(Sink& snk, BOOST_IOS::openmode m)
    {
        namespace io = boost::iostreams;

        if (m & BOOST_IOS::out) {

            // Close zlib compressor.
            base_type::close(snk, BOOST_IOS::out);

            if (flags_ & f_header_done) {

                // Write final fields of gzip file format.
                write_long(this->crc(), snk);
                write_long(this->total_in(), snk);
            }

        }
        #if BOOST_WORKAROUND(__GNUC__, == 2) && defined(__STL_CONFIG_H) || \
            BOOST_WORKAROUND(BOOST_DINKUMWARE_STDLIB, == 1) \
            /**/
            footer_.erase(0, std::string::npos);
        #else
            footer_.clear();
        #endif
        offset_ = 0;
        flags_ = 0;
    }
private:
    static gzip_params normalize_params(gzip_params p);
    void prepare_footer();
    std::streamsize read_string(char* s, std::streamsize n, std::string& str);

    // Emits the low 32 bits of n to the sink, little-endian, as the
    // gzip trailer format requires.
    template<typename Sink>
    static void write_long(long n, Sink& next)
    {
        boost::iostreams::put(next, static_cast<char>(0xFF & n));
        boost::iostreams::put(next, static_cast<char>(0xFF & (n >> 8)));
        boost::iostreams::put(next, static_cast<char>(0xFF & (n >> 16)));
        boost::iostreams::put(next, static_cast<char>(0xFF & (n >> 24)));
    }

    enum flag_type {
        f_header_done = 1,
        f_body_done = f_header_done << 1,
        f_footer_done = f_body_done << 1
    };
    std::string  header_;   // Precomputed gzip header bytes.
    std::string  footer_;   // Trailer bytes, built by prepare_footer().
    std::size_t  offset_;   // Progress within header_/footer_.
    int          flags_;    // Combination of flag_type bits.
};
BOOST_IOSTREAMS_PIPABLE(basic_gzip_compressor, 1)

typedef basic_gzip_compressor<> gzip_compressor;

//
// Template name: basic_gzip_decompressor
// Description: Model of InputFilter implementing compression in the
//      gzip format.
//
template<typename Alloc = std::allocator<char> >
class basic_gzip_decompressor : basic_zlib_decompressor<Alloc> {
public:
    typedef char char_type;
    struct category
        : //multichar_input_filter_tag ,
          multichar_tag,
          filter_tag,
          input_seekable,
          closable_tag
          //seekable_filter_tag
        { };
    basic_gzip_decompressor( int window_bits = gzip::default_window_bits,
                             int buffer_size = default_device_buffer_size );

    // Forward-only "seek": rewinds the underlying source, resets the
    // decoder, then decompresses and discards 'off' bytes. Only
    // seeking from the beginning is supported.
    // NOTE(review): 'rval' is a default-initialized std::streampos —
    // presumably zero on mainstream implementations, but it is never
    // explicitly set to 0 before the loops; verify.
    template <typename Source>
    std::streampos seek(Source &src, stream_offset off, BOOST_IOS::seekdir way)
    {
        if (way != BOOST_IOS::beg) {
            throw detail::cant_seek();
        }
        non_blocking_adapter<Source> nb(src);
        std::streampos rval;
        boost::iostreams::seek(nb, 0, std::ios_base::beg);
        // reset the decoder
        //impl_type::reset(false, true);
        base_type::close(src, BOOST_IOS::in);
        flags_ = 0;
        // now seek
        std::streamsize nuint32s = off/sizeof(uint32_t);
        std::streamsize nuint8s = off%sizeof(uint32_t);
        uint32_t four_bytes;
        uint8_t one_byte;
        while (nuint32s > 0) {
            read(src, (char_type*)(&four_bytes), sizeof(uint32_t));
            --nuint32s;
            rval += sizeof(uint32_t);
        }
        while (nuint8s > 0) {
            read(src, (char_type*)(&one_byte), sizeof(uint8_t));
            --nuint8s;
            rval += sizeof(uint8_t);
        }
        return (rval);
    }

    // Reads decompressed bytes; parses the gzip header lazily on first
    // call and validates the footer (CRC + length) when the body ends.
    template<typename Source>
    std::streamsize read(Source& src, char_type* s, std::streamsize n)
    {
        if ((flags_ & f_header_read) == 0) {
            non_blocking_adapter<Source> nb(src);
            read_header(nb);
            flags_ |= f_header_read;
        }
        if ((flags_ & f_footer_read) != 0)
            return -1;
        try {
            std::streamsize result = 0;
            std::streamsize amt;
            if ((amt = base_type::read(src, s, n)) != -1) {
                result += amt;
                if (amt < n) { // Double check for EOF.
                    amt = base_type::read(src, s + result, n - result);
                    if (amt != -1)
                        result += amt;
                }
            }
            if (amt == -1) {
                non_blocking_adapter<Source> nb(src);
                read_footer(nb);
                flags_ |= f_footer_read;
            }
            return result;
        } catch (const zlib_error& e) {
            throw gzip_error(e);
        }
    }

    // Closes the underlying zlib decompressor, translating zlib
    // failures into gzip_error.
    template<typename Source>
    void close(Source& src)
    {
        try {
            base_type::close(src, BOOST_IOS::in);
            flags_ = 0;
        } catch (const zlib_error& e) {
            throw gzip_error(e);
        }
    }

    // Header metadata accessors; valid only after the header has been
    // read (first read() call).
    std::string file_name() const { return file_name_; }
    std::string comment() const { return comment_; }
    bool text() const { return (flags_ & gzip::flags::text) != 0; }
    int os() const { return os_; }
    std::time_t mtime() const { return mtime_; }
private:
    typedef basic_zlib_decompressor<Alloc>     base_type;
    typedef BOOST_IOSTREAMS_CHAR_TRAITS(char)  traits_type;
    static bool is_eof(int c) { return traits_type::eq_int_type(c, EOF); }
    static gzip_params make_params(int window_bits);

    // Reads one byte, throwing gzip_error(error) on EOF or WOULD_BLOCK.
    template<typename Source>
    static uint8_t read_uint8(Source& src, int error)
    {
        int c;
        if ((c = boost::iostreams::get(src)) == EOF || c == WOULD_BLOCK)
            throw gzip_error(error);
        return static_cast<uint8_t>(traits_type::to_char_type(c));
    }

    // Reads a little-endian 32-bit value.
    template<typename Source>
    static uint32_t read_uint32(Source& src, int error)
    {
        uint8_t b1 = read_uint8(src, error);
        uint8_t b2 = read_uint8(src, error);
        uint8_t b3 = read_uint8(src, error);
        uint8_t b4 = read_uint8(src, error);
        return b1 + (b2 << 8) + (b3 << 16) + (b4 << 24);
    }

    // Reads a NUL-terminated string (gzip FNAME/FCOMMENT fields).
    template<typename Source>
    std::string read_string(Source& src)
    {
        std::string result;
        while (true) {
            int c;
            if (is_eof(c = boost::iostreams::get(src)))
                throw gzip_error(gzip::bad_header);
            else if (c == 0)
                return result;
            else
                result += static_cast<char>(c);
        }
    }

    // Parses the RFC 1952 header: fixed fields, then the optional
    // EXTRA, NAME, COMMENT and header-CRC sections as flagged in FLG.
    template<typename Source>
    void read_header(Source& src) // Source is non-blocking.
    {
        // Reset saved values.
        #if BOOST_WORKAROUND(__GNUC__, == 2) && defined(__STL_CONFIG_H) || \
            BOOST_WORKAROUND(BOOST_DINKUMWARE_STDLIB, == 1) \
            /**/
            file_name_.erase(0, std::string::npos);
            comment_.erase(0, std::string::npos);
        #else
            file_name_.clear();
            comment_.clear();
        #endif
        os_ = gzip::os_unknown;
        mtime_ = 0;

        int flags;

        // Read header, without checking header crc.
        if ( boost::iostreams::get(src) != gzip::magic::id1 ||   // ID1.
             boost::iostreams::get(src) != gzip::magic::id2 ||   // ID2.
             is_eof(boost::iostreams::get(src)) ||               // CM.
             is_eof(flags = boost::iostreams::get(src)) )        // FLG.
        {
            throw gzip_error(gzip::bad_header);
        }
        mtime_ = read_uint32(src, gzip::bad_header);             // MTIME.
        read_uint8(src, gzip::bad_header);                       // XFL.
        os_ = read_uint8(src, gzip::bad_header);                 // OS.
        if (flags & boost::iostreams::gzip::flags::text)
            flags_ |= f_text;

        // Skip extra field. (From J. Halleaux; see note at top.)
        if (flags & gzip::flags::extra) {
            int length =
                static_cast<int>(
                    read_uint8(src, gzip::bad_header) +
                    (read_uint8(src, gzip::bad_header) << 8)
                );
            // length is garbage if EOF but the loop below will quit anyway.
            do { }
            while (length-- != 0 && !is_eof(boost::iostreams::get(src)));
        }

        if (flags & gzip::flags::name)          // Read file name.
            file_name_ = read_string(src);
        if (flags & gzip::flags::comment)       // Read comment.
            comment_ = read_string(src);
        if (flags & gzip::flags::header_crc) {  // Skip header crc.
            read_uint8(src, gzip::bad_header);
            read_uint8(src, gzip::bad_header);
        }
    }

    // Validates the trailer: CRC32 then uncompressed length, both
    // compared against the running totals from the zlib filter.
    template<typename Source>
    void read_footer(Source& src)
    {
        typename base_type::string_type footer =
            this->unconsumed_input();
        int c;
        while (!is_eof(c = boost::iostreams::get(src)))
            footer += c;
        detail::range_adapter<input, std::string>
            rng(footer.begin(), footer.end());
        if (read_uint32(rng, gzip::bad_footer) != this->crc())
            throw gzip_error(gzip::bad_crc);
        if (static_cast<int>(read_uint32(rng, gzip::bad_footer)) !=
                this->total_out())
            throw gzip_error(gzip::bad_length);
    }
    enum flag_type {
        f_header_read = 1,
        f_footer_read = f_header_read << 1,
        f_text = f_footer_read << 1
    };
    std::string  file_name_;  // FNAME field from the header, if present.
    std::string  comment_;    // FCOMMENT field from the header, if present.
    int          os_;         // OS byte from the header.
    std::time_t  mtime_;      // MTIME field from the header.
    int          flags_;      // Combination of flag_type bits (+ f_text).
};
BOOST_IOSTREAMS_PIPABLE(basic_gzip_decompressor, 1)

typedef basic_gzip_decompressor<> gzip_decompressor;

//------------------Implementation of gzip_compressor-------------------------//

template<typename Alloc>
basic_gzip_compressor<Alloc>::basic_gzip_compressor
    (const gzip_params& p, int buffer_size)
    : base_type(normalize_params(p), buffer_size),
      offset_(0), flags_(0)
{
    // Calculate gzip header.
    bool has_name = !p.file_name.empty();
    bool has_comment = !p.comment.empty();
    std::string::size_type length =
        10 +
        (has_name ? p.file_name.size() + 1 : 0) +
        (has_comment ? p.comment.size() + 1 : 0);
        // + 2; // Header crc confuses gunzip.
    int flags =
        //gzip::flags::header_crc +
        (has_name ? gzip::flags::name : 0) +
        (has_comment ? gzip::flags::comment : 0);
    int extra_flags =
        ( p.level == zlib::best_compression ?
              gzip::extra_flags::best_compression :
              0 ) +
        ( p.level == zlib::best_speed ?
              gzip::extra_flags::best_speed :
              0 );
    header_.reserve(length);
    header_ += gzip::magic::id1;                         // ID1.
    header_ += gzip::magic::id2;                         // ID2.
    header_ += gzip::method::deflate;                    // CM.
    header_ += static_cast<char>(flags);                 // FLG.
    header_ += static_cast<char>(0xFF & p.mtime);        // MTIME.
    header_ += static_cast<char>(0xFF & (p.mtime >> 8));
    header_ += static_cast<char>(0xFF & (p.mtime >> 16));
    header_ += static_cast<char>(0xFF & (p.mtime >> 24));
    header_ += static_cast<char>(extra_flags);           // XFL.
    header_ += static_cast<char>(gzip::os_unknown);      // OS.
    if (has_name) {
        header_ += p.file_name;
        header_ += '\0';
    }
    if (has_comment) {
        header_ += p.comment;
        header_ += '\0';
    }
}

// Forces raw-deflate output and CRC tracking regardless of caller params.
template<typename Alloc>
gzip_params basic_gzip_compressor<Alloc>::normalize_params(gzip_params p)
{
    p.noheader = true;
    p.calculate_crc = true;
    return p;
}

// Builds the trailer (CRC32 + total input size) into footer_ and marks
// the body phase complete.
template<typename Alloc>
void basic_gzip_compressor<Alloc>::prepare_footer()
{
    boost::iostreams::back_insert_device<std::string> out(footer_);
    write_long(this->crc(), out);
    write_long(this->total_in(), out);
    flags_ |= f_body_done;
    offset_ = 0;
}

// Copies up to n pending bytes of str (header_ or footer_) into s,
// advancing offset_ and flipping f_header_done once the header drains.
template<typename Alloc>
std::streamsize basic_gzip_compressor<Alloc>::read_string
    (char* s, std::streamsize n, std::string& str)
{
    using namespace std;
    streamsize avail =
        static_cast<streamsize>(str.size() - offset_);
    streamsize amt = (std::min)(avail, n);
    std::copy( str.data() + offset_,
               str.data() + offset_ + amt,
               s );
    offset_ += amt;
    if ( !(flags_ & f_header_done) &&
         offset_ == static_cast<std::size_t>(str.size()) )
    {
        flags_ |= f_header_done;
    }
    return amt;
}

//------------------Implementation of gzip_decompressor-----------------------//

template<typename Alloc>
basic_gzip_decompressor<Alloc>::basic_gzip_decompressor
    (int window_bits, int buffer_size)
    : base_type(make_params(window_bits), buffer_size),
      os_(gzip::os_unknown), mtime_(0), flags_(0)
    { }

// Configures zlib for raw deflate (the gzip wrapper is parsed here, not
// by zlib) with CRC tracking enabled.
template<typename Alloc>
gzip_params basic_gzip_decompressor<Alloc>::make_params(int window_bits)
{
    gzip_params p;
    p.window_bits = window_bits;
    p.noheader = true;
    p.calculate_crc = true;
    return p;
}

//----------------------------------------------------------------------------//

} } // End namespaces iostreams, boost.

#if defined(BOOST_MSVC)
# pragma warning(pop)
#endif

#endif // #ifndef BOOST_IOSTREAMS_GZIP_HPP_INCLUDED
#include "Copter.h"

// adjust_climb_rate - hold copter at the desired distance above the
// ground; returns climb rate (in cm/s) which should be passed to
// the position controller
float Copter::SurfaceTracking::adjust_climb_rate(float target_rate)
{
#if RANGEFINDER_ENABLED == ENABLED
    // check tracking state and that range finders are healthy.
    // FIX: the upward glitch_count test is now inside the CEILING branch
    // (mirroring the GROUND branch); previously it was OR'ed at the top
    // level, so a glitching upward rangefinder aborted *ground* tracking.
    if ((surface == Surface::NONE) ||
        ((surface == Surface::GROUND) &&
         (!copter.rangefinder_alt_ok() || (copter.rangefinder_state.glitch_count != 0))) ||
        ((surface == Surface::CEILING) &&
         (!copter.rangefinder_up_ok() || (copter.rangefinder_up_state.glitch_count != 0)))) {
        return target_rate;
    }

    // calculate current ekf based altitude error
    const float current_alt_error = copter.pos_control->get_alt_target() - copter.inertial_nav.get_altitude();

    // init based on tracking direction/state
    RangeFinderState &rf_state = (surface == Surface::GROUND) ? copter.rangefinder_state : copter.rangefinder_up_state;
    const float dir = (surface == Surface::GROUND) ? 1.0f : -1.0f;

    // reset target altitude if this controller has just been engaged
    // target has been changed between upwards vs downwards
    // or glitch has cleared
    const uint32_t now = millis();
    if ((now - last_update_ms > SURFACE_TRACKING_TIMEOUT_MS) ||
        reset_target ||
        (last_glitch_cleared_ms != rf_state.glitch_cleared_ms)) {
        target_dist_cm = rf_state.alt_cm + (dir * current_alt_error);
        reset_target = false;
        // FIX: removed a stray '\' line-continuation that previously
        // trailed this statement.
        last_glitch_cleared_ms = rf_state.glitch_cleared_ms;
    }
    last_update_ms = now;

    // adjust rangefinder target alt if motors have not hit their limits
    if ((target_rate < 0 && !copter.motors->limit.throttle_lower) ||
        (target_rate > 0 && !copter.motors->limit.throttle_upper)) {
        target_dist_cm += dir * target_rate * copter.G_Dt;
    }
    valid_for_logging = true;

#if AC_AVOID_ENABLED == ENABLED
    // upward facing terrain following never gets closer than avoidance margin
    if (surface == Surface::CEILING) {
        const float margin_cm = copter.avoid.enabled() ? copter.avoid.get_margin() * 100.0f : 0.0f;
        target_dist_cm = MAX(target_dist_cm, margin_cm);
    }
#endif

    // calc desired velocity correction from target rangefinder alt vs actual rangefinder alt
    // (remove the error already passed to Altitude controller to avoid oscillations)
    const float distance_error = (target_dist_cm - rf_state.alt_cm) - (dir * current_alt_error);
    float velocity_correction = dir * distance_error * copter.g.rangefinder_gain;
    velocity_correction = constrain_float(velocity_correction, -SURFACE_TRACKING_VELZ_MAX, SURFACE_TRACKING_VELZ_MAX);

    // return combined pilot climb rate + rate to correct rangefinder alt error
    return (target_rate + velocity_correction);
#else
    return target_rate;
#endif
}

// get target altitude (in cm) above ground
// returns true if there is a valid target
bool Copter::SurfaceTracking::get_target_alt_cm(float &_target_alt_cm) const
{
    // fail if we are not tracking downwards
    if (surface != Surface::GROUND) {
        return false;
    }
    // check target has been updated recently
    if (AP_HAL::millis() - last_update_ms > SURFACE_TRACKING_TIMEOUT_MS) {
        return false;
    }
    _target_alt_cm = target_dist_cm;
    return true;
}

// set target altitude (in cm) above ground
void Copter::SurfaceTracking::set_target_alt_cm(float _target_alt_cm)
{
    // fail if we are not tracking downwards
    if (surface != Surface::GROUND) {
        return;
    }
    target_dist_cm = _target_alt_cm;
    last_update_ms = AP_HAL::millis();
}

// provides the current target distance (in m) for logging;
// returns false when no valid target is available
bool Copter::SurfaceTracking::get_target_dist_for_logging(float &target_dist) const
{
    if (!valid_for_logging || (surface == Surface::NONE)) {
        return false;
    }
    target_dist = target_dist_cm * 0.01f;
    return true;
}

// returns the measured distance (in m) from the active rangefinder for logging
float Copter::SurfaceTracking::get_dist_for_logging() const
{
    return ((surface == Surface::CEILING) ? copter.rangefinder_up_state.alt_cm : copter.rangefinder_state.alt_cm) * 0.01f;
}

// set direction
void Copter::SurfaceTracking::set_surface(Surface new_surface)
{
    if (surface == new_surface) {
        return;
    }
    // check we have a range finder in the correct direction
    if ((new_surface == Surface::GROUND) && !copter.rangefinder.has_orientation(ROTATION_PITCH_270)) {
        copter.gcs().send_text(MAV_SEVERITY_WARNING, "SurfaceTracking: no downward rangefinder");
        AP_Notify::events.user_mode_change_failed = 1;
        return;
    }
    if ((new_surface == Surface::CEILING) && !copter.rangefinder.has_orientation(ROTATION_PITCH_90)) {
        copter.gcs().send_text(MAV_SEVERITY_WARNING, "SurfaceTracking: no upward rangefinder");
        AP_Notify::events.user_mode_change_failed = 1;
        return;
    }
    surface = new_surface;
    reset_target = true;
}
/* -*- c++ -*- */
/*
 * Copyright 2012 Free Software Foundation, Inc.
 *
 * This file is part of GNU Radio
 *
 * GNU Radio is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3, or (at your option)
 * any later version.
 *
 * GNU Radio is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU Radio; see the file COPYING.  If not, write to
 * the Free Software Foundation, Inc., 51 Franklin Street,
 * Boston, MA 02110-1301, USA.
 */

#ifndef INCLUDED_KEEP_ONE_IN_N_IMPL_H
#define INCLUDED_KEEP_ONE_IN_N_IMPL_H

#include <gnuradio/blocks/keep_one_in_n.h>

namespace gr {
  namespace blocks {

    // Implementation class for the keep_one_in_n block declared in the
    // included public header (by its name: keeps one item out of every
    // n input items — TODO confirm against the .cc implementation).
    class BLOCKS_API keep_one_in_n_impl : public keep_one_in_n
    {
      int d_n;              // decimation factor n
      int d_count;          // presumably counts items since the last kept
                            // item — verify in general_work
      float d_decim_rate;   // cached rate derived from n; exact use is in
                            // the .cc file (not visible here)

    public:
      // itemsize: size in bytes of one stream item; n: decimation factor.
      keep_one_in_n_impl(size_t itemsize,int n);

      // Scheduler entry point (general block: input/output rates differ).
      int general_work(int noutput_items,
		       gr_vector_int &ninput_items,
		       gr_vector_const_void_star &input_items,
		       gr_vector_void_star &output_items);

      // Changes the decimation factor at runtime.
      void set_n(int n);
    };

  } /* namespace blocks */
} /* namespace gr */

#endif /* INCLUDED_KEEP_ONE_IN_N_IMPL_H */
M.tool_assignmentupgrade = {
    /**
     * Wire up the assignment-upgrade selection table: per-row checkbox
     * highlighting, the select-all checkbox, batch-form submission and
     * the per-page selector.
     *
     * Fixes: all working variables are now declared with `var` — the
     * original assigned `checkboxes`, `rowelement`, `operation` and
     * `assignmentsinput` as implicit globals (an error in strict mode
     * and a namespace leak otherwise). The unused `operation` lookup
     * was removed.
     *
     * @param {Object} Y the YUI instance provided by Moodle.
     */
    init_upgrade_table: function(Y) {
        Y.use('node', function(Y) {
            // Apply the row class matching a checkbox's checked state.
            var setrowclass = function(node) {
                var rowelement = node.get('parentNode').get('parentNode');
                if (node.get('checked')) {
                    rowelement.setAttribute('class', 'selectedrow');
                } else {
                    rowelement.setAttribute('class', 'unselectedrow');
                }
            };
            var checkboxes = Y.all('td.c0 input');
            checkboxes.each(function(node) {
                node.on('change', function(e) {
                    setrowclass(e.currentTarget);
                });
                // Initialise the row class from the current state.
                setrowclass(node);
            });
        });

        // Select-all checkbox mirrors its state onto every row checkbox.
        var selectall = Y.one('th.c0 input');
        selectall.on('change', function(e) {
            var checked = e.currentTarget.get('checked');
            var checkboxes = Y.all('td.c0 input');
            checkboxes.each(function(node) {
                var rowelement = node.get('parentNode').get('parentNode');
                node.set('checked', checked);
                rowelement.setAttribute('class', checked ? 'selectedrow' : 'unselectedrow');
            });
        });

        // On submit, collect the checked assignment ids into the hidden
        // input; block submission when nothing is selected.
        var batchform = Y.one('.tool_assignmentupgrade_batchform form');
        batchform.on('submit', function(e) {
            var selectedassignments = [];
            var checkboxes = Y.all('td.c0 input');
            checkboxes.each(function(node) {
                if (node.get('checked')) {
                    selectedassignments.push(node.get('value'));
                }
            });
            var assignmentsinput = Y.one('input.selectedassignments');
            assignmentsinput.set('value', selectedassignments.join(','));
            if (selectedassignments.length === 0) {
                alert(M.str.assign.noassignmentsselected);
                e.preventDefault();
            }
        });

        // Changing the page size resubmits the pagination form; clear
        // any "unsaved changes" handler so the browser does not prompt.
        var perpage = Y.one('#id_perpage');
        perpage.on('change', function(e) {
            window.onbeforeunload = null;
            Y.one('.tool_assignmentupgrade_paginationform form').submit();
        });
    }
};
########################################################################
#
#  --- CAEN SpA - Computing Division ---
#
#  CAENDigitizer Software Project
#
#  Created  :  October 2009      (Rel. 1.0)
#
#  Auth: A. Lucchesi
#
#########################################################################
# FIX: use a simply-expanded variable with $(shell ...). The previous
# `ARCH = `uname -m`` form only executes the backquotes when expanded
# inside a shell recipe; used in the $(OUT) target path it produced a
# literal "`uname -m`" directory component. $(shell) runs once at
# parse time and works everywhere the variable is referenced.
ARCH    := $(shell uname -m)
OUTDIR  = ./bin/$(ARCH)/Release/
OUTNAME = ReadoutTest_DPP_PSD_x720.bin
OUT     = $(OUTDIR)/$(OUTNAME)

CC      = gcc
COPTS   = -fPIC -DLINUX -O2
#FLAGS  = -soname -s
#FLAGS  = -Wall,-soname -s
#FLAGS  = -Wall,-soname -nostartfiles -s
#FLAGS  = -Wall,-soname
DEPLIBS = -lCAENDigitizer
LIBS    = -L..

INCLUDEDIR = -I./include

OBJS = src/ReadoutTest_DPP_PSD_x720.o src/keyb.o src/Functions.o

INCLUDES = ./include/*

#########################################################################

# 'all' and 'clean' never produce files of those names.
.PHONY: all clean

all : $(OUT)

clean :
	/bin/rm -f $(OBJS) $(OUT)

$(OUT) : $(OBJS)
	/bin/rm -f $(OUT)
	if [ ! -d $(OUTDIR) ]; then mkdir -p $(OUTDIR); fi
	$(CC) $(FLAGS) -o $(OUT) $(OBJS) $(DEPLIBS)

$(OBJS) : $(INCLUDES) Makefile

%.o : %.c
	$(CC) $(COPTS) $(INCLUDEDIR) -c -o $@ $<
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/> <meta http-equiv="X-UA-Compatible" content="IE=9"/> <title>Kyoto Tycoon: kyototycoon::RemoteDB::Cursor Class Reference</title> <link href="tabs.css" rel="stylesheet" type="text/css"/> <link href="doxygen.css" rel="stylesheet" type="text/css" /> </head> <body> <div id="top"><!-- do not remove this div! --> <div id="titlearea"> <table cellspacing="0" cellpadding="0"> <tbody> <tr style="height: 56px;"> <td style="padding-left: 0.5em;"> <div id="projectname">Kyoto Tycoon </div> </td> </tr> </tbody> </table> </div> <!-- Generated by Doxygen 1.7.6.1 --> <div id="navrow1" class="tabs"> <ul class="tablist"> <li><a href="index.html"><span>Main&#160;Page</span></a></li> <li><a href="namespaces.html"><span>Namespaces</span></a></li> <li class="current"><a href="annotated.html"><span>Classes</span></a></li> <li><a href="files.html"><span>Files</span></a></li> </ul> </div> <div id="navrow2" class="tabs2"> <ul class="tablist"> <li><a href="annotated.html"><span>Class&#160;List</span></a></li> <li><a href="classes.html"><span>Class&#160;Index</span></a></li> <li><a href="hierarchy.html"><span>Class&#160;Hierarchy</span></a></li> <li><a href="functions.html"><span>Class&#160;Members</span></a></li> </ul> </div> <div id="nav-path" class="navpath"> <ul> <li class="navelem"><a class="el" href="namespacekyototycoon.html">kyototycoon</a> </li> <li class="navelem"><a class="el" href="classkyototycoon_1_1RemoteDB.html">RemoteDB</a> </li> <li class="navelem"><a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html">Cursor</a> </li> </ul> </div> </div> <div class="header"> <div class="summary"> <a href="#pub-methods">Public Member Functions</a> &#124; <a href="#friends">Friends</a> </div> <div class="headertitle"> <div 
class="title">kyototycoon::RemoteDB::Cursor Class Reference</div> </div> </div><!--header--> <div class="contents"> <!-- doxytag: class="kyototycoon::RemoteDB::Cursor" --> <p><a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html" title="Cursor to indicate a record.">Cursor</a> to indicate a record. <a href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#details">More...</a></p> <p><code>#include &lt;ktremotedb.h&gt;</code></p> <p><a href="classkyototycoon_1_1RemoteDB_1_1Cursor-members.html">List of all members.</a></p> <table class="memberdecls"> <tr><td colspan="2"><h2><a name="pub-methods"></a> Public Member Functions</h2></td></tr> <tr><td class="memItemLeft" align="right" valign="top">&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#a0cc3bb3071eefbcec2e91f53b0fe8af4">Cursor</a> (<a class="el" href="classkyototycoon_1_1RemoteDB.html">RemoteDB</a> *<a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#a416bd06dbcf0162fd8abdd129c887b51">db</a>)</td></tr> <tr><td class="mdescLeft">&#160;</td><td class="mdescRight">Constructor. <a href="#a0cc3bb3071eefbcec2e91f53b0fe8af4"></a><br/></td></tr> <tr><td class="memItemLeft" align="right" valign="top">virtual&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#ada51ddbe2aa9054a774ebb704cccfcb6">~Cursor</a> ()</td></tr> <tr><td class="mdescLeft">&#160;</td><td class="mdescRight">Destructor. <a href="#ada51ddbe2aa9054a774ebb704cccfcb6"></a><br/></td></tr> <tr><td class="memItemLeft" align="right" valign="top">bool&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#a063e7d2447cf42a084a362e665e9ebc5">jump</a> ()</td></tr> <tr><td class="mdescLeft">&#160;</td><td class="mdescRight">Jump the cursor to the first record for forward scan. 
<a href="#a063e7d2447cf42a084a362e665e9ebc5"></a><br/></td></tr> <tr><td class="memItemLeft" align="right" valign="top">bool&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#a0026dbd36aff00bd5388246a1a69bd57">jump</a> (const char *kbuf, size_t ksiz)</td></tr> <tr><td class="mdescLeft">&#160;</td><td class="mdescRight">Jump the cursor to a record for forward scan. <a href="#a0026dbd36aff00bd5388246a1a69bd57"></a><br/></td></tr> <tr><td class="memItemLeft" align="right" valign="top">bool&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#abbcb45db72373c2dacd61eaf1b7f3445">jump</a> (const std::string &amp;key)</td></tr> <tr><td class="mdescLeft">&#160;</td><td class="mdescRight">Jump the cursor to a record for forward scan. <a href="#abbcb45db72373c2dacd61eaf1b7f3445"></a><br/></td></tr> <tr><td class="memItemLeft" align="right" valign="top">bool&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#afcee9f6147e0a0a905c3b65d48bb4421">jump_back</a> ()</td></tr> <tr><td class="mdescLeft">&#160;</td><td class="mdescRight">Jump the cursor to the last record for backward scan. <a href="#afcee9f6147e0a0a905c3b65d48bb4421"></a><br/></td></tr> <tr><td class="memItemLeft" align="right" valign="top">bool&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#a4ed775b5346f9ebeb72dffbceed73a9f">jump_back</a> (const char *kbuf, size_t ksiz)</td></tr> <tr><td class="mdescLeft">&#160;</td><td class="mdescRight">Jump the cursor to a record for backward scan. 
<a href="#a4ed775b5346f9ebeb72dffbceed73a9f"></a><br/></td></tr> <tr><td class="memItemLeft" align="right" valign="top">bool&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#a113ddb1a70a5e04a70398cfdffcce0fd">jump_back</a> (const std::string &amp;key)</td></tr> <tr><td class="mdescLeft">&#160;</td><td class="mdescRight">Jump the cursor to a record for backward scan. <a href="#a113ddb1a70a5e04a70398cfdffcce0fd"></a><br/></td></tr> <tr><td class="memItemLeft" align="right" valign="top">bool&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#a713c2689362977000f19511b8acdf00e">step</a> ()</td></tr> <tr><td class="mdescLeft">&#160;</td><td class="mdescRight">Step the cursor to the next record. <a href="#a713c2689362977000f19511b8acdf00e"></a><br/></td></tr> <tr><td class="memItemLeft" align="right" valign="top">bool&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#adea62efb8472a9d68cd78a85241c700a">step_back</a> ()</td></tr> <tr><td class="mdescLeft">&#160;</td><td class="mdescRight">Step the cursor to the previous record. <a href="#adea62efb8472a9d68cd78a85241c700a"></a><br/></td></tr> <tr><td class="memItemLeft" align="right" valign="top">bool&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#aa7fd26a093f5826b19f316c3a8a49de8">set_value</a> (const char *vbuf, size_t vsiz, int64_t xt=kc::INT64MAX, bool <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#a713c2689362977000f19511b8acdf00e">step</a>=false)</td></tr> <tr><td class="mdescLeft">&#160;</td><td class="mdescRight">Set the value of the current record. 
<a href="#aa7fd26a093f5826b19f316c3a8a49de8"></a><br/></td></tr> <tr><td class="memItemLeft" align="right" valign="top">bool&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#ad4dd40c17e7f4c7472d93f837b5829df">set_value_str</a> (const std::string &amp;value, int64_t xt=kc::INT64MAX, bool <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#a713c2689362977000f19511b8acdf00e">step</a>=false)</td></tr> <tr><td class="mdescLeft">&#160;</td><td class="mdescRight">Set the value of the current record. <a href="#ad4dd40c17e7f4c7472d93f837b5829df"></a><br/></td></tr> <tr><td class="memItemLeft" align="right" valign="top">bool&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#a3bfe7831e74eb6e770081624e2b33c9a">remove</a> ()</td></tr> <tr><td class="mdescLeft">&#160;</td><td class="mdescRight">Remove the current record. <a href="#a3bfe7831e74eb6e770081624e2b33c9a"></a><br/></td></tr> <tr><td class="memItemLeft" align="right" valign="top">char *&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#a33b854554a3670d956c7b1efccb99a06">get_key</a> (size_t *sp, bool <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#a713c2689362977000f19511b8acdf00e">step</a>=false)</td></tr> <tr><td class="mdescLeft">&#160;</td><td class="mdescRight">Get the key of the current record. 
<a href="#a33b854554a3670d956c7b1efccb99a06"></a><br/></td></tr> <tr><td class="memItemLeft" align="right" valign="top">bool&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#ac7de2636e758680c00027a949bb60d84">get_key</a> (std::string *key, bool <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#a713c2689362977000f19511b8acdf00e">step</a>=false)</td></tr> <tr><td class="mdescLeft">&#160;</td><td class="mdescRight">Get the key of the current record. <a href="#ac7de2636e758680c00027a949bb60d84"></a><br/></td></tr> <tr><td class="memItemLeft" align="right" valign="top">char *&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#ab10ad473c090792e171783ca67987528">get_value</a> (size_t *sp, bool <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#a713c2689362977000f19511b8acdf00e">step</a>=false)</td></tr> <tr><td class="mdescLeft">&#160;</td><td class="mdescRight">Get the value of the current record. <a href="#ab10ad473c090792e171783ca67987528"></a><br/></td></tr> <tr><td class="memItemLeft" align="right" valign="top">bool&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#a30f383c7a61ab1854ca3e9195183b0a7">get_value</a> (std::string *value, bool <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#a713c2689362977000f19511b8acdf00e">step</a>=false)</td></tr> <tr><td class="mdescLeft">&#160;</td><td class="mdescRight">Get the value of the current record. 
<a href="#a30f383c7a61ab1854ca3e9195183b0a7"></a><br/></td></tr> <tr><td class="memItemLeft" align="right" valign="top">char *&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#aeb882deaa1a99d1b80d00d1959bfbf66">get</a> (size_t *ksp, const char **vbp, size_t *vsp, int64_t *xtp=NULL, bool <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#a713c2689362977000f19511b8acdf00e">step</a>=false)</td></tr> <tr><td class="mdescLeft">&#160;</td><td class="mdescRight">Get a pair of the key and the value of the current record. <a href="#aeb882deaa1a99d1b80d00d1959bfbf66"></a><br/></td></tr> <tr><td class="memItemLeft" align="right" valign="top">bool&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#aa5ed0f33e112d60a6abaeddfa56c9daa">get</a> (std::string *key, std::string *value, int64_t *xtp=NULL, bool <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#a713c2689362977000f19511b8acdf00e">step</a>=false)</td></tr> <tr><td class="mdescLeft">&#160;</td><td class="mdescRight">Get a pair of the key and the value of the current record. <a href="#aa5ed0f33e112d60a6abaeddfa56c9daa"></a><br/></td></tr> <tr><td class="memItemLeft" align="right" valign="top">char *&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#ac342400f149bc59d15f037c11b3542cb">seize</a> (size_t *ksp, const char **vbp, size_t *vsp, int64_t *xtp=NULL)</td></tr> <tr><td class="mdescLeft">&#160;</td><td class="mdescRight">Get a pair of the key and the value of the current record and remove it atomically. 
<a href="#ac342400f149bc59d15f037c11b3542cb"></a><br/></td></tr> <tr><td class="memItemLeft" align="right" valign="top">bool&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#a5b1e562d376f3c405637dda43639b28c">seize</a> (std::string *key, std::string *value, int64_t *xtp=NULL)</td></tr> <tr><td class="mdescLeft">&#160;</td><td class="mdescRight">Get a pair of the key and the value of the current record and remove it atomically. <a href="#a5b1e562d376f3c405637dda43639b28c"></a><br/></td></tr> <tr><td class="memItemLeft" align="right" valign="top"><a class="el" href="classkyototycoon_1_1RemoteDB.html">RemoteDB</a> *&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#a416bd06dbcf0162fd8abdd129c887b51">db</a> ()</td></tr> <tr><td class="mdescLeft">&#160;</td><td class="mdescRight">Get the database object. <a href="#a416bd06dbcf0162fd8abdd129c887b51"></a><br/></td></tr> <tr><td class="memItemLeft" align="right" valign="top"><a class="el" href="classkyototycoon_1_1RemoteDB_1_1Error.html">Error</a>&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#ad73aa44cb2c65aa31f2ea46236de45fa">error</a> ()</td></tr> <tr><td class="mdescLeft">&#160;</td><td class="mdescRight">Get the last happened error. 
<a href="#ad73aa44cb2c65aa31f2ea46236de45fa"></a><br/></td></tr> <tr><td colspan="2"><h2><a name="friends"></a> Friends</h2></td></tr> <tr><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="a726a06fc5e5997aaf7dab1fe13ac5fcc"></a><!-- doxytag: member="kyototycoon::RemoteDB::Cursor::RemoteDB" ref="a726a06fc5e5997aaf7dab1fe13ac5fcc" args="" --> class&#160;</td><td class="memItemRight" valign="bottom"><b>RemoteDB</b></td></tr> </table> <hr/><a name="details" id="details"></a><h2>Detailed Description</h2> <div class="textblock"><p><a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html" title="Cursor to indicate a record.">Cursor</a> to indicate a record. </p> </div><hr/><h2>Constructor &amp; Destructor Documentation</h2> <a class="anchor" id="a0cc3bb3071eefbcec2e91f53b0fe8af4"></a><!-- doxytag: member="kyototycoon::RemoteDB::Cursor::Cursor" ref="a0cc3bb3071eefbcec2e91f53b0fe8af4" args="(RemoteDB *db)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname"><a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#a0cc3bb3071eefbcec2e91f53b0fe8af4">kyototycoon::RemoteDB::Cursor::Cursor</a> </td> <td>(</td> <td class="paramtype"><a class="el" href="classkyototycoon_1_1RemoteDB.html">RemoteDB</a> *&#160;</td> <td class="paramname"><em>db</em></td><td>)</td> <td><code> [explicit]</code></td> </tr> </table> </div> <div class="memdoc"> <p>Constructor. </p> <dl class="params"><dt><b>Parameters:</b></dt><dd> <table class="params"> <tr><td class="paramname">db</td><td>the container database object. 
</td></tr> </table> </dd> </dl> </div> </div> <a class="anchor" id="ada51ddbe2aa9054a774ebb704cccfcb6"></a><!-- doxytag: member="kyototycoon::RemoteDB::Cursor::~Cursor" ref="ada51ddbe2aa9054a774ebb704cccfcb6" args="()" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">virtual <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#ada51ddbe2aa9054a774ebb704cccfcb6">kyototycoon::RemoteDB::Cursor::~Cursor</a> </td> <td>(</td> <td class="paramname"></td><td>)</td> <td><code> [virtual]</code></td> </tr> </table> </div> <div class="memdoc"> <p>Destructor. </p> </div> </div> <hr/><h2>Member Function Documentation</h2> <a class="anchor" id="a063e7d2447cf42a084a362e665e9ebc5"></a><!-- doxytag: member="kyototycoon::RemoteDB::Cursor::jump" ref="a063e7d2447cf42a084a362e665e9ebc5" args="()" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">bool <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#a063e7d2447cf42a084a362e665e9ebc5">kyototycoon::RemoteDB::Cursor::jump</a> </td> <td>(</td> <td class="paramname"></td><td>)</td> <td></td> </tr> </table> </div> <div class="memdoc"> <p>Jump the cursor to the first record for forward scan. </p> <dl class="return"><dt><b>Returns:</b></dt><dd>true on success, or false on failure. 
</dd></dl> </div> </div> <a class="anchor" id="a0026dbd36aff00bd5388246a1a69bd57"></a><!-- doxytag: member="kyototycoon::RemoteDB::Cursor::jump" ref="a0026dbd36aff00bd5388246a1a69bd57" args="(const char *kbuf, size_t ksiz)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">bool <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#a063e7d2447cf42a084a362e665e9ebc5">kyototycoon::RemoteDB::Cursor::jump</a> </td> <td>(</td> <td class="paramtype">const char *&#160;</td> <td class="paramname"><em>kbuf</em>, </td> </tr> <tr> <td class="paramkey"></td> <td></td> <td class="paramtype">size_t&#160;</td> <td class="paramname"><em>ksiz</em>&#160;</td> </tr> <tr> <td></td> <td>)</td> <td></td><td></td> </tr> </table> </div> <div class="memdoc"> <p>Jump the cursor to a record for forward scan. </p> <dl class="params"><dt><b>Parameters:</b></dt><dd> <table class="params"> <tr><td class="paramname">kbuf</td><td>the pointer to the key region. </td></tr> <tr><td class="paramname">ksiz</td><td>the size of the key region. </td></tr> </table> </dd> </dl> <dl class="return"><dt><b>Returns:</b></dt><dd>true on success, or false on failure. </dd></dl> </div> </div> <a class="anchor" id="abbcb45db72373c2dacd61eaf1b7f3445"></a><!-- doxytag: member="kyototycoon::RemoteDB::Cursor::jump" ref="abbcb45db72373c2dacd61eaf1b7f3445" args="(const std::string &amp;key)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">bool <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#a063e7d2447cf42a084a362e665e9ebc5">kyototycoon::RemoteDB::Cursor::jump</a> </td> <td>(</td> <td class="paramtype">const std::string &amp;&#160;</td> <td class="paramname"><em>key</em></td><td>)</td> <td></td> </tr> </table> </div> <div class="memdoc"> <p>Jump the cursor to a record for forward scan. 
</p> <dl class="note"><dt><b>Note:</b></dt><dd>Equal to the original <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#a063e7d2447cf42a084a362e665e9ebc5" title="Jump the cursor to the first record for forward scan.">Cursor::jump</a> method except that the parameter is std::string. </dd></dl> </div> </div> <a class="anchor" id="afcee9f6147e0a0a905c3b65d48bb4421"></a><!-- doxytag: member="kyototycoon::RemoteDB::Cursor::jump_back" ref="afcee9f6147e0a0a905c3b65d48bb4421" args="()" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">bool <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#afcee9f6147e0a0a905c3b65d48bb4421">kyototycoon::RemoteDB::Cursor::jump_back</a> </td> <td>(</td> <td class="paramname"></td><td>)</td> <td></td> </tr> </table> </div> <div class="memdoc"> <p>Jump the cursor to the last record for backward scan. </p> <dl class="return"><dt><b>Returns:</b></dt><dd>true on success, or false on failure. </dd></dl> <dl class="note"><dt><b>Note:</b></dt><dd>This method is dedicated to tree databases. Some database types, especially hash databases, may provide a dummy implementation. 
</dd></dl> </div> </div> <a class="anchor" id="a4ed775b5346f9ebeb72dffbceed73a9f"></a><!-- doxytag: member="kyototycoon::RemoteDB::Cursor::jump_back" ref="a4ed775b5346f9ebeb72dffbceed73a9f" args="(const char *kbuf, size_t ksiz)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">bool <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#afcee9f6147e0a0a905c3b65d48bb4421">kyototycoon::RemoteDB::Cursor::jump_back</a> </td> <td>(</td> <td class="paramtype">const char *&#160;</td> <td class="paramname"><em>kbuf</em>, </td> </tr> <tr> <td class="paramkey"></td> <td></td> <td class="paramtype">size_t&#160;</td> <td class="paramname"><em>ksiz</em>&#160;</td> </tr> <tr> <td></td> <td>)</td> <td></td><td></td> </tr> </table> </div> <div class="memdoc"> <p>Jump the cursor to a record for backward scan. </p> <dl class="params"><dt><b>Parameters:</b></dt><dd> <table class="params"> <tr><td class="paramname">kbuf</td><td>the pointer to the key region. </td></tr> <tr><td class="paramname">ksiz</td><td>the size of the key region. </td></tr> </table> </dd> </dl> <dl class="return"><dt><b>Returns:</b></dt><dd>true on success, or false on failure. </dd></dl> <dl class="note"><dt><b>Note:</b></dt><dd>This method is dedicated to tree databases. Some database types, especially hash databases, may provide a dummy implementation. 
</dd></dl> </div> </div> <a class="anchor" id="a113ddb1a70a5e04a70398cfdffcce0fd"></a><!-- doxytag: member="kyototycoon::RemoteDB::Cursor::jump_back" ref="a113ddb1a70a5e04a70398cfdffcce0fd" args="(const std::string &amp;key)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">bool <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#afcee9f6147e0a0a905c3b65d48bb4421">kyototycoon::RemoteDB::Cursor::jump_back</a> </td> <td>(</td> <td class="paramtype">const std::string &amp;&#160;</td> <td class="paramname"><em>key</em></td><td>)</td> <td></td> </tr> </table> </div> <div class="memdoc"> <p>Jump the cursor to a record for backward scan. </p> <dl class="note"><dt><b>Note:</b></dt><dd>Equal to the original <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#afcee9f6147e0a0a905c3b65d48bb4421" title="Jump the cursor to the last record for backward scan.">Cursor::jump_back</a> method except that the parameter is std::string. </dd></dl> </div> </div> <a class="anchor" id="a713c2689362977000f19511b8acdf00e"></a><!-- doxytag: member="kyototycoon::RemoteDB::Cursor::step" ref="a713c2689362977000f19511b8acdf00e" args="()" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">bool <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#a713c2689362977000f19511b8acdf00e">kyototycoon::RemoteDB::Cursor::step</a> </td> <td>(</td> <td class="paramname"></td><td>)</td> <td></td> </tr> </table> </div> <div class="memdoc"> <p>Step the cursor to the next record. </p> <dl class="return"><dt><b>Returns:</b></dt><dd>true on success, or false on failure. 
</dd></dl> </div> </div> <a class="anchor" id="adea62efb8472a9d68cd78a85241c700a"></a><!-- doxytag: member="kyototycoon::RemoteDB::Cursor::step_back" ref="adea62efb8472a9d68cd78a85241c700a" args="()" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">bool <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#adea62efb8472a9d68cd78a85241c700a">kyototycoon::RemoteDB::Cursor::step_back</a> </td> <td>(</td> <td class="paramname"></td><td>)</td> <td></td> </tr> </table> </div> <div class="memdoc"> <p>Step the cursor to the previous record. </p> <dl class="return"><dt><b>Returns:</b></dt><dd>true on success, or false on failure. </dd></dl> <dl class="note"><dt><b>Note:</b></dt><dd>This method is dedicated to tree databases. Some database types, especially hash databases, may provide a dummy implementation. </dd></dl> </div> </div> <a class="anchor" id="aa7fd26a093f5826b19f316c3a8a49de8"></a><!-- doxytag: member="kyototycoon::RemoteDB::Cursor::set_value" ref="aa7fd26a093f5826b19f316c3a8a49de8" args="(const char *vbuf, size_t vsiz, int64_t xt=kc::INT64MAX, bool step=false)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">bool <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#aa7fd26a093f5826b19f316c3a8a49de8">kyototycoon::RemoteDB::Cursor::set_value</a> </td> <td>(</td> <td class="paramtype">const char *&#160;</td> <td class="paramname"><em>vbuf</em>, </td> </tr> <tr> <td class="paramkey"></td> <td></td> <td class="paramtype">size_t&#160;</td> <td class="paramname"><em>vsiz</em>, </td> </tr> <tr> <td class="paramkey"></td> <td></td> <td class="paramtype">int64_t&#160;</td> <td class="paramname"><em>xt</em> = <code>kc::INT64MAX</code>, </td> </tr> <tr> <td class="paramkey"></td> <td></td> <td class="paramtype">bool&#160;</td> <td class="paramname"><em>step</em> = <code>false</code>&#160;</td> </tr> <tr> <td></td> <td>)</td> <td></td><td></td> </tr> 
</table> </div> <div class="memdoc"> <p>Set the value of the current record. </p> <dl class="params"><dt><b>Parameters:</b></dt><dd> <table class="params"> <tr><td class="paramname">vbuf</td><td>the pointer to the value region. </td></tr> <tr><td class="paramname">vsiz</td><td>the size of the value region. </td></tr> <tr><td class="paramname">xt</td><td>the expiration time from now in seconds. If it is negative, the absolute value is treated as the epoch time. </td></tr> <tr><td class="paramname">step</td><td>true to move the cursor to the next record, or false for no move. </td></tr> </table> </dd> </dl> <dl class="return"><dt><b>Returns:</b></dt><dd>true on success, or false on failure. </dd></dl> </div> </div> <a class="anchor" id="ad4dd40c17e7f4c7472d93f837b5829df"></a><!-- doxytag: member="kyototycoon::RemoteDB::Cursor::set_value_str" ref="ad4dd40c17e7f4c7472d93f837b5829df" args="(const std::string &amp;value, int64_t xt=kc::INT64MAX, bool step=false)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">bool <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#ad4dd40c17e7f4c7472d93f837b5829df">kyototycoon::RemoteDB::Cursor::set_value_str</a> </td> <td>(</td> <td class="paramtype">const std::string &amp;&#160;</td> <td class="paramname"><em>value</em>, </td> </tr> <tr> <td class="paramkey"></td> <td></td> <td class="paramtype">int64_t&#160;</td> <td class="paramname"><em>xt</em> = <code>kc::INT64MAX</code>, </td> </tr> <tr> <td class="paramkey"></td> <td></td> <td class="paramtype">bool&#160;</td> <td class="paramname"><em>step</em> = <code>false</code>&#160;</td> </tr> <tr> <td></td> <td>)</td> <td></td><td></td> </tr> </table> </div> <div class="memdoc"> <p>Set the value of the current record. 
</p> <dl class="note"><dt><b>Note:</b></dt><dd>Equal to the original <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#aa7fd26a093f5826b19f316c3a8a49de8" title="Set the value of the current record.">Cursor::set_value</a> method except that the parameter is std::string. </dd></dl> </div> </div> <a class="anchor" id="a3bfe7831e74eb6e770081624e2b33c9a"></a><!-- doxytag: member="kyototycoon::RemoteDB::Cursor::remove" ref="a3bfe7831e74eb6e770081624e2b33c9a" args="()" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">bool <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#a3bfe7831e74eb6e770081624e2b33c9a">kyototycoon::RemoteDB::Cursor::remove</a> </td> <td>(</td> <td class="paramname"></td><td>)</td> <td></td> </tr> </table> </div> <div class="memdoc"> <p>Remove the current record. </p> <dl class="return"><dt><b>Returns:</b></dt><dd>true on success, or false on failure. </dd></dl> <dl class="note"><dt><b>Note:</b></dt><dd>If no record corresponds to the key, false is returned. The cursor is moved to the next record implicitly. </dd></dl> </div> </div> <a class="anchor" id="a33b854554a3670d956c7b1efccb99a06"></a><!-- doxytag: member="kyototycoon::RemoteDB::Cursor::get_key" ref="a33b854554a3670d956c7b1efccb99a06" args="(size_t *sp, bool step=false)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">char* <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#a33b854554a3670d956c7b1efccb99a06">kyototycoon::RemoteDB::Cursor::get_key</a> </td> <td>(</td> <td class="paramtype">size_t *&#160;</td> <td class="paramname"><em>sp</em>, </td> </tr> <tr> <td class="paramkey"></td> <td></td> <td class="paramtype">bool&#160;</td> <td class="paramname"><em>step</em> = <code>false</code>&#160;</td> </tr> <tr> <td></td> <td>)</td> <td></td><td></td> </tr> </table> </div> <div class="memdoc"> <p>Get the key of the current record. 
</p> <dl class="params"><dt><b>Parameters:</b></dt><dd> <table class="params"> <tr><td class="paramname">sp</td><td>the pointer to the variable into which the size of the region of the return value is assigned. </td></tr> <tr><td class="paramname">step</td><td>true to move the cursor to the next record, or false for no move. </td></tr> </table> </dd> </dl> <dl class="return"><dt><b>Returns:</b></dt><dd>the pointer to the key region of the current record, or NULL on failure. </dd></dl> <dl class="note"><dt><b>Note:</b></dt><dd>If the cursor is invalidated, NULL is returned. Because an additional zero code is appended at the end of the region of the return value, the return value can be treated as a C-style string. Because the region of the return value is allocated with the new[] operator, it should be released with the delete[] operator when it is no longer in use. </dd></dl> </div> </div> <a class="anchor" id="ac7de2636e758680c00027a949bb60d84"></a><!-- doxytag: member="kyototycoon::RemoteDB::Cursor::get_key" ref="ac7de2636e758680c00027a949bb60d84" args="(std::string *key, bool step=false)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">bool <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#a33b854554a3670d956c7b1efccb99a06">kyototycoon::RemoteDB::Cursor::get_key</a> </td> <td>(</td> <td class="paramtype">std::string *&#160;</td> <td class="paramname"><em>key</em>, </td> </tr> <tr> <td class="paramkey"></td> <td></td> <td class="paramtype">bool&#160;</td> <td class="paramname"><em>step</em> = <code>false</code>&#160;</td> </tr> <tr> <td></td> <td>)</td> <td></td><td></td> </tr> </table> </div> <div class="memdoc"> <p>Get the key of the current record. 
</p> <dl class="note"><dt><b>Note:</b></dt><dd>Equal to the original <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#a33b854554a3670d956c7b1efccb99a06" title="Get the key of the current record.">Cursor::get_key</a> method except that a parameter is a string to contain the result and the return value is bool for success. </dd></dl> </div> </div> <a class="anchor" id="ab10ad473c090792e171783ca67987528"></a><!-- doxytag: member="kyototycoon::RemoteDB::Cursor::get_value" ref="ab10ad473c090792e171783ca67987528" args="(size_t *sp, bool step=false)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">char* <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#ab10ad473c090792e171783ca67987528">kyototycoon::RemoteDB::Cursor::get_value</a> </td> <td>(</td> <td class="paramtype">size_t *&#160;</td> <td class="paramname"><em>sp</em>, </td> </tr> <tr> <td class="paramkey"></td> <td></td> <td class="paramtype">bool&#160;</td> <td class="paramname"><em>step</em> = <code>false</code>&#160;</td> </tr> <tr> <td></td> <td>)</td> <td></td><td></td> </tr> </table> </div> <div class="memdoc"> <p>Get the value of the current record. </p> <dl class="params"><dt><b>Parameters:</b></dt><dd> <table class="params"> <tr><td class="paramname">sp</td><td>the pointer to the variable into which the size of the region of the return value is assigned. </td></tr> <tr><td class="paramname">step</td><td>true to move the cursor to the next record, or false for no move. </td></tr> </table> </dd> </dl> <dl class="return"><dt><b>Returns:</b></dt><dd>the pointer to the value region of the current record, or NULL on failure. </dd></dl> <dl class="note"><dt><b>Note:</b></dt><dd>If the cursor is invalidated, NULL is returned. Because an additional zero code is appended at the end of the region of the return value, the return value can be treated as a C-style string. 
Because the region of the return value is allocated with the new[] operator, it should be released with the delete[] operator when it is no longer in use. </dd></dl> </div> </div> <a class="anchor" id="a30f383c7a61ab1854ca3e9195183b0a7"></a><!-- doxytag: member="kyototycoon::RemoteDB::Cursor::get_value" ref="a30f383c7a61ab1854ca3e9195183b0a7" args="(std::string *value, bool step=false)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">bool <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#ab10ad473c090792e171783ca67987528">kyototycoon::RemoteDB::Cursor::get_value</a> </td> <td>(</td> <td class="paramtype">std::string *&#160;</td> <td class="paramname"><em>value</em>, </td> </tr> <tr> <td class="paramkey"></td> <td></td> <td class="paramtype">bool&#160;</td> <td class="paramname"><em>step</em> = <code>false</code>&#160;</td> </tr> <tr> <td></td> <td>)</td> <td></td><td></td> </tr> </table> </div> <div class="memdoc"> <p>Get the value of the current record. </p> <dl class="note"><dt><b>Note:</b></dt><dd>Equal to the original <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#ab10ad473c090792e171783ca67987528" title="Get the value of the current record.">Cursor::get_value</a> method except that a parameter is a string to contain the result and the return value is bool for success. 
</dd></dl> </div> </div> <a class="anchor" id="aeb882deaa1a99d1b80d00d1959bfbf66"></a><!-- doxytag: member="kyototycoon::RemoteDB::Cursor::get" ref="aeb882deaa1a99d1b80d00d1959bfbf66" args="(size_t *ksp, const char **vbp, size_t *vsp, int64_t *xtp=NULL, bool step=false)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">char* <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#aeb882deaa1a99d1b80d00d1959bfbf66">kyototycoon::RemoteDB::Cursor::get</a> </td> <td>(</td> <td class="paramtype">size_t *&#160;</td> <td class="paramname"><em>ksp</em>, </td> </tr> <tr> <td class="paramkey"></td> <td></td> <td class="paramtype">const char **&#160;</td> <td class="paramname"><em>vbp</em>, </td> </tr> <tr> <td class="paramkey"></td> <td></td> <td class="paramtype">size_t *&#160;</td> <td class="paramname"><em>vsp</em>, </td> </tr> <tr> <td class="paramkey"></td> <td></td> <td class="paramtype">int64_t *&#160;</td> <td class="paramname"><em>xtp</em> = <code>NULL</code>, </td> </tr> <tr> <td class="paramkey"></td> <td></td> <td class="paramtype">bool&#160;</td> <td class="paramname"><em>step</em> = <code>false</code>&#160;</td> </tr> <tr> <td></td> <td>)</td> <td></td><td></td> </tr> </table> </div> <div class="memdoc"> <p>Get a pair of the key and the value of the current record. </p> <dl class="params"><dt><b>Parameters:</b></dt><dd> <table class="params"> <tr><td class="paramname">ksp</td><td>the pointer to the variable into which the size of the region of the return value is assigned. </td></tr> <tr><td class="paramname">vbp</td><td>the pointer to the variable into which the pointer to the value region is assigned. </td></tr> <tr><td class="paramname">vsp</td><td>the pointer to the variable into which the size of the value region is assigned. </td></tr> <tr><td class="paramname">xtp</td><td>the pointer to the variable into which the absolute expiration time is assigned. If it is NULL, it is ignored. 
</td></tr> <tr><td class="paramname">step</td><td>true to move the cursor to the next record, or false for no move. </td></tr> </table> </dd> </dl> <dl class="return"><dt><b>Returns:</b></dt><dd>the pointer to the pair of the key region, or NULL on failure. </dd></dl> <dl class="note"><dt><b>Note:</b></dt><dd>If the cursor is invalidated, NULL is returned. Because an additional zero code is appended at the end of each region of the key and the value, each region can be treated as a C-style string. The return value should be deleted explicitly by the caller with the delete[] operator. </dd></dl> </div> </div> <a class="anchor" id="aa5ed0f33e112d60a6abaeddfa56c9daa"></a><!-- doxytag: member="kyototycoon::RemoteDB::Cursor::get" ref="aa5ed0f33e112d60a6abaeddfa56c9daa" args="(std::string *key, std::string *value, int64_t *xtp=NULL, bool step=false)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">bool <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#aeb882deaa1a99d1b80d00d1959bfbf66">kyototycoon::RemoteDB::Cursor::get</a> </td> <td>(</td> <td class="paramtype">std::string *&#160;</td> <td class="paramname"><em>key</em>, </td> </tr> <tr> <td class="paramkey"></td> <td></td> <td class="paramtype">std::string *&#160;</td> <td class="paramname"><em>value</em>, </td> </tr> <tr> <td class="paramkey"></td> <td></td> <td class="paramtype">int64_t *&#160;</td> <td class="paramname"><em>xtp</em> = <code>NULL</code>, </td> </tr> <tr> <td class="paramkey"></td> <td></td> <td class="paramtype">bool&#160;</td> <td class="paramname"><em>step</em> = <code>false</code>&#160;</td> </tr> <tr> <td></td> <td>)</td> <td></td><td></td> </tr> </table> </div> <div class="memdoc"> <p>Get a pair of the key and the value of the current record. 
</p> <dl class="note"><dt><b>Note:</b></dt><dd>Equal to the original <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#aeb882deaa1a99d1b80d00d1959bfbf66" title="Get a pair of the key and the value of the current record.">Cursor::get</a> method except that parameters are strings to contain the result and the return value is bool for success. </dd></dl> </div> </div> <a class="anchor" id="ac342400f149bc59d15f037c11b3542cb"></a><!-- doxytag: member="kyototycoon::RemoteDB::Cursor::seize" ref="ac342400f149bc59d15f037c11b3542cb" args="(size_t *ksp, const char **vbp, size_t *vsp, int64_t *xtp=NULL)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">char* <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#ac342400f149bc59d15f037c11b3542cb">kyototycoon::RemoteDB::Cursor::seize</a> </td> <td>(</td> <td class="paramtype">size_t *&#160;</td> <td class="paramname"><em>ksp</em>, </td> </tr> <tr> <td class="paramkey"></td> <td></td> <td class="paramtype">const char **&#160;</td> <td class="paramname"><em>vbp</em>, </td> </tr> <tr> <td class="paramkey"></td> <td></td> <td class="paramtype">size_t *&#160;</td> <td class="paramname"><em>vsp</em>, </td> </tr> <tr> <td class="paramkey"></td> <td></td> <td class="paramtype">int64_t *&#160;</td> <td class="paramname"><em>xtp</em> = <code>NULL</code>&#160;</td> </tr> <tr> <td></td> <td>)</td> <td></td><td></td> </tr> </table> </div> <div class="memdoc"> <p>Get a pair of the key and the value of the current record and remove it atomically. </p> <dl class="params"><dt><b>Parameters:</b></dt><dd> <table class="params"> <tr><td class="paramname">ksp</td><td>the pointer to the variable into which the size of the region of the return value is assigned. </td></tr> <tr><td class="paramname">vbp</td><td>the pointer to the variable into which the pointer to the value region is assigned. 
</td></tr> <tr><td class="paramname">vsp</td><td>the pointer to the variable into which the size of the value region is assigned. </td></tr> <tr><td class="paramname">xtp</td><td>the pointer to the variable into which the absolute expiration time is assigned. If it is NULL, it is ignored. </td></tr> </table> </dd> </dl> <dl class="return"><dt><b>Returns:</b></dt><dd>the pointer to the pair of the key region, or NULL on failure. </dd></dl> <dl class="note"><dt><b>Note:</b></dt><dd>If the cursor is invalidated, NULL is returned. Because an additional zero code is appended at the end of each region of the key and the value, each region can be treated as a C-style string. The return value should be deleted explicitly by the caller with the delete[] operator. The cursor is moved to the next record implicitly. </dd></dl> </div> </div> <a class="anchor" id="a5b1e562d376f3c405637dda43639b28c"></a><!-- doxytag: member="kyototycoon::RemoteDB::Cursor::seize" ref="a5b1e562d376f3c405637dda43639b28c" args="(std::string *key, std::string *value, int64_t *xtp=NULL)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">bool <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#ac342400f149bc59d15f037c11b3542cb">kyototycoon::RemoteDB::Cursor::seize</a> </td> <td>(</td> <td class="paramtype">std::string *&#160;</td> <td class="paramname"><em>key</em>, </td> </tr> <tr> <td class="paramkey"></td> <td></td> <td class="paramtype">std::string *&#160;</td> <td class="paramname"><em>value</em>, </td> </tr> <tr> <td class="paramkey"></td> <td></td> <td class="paramtype">int64_t *&#160;</td> <td class="paramname"><em>xtp</em> = <code>NULL</code>&#160;</td> </tr> <tr> <td></td> <td>)</td> <td></td><td></td> </tr> </table> </div> <div class="memdoc"> <p>Get a pair of the key and the value of the current record and remove it atomically. 
</p> <dl class="note"><dt><b>Note:</b></dt><dd>Equal to the original <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#ac342400f149bc59d15f037c11b3542cb" title="Get a pair of the key and the value of the current record and remove it atomically.">Cursor::seize</a> method except that parameters are strings to contain the result and the return value is bool for success. </dd></dl> </div> </div> <a class="anchor" id="a416bd06dbcf0162fd8abdd129c887b51"></a><!-- doxytag: member="kyototycoon::RemoteDB::Cursor::db" ref="a416bd06dbcf0162fd8abdd129c887b51" args="()" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname"><a class="el" href="classkyototycoon_1_1RemoteDB.html">RemoteDB</a>* <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#a416bd06dbcf0162fd8abdd129c887b51">kyototycoon::RemoteDB::Cursor::db</a> </td> <td>(</td> <td class="paramname"></td><td>)</td> <td></td> </tr> </table> </div> <div class="memdoc"> <p>Get the database object. </p> <dl class="return"><dt><b>Returns:</b></dt><dd>the database object. </dd></dl> </div> </div> <a class="anchor" id="ad73aa44cb2c65aa31f2ea46236de45fa"></a><!-- doxytag: member="kyototycoon::RemoteDB::Cursor::error" ref="ad73aa44cb2c65aa31f2ea46236de45fa" args="()" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname"><a class="el" href="classkyototycoon_1_1RemoteDB_1_1Error.html">Error</a> <a class="el" href="classkyototycoon_1_1RemoteDB_1_1Cursor.html#ad73aa44cb2c65aa31f2ea46236de45fa">kyototycoon::RemoteDB::Cursor::error</a> </td> <td>(</td> <td class="paramname"></td><td>)</td> <td></td> </tr> </table> </div> <div class="memdoc"> <p>Get the last happened error. </p> <dl class="return"><dt><b>Returns:</b></dt><dd>the last happened error. 
</dd></dl> </div> </div> </div><!-- contents --> <hr class="footer"/><address class="footer"><small> Generated on Fri May 25 2012 02:44:32 for Kyoto Tycoon by &#160;<a href="http://www.doxygen.org/index.html"> <img class="footer" src="doxygen.png" alt="doxygen"/> </a> 1.7.6.1 </small></address> </body> </html>
/* Copyright (C) 2014-2015 de4dot@gmail.com This file is part of dnSpy dnSpy is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. dnSpy is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with dnSpy. If not, see <http://www.gnu.org/licenses/>. */ using System; using dnlib.DotNet; using dnlib.PE; namespace dnSpy.AsmEditor.Module { static class ModuleUtils { public static ModuleDef CreateNetModule(string name, Guid mvid, ClrVersion clrVersion) { return CreateModule(name, mvid, clrVersion, ModuleKind.NetModule); } public static ModuleDef CreateModule(string name, Guid mvid, ClrVersion clrVersion, ModuleKind kind, ModuleDef existingModule = null) { var module = CreateModuleDef(name, mvid, clrVersion, existingModule); module.Kind = kind; module.Characteristics = Characteristics._32BitMachine | Characteristics.ExecutableImage; if (kind == ModuleKind.Dll || kind == ModuleKind.NetModule) module.Characteristics |= Characteristics.Dll; module.DllCharacteristics = DllCharacteristics.TerminalServerAware | DllCharacteristics.NoSeh | DllCharacteristics.NxCompat | DllCharacteristics.DynamicBase; return module; } static ModuleDef CreateModuleDef(string name, Guid mvid, ClrVersion clrVersion, ModuleDef existingModule) { var clrValues = ClrVersionValues.GetValues(clrVersion); ModuleDef module; if (existingModule == null) module = new ModuleDefUser(name, mvid, clrValues.CorLibRef); else { module = existingModule; module.Name = name; module.Mvid = mvid; OverwriteAssembly(module.CorLibTypes.AssemblyRef, clrValues.CorLibRef); } module.UpdateRowId(module); module.RuntimeVersion = 
clrValues.RuntimeVersion; module.Cor20HeaderRuntimeVersion = clrValues.Cor20HeaderRuntimeVersion; module.TablesHeaderVersion = clrValues.TablesHeaderVersion; module.Location = string.Empty; return module; } static void OverwriteAssembly(AssemblyRef dst, AssemblyRef src) { dst.Name = src.Name; dst.Version = src.Version; dst.PublicKeyOrToken = src.PublicKeyOrToken; dst.Culture = src.Culture; dst.Attributes = src.Attributes; dst.Hash = src.Hash; } public static AssemblyDef AddToNewAssemblyDef(ModuleDef module, ModuleKind moduleKind, out Characteristics characteristics) { var asmDef = module.UpdateRowId(new AssemblyDefUser(GetAssemblyName(module))); asmDef.Modules.Add(module); WriteNewModuleKind(module, moduleKind, out characteristics); return asmDef; } static string GetAssemblyName(ModuleDef module) { string name = module.Name; if (name.EndsWith(".exe", StringComparison.OrdinalIgnoreCase) || name.EndsWith(".dll", StringComparison.OrdinalIgnoreCase)) name = name.Substring(0, name.Length - 4); else if (name.EndsWith(".netmodule", StringComparison.OrdinalIgnoreCase)) name = name.Substring(0, name.Length - 10); if (!string.IsNullOrWhiteSpace(name)) return name; return module.Name; } public static void WriteNewModuleKind(ModuleDef module, ModuleKind moduleKind, out Characteristics characteristics) { module.Kind = moduleKind; characteristics = module.Characteristics; module.Characteristics = SaveModule.CharacteristicsHelper.GetCharacteristics(module.Characteristics, moduleKind); } } }
<?php /** * interface/therapy_groups/therapy_groups_models/therapy_groups_encounters_model.php contains the model for therapy group encounters. * * This model fetches the encounters for the therapy group from the DB. * * Copyright (C) 2016 Shachar Zilbershlag <shaharzi@matrix.co.il> * Copyright (C) 2016 Amiel Elboim <amielel@matrix.co.il> * * LICENSE: This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 3 * of the License, or (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://opensource.org/licenses/gpl-license.php>;. * * @package OpenEMR * @author Shachar Zilbershlag <shaharzi@matrix.co.il> * @author Amiel Elboim <amielel@matrix.co.il> * @link http://www.open-emr.org */ class Therapy_Groups_Encounters { const TABLE = 'form_groups_encounter'; /** * Get all encounters of specified group. * @param $gid * @return ADORecordSet_mysqli */ public function getGroupEncounters($gid) { $sql = "SELECT * FROM " . self::TABLE . " WHERE group_id = ? AND date >= CURDATE();"; $result = sqlStatement($sql, array($gid)); while ($row = sqlFetchArray($result)) { $encounters[] = $row; } return $encounters; } }
#!/bin/sh # Test suite for xalloc_die. # Copyright (C) 2009-2014 Free Software Foundation, Inc. # This file is part of the GNUlib Library. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. . "${srcdir=.}/init.sh"; path_prepend_ . test-xalloc-die${EXEEXT} > out 2> err case $? in 1) ;; *) Exit 1;; esac tr -d '\015' < err \ | sed 's,.*test-xalloc-die[.ex]*:,test-xalloc-die:,' > err2 || Exit 1 compare - err2 <<\EOF || Exit 1 test-xalloc-die: memory exhausted EOF test -s out && Exit 1 Exit $fail
@echo off start-all WebServer
/* Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto /* Package v1beta1 is a generated protocol buffer package. It is generated from these files: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto It has these top-level messages: MutatingWebhook MutatingWebhookConfiguration MutatingWebhookConfigurationList Rule RuleWithOperations ServiceReference ValidatingWebhook ValidatingWebhookConfiguration ValidatingWebhookConfigurationList WebhookClientConfig */ package v1beta1 import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" import strings "strings" import reflect "reflect" import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *MutatingWebhook) Reset() { *m = MutatingWebhook{} } func (*MutatingWebhook) ProtoMessage() {} func (*MutatingWebhook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } func (m *MutatingWebhookConfiguration) Reset() { *m = MutatingWebhookConfiguration{} } func (*MutatingWebhookConfiguration) ProtoMessage() {} func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} } func (*MutatingWebhookConfigurationList) ProtoMessage() {} func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } func (m *Rule) Reset() { *m = Rule{} } func (*Rule) ProtoMessage() {} func (*Rule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } func (m *RuleWithOperations) Reset() { *m = RuleWithOperations{} } func (*RuleWithOperations) ProtoMessage() {} func (*RuleWithOperations) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func (m *ServiceReference) Reset() { *m = ServiceReference{} } func (*ServiceReference) ProtoMessage() {} func (*ServiceReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } func (m *ValidatingWebhook) Reset() { *m = ValidatingWebhook{} } func (*ValidatingWebhook) ProtoMessage() {} func (*ValidatingWebhook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } func (m *ValidatingWebhookConfiguration) Reset() { *m = ValidatingWebhookConfiguration{} } func (*ValidatingWebhookConfiguration) ProtoMessage() {} func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} } func (*ValidatingWebhookConfigurationList) 
ProtoMessage() {} func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } func (*WebhookClientConfig) ProtoMessage() {} func (*WebhookClientConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func init() { proto.RegisterType((*MutatingWebhook)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhook") proto.RegisterType((*MutatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfiguration") proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfigurationList") proto.RegisterType((*Rule)(nil), "k8s.io.api.admissionregistration.v1beta1.Rule") proto.RegisterType((*RuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1beta1.RuleWithOperations") proto.RegisterType((*ServiceReference)(nil), "k8s.io.api.admissionregistration.v1beta1.ServiceReference") proto.RegisterType((*ValidatingWebhook)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhook") proto.RegisterType((*ValidatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhookConfiguration") proto.RegisterType((*ValidatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhookConfigurationList") proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.api.admissionregistration.v1beta1.WebhookClientConfig") } func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *MutatingWebhook) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) i += copy(dAtA[i:], m.Name) dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, 
uint64(m.ClientConfig.Size())) n1, err := m.ClientConfig.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n1 if len(m.Rules) > 0 { for _, msg := range m.Rules { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } if m.FailurePolicy != nil { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) i += copy(dAtA[i:], *m.FailurePolicy) } if m.NamespaceSelector != nil { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) n2, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n2 } if m.SideEffects != nil { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) i += copy(dAtA[i:], *m.SideEffects) } if m.TimeoutSeconds != nil { dAtA[i] = 0x38 i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) } if len(m.AdmissionReviewVersions) > 0 { for _, s := range m.AdmissionReviewVersions { dAtA[i] = 0x42 i++ l = len(s) for l >= 1<<7 { dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } dAtA[i] = uint8(l) i++ i += copy(dAtA[i:], s) } } if m.MatchPolicy != nil { dAtA[i] = 0x4a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) i += copy(dAtA[i:], *m.MatchPolicy) } if m.ReinvocationPolicy != nil { dAtA[i] = 0x52 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReinvocationPolicy))) i += copy(dAtA[i:], *m.ReinvocationPolicy) } if m.ObjectSelector != nil { dAtA[i] = 0x5a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectSelector.Size())) n3, err := m.ObjectSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n3 } return i, nil } func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *MutatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { 
var i int _ = i var l int _ = l dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n4 if len(m.Webhooks) > 0 { for _, msg := range m.Webhooks { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } return i, nil } func (m *MutatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *MutatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) n5, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n5 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } return i, nil } func (m *Rule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *Rule) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.APIGroups) > 0 { for _, s := range m.APIGroups { dAtA[i] = 0xa i++ l = len(s) for l >= 1<<7 { dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } dAtA[i] = uint8(l) i++ i += copy(dAtA[i:], s) } } if len(m.APIVersions) > 0 { for _, s := range m.APIVersions { dAtA[i] = 0x12 i++ l = len(s) for l >= 1<<7 { dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } dAtA[i] = uint8(l) i++ i += copy(dAtA[i:], s) } } if len(m.Resources) > 0 { for _, s := range m.Resources { dAtA[i] = 0x1a i++ l = len(s) for l >= 1<<7 { dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } dAtA[i] = uint8(l) i++ i += 
copy(dAtA[i:], s) } } if m.Scope != nil { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Scope))) i += copy(dAtA[i:], *m.Scope) } return i, nil } func (m *RuleWithOperations) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RuleWithOperations) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Operations) > 0 { for _, s := range m.Operations { dAtA[i] = 0xa i++ l = len(s) for l >= 1<<7 { dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } dAtA[i] = uint8(l) i++ i += copy(dAtA[i:], s) } } dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Rule.Size())) n6, err := m.Rule.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n6 return i, nil } func (m *ServiceReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) i += copy(dAtA[i:], m.Namespace) dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) i += copy(dAtA[i:], m.Name) if m.Path != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) i += copy(dAtA[i:], *m.Path) } if m.Port != nil { dAtA[i] = 0x20 i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) } return i, nil } func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ValidatingWebhook) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) i += copy(dAtA[i:], m.Name) dAtA[i] = 0x12 i++ i = 
encodeVarintGenerated(dAtA, i, uint64(m.ClientConfig.Size())) n7, err := m.ClientConfig.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n7 if len(m.Rules) > 0 { for _, msg := range m.Rules { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } if m.FailurePolicy != nil { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) i += copy(dAtA[i:], *m.FailurePolicy) } if m.NamespaceSelector != nil { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) n8, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n8 } if m.SideEffects != nil { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) i += copy(dAtA[i:], *m.SideEffects) } if m.TimeoutSeconds != nil { dAtA[i] = 0x38 i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) } if len(m.AdmissionReviewVersions) > 0 { for _, s := range m.AdmissionReviewVersions { dAtA[i] = 0x42 i++ l = len(s) for l >= 1<<7 { dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } dAtA[i] = uint8(l) i++ i += copy(dAtA[i:], s) } } if m.MatchPolicy != nil { dAtA[i] = 0x4a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) i += copy(dAtA[i:], *m.MatchPolicy) } if m.ObjectSelector != nil { dAtA[i] = 0x52 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectSelector.Size())) n9, err := m.ObjectSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n9 } return i, nil } func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) n10, err := 
m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n10 if len(m.Webhooks) > 0 { for _, msg := range m.Webhooks { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } return i, nil } func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) n11, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n11 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } return i, nil } func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Service != nil { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Service.Size())) n12, err := m.Service.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n12 } if m.CABundle != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) i += copy(dAtA[i:], m.CABundle) } if m.URL != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) i += copy(dAtA[i:], *m.URL) } return i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) return offset + 1 } func (m *MutatingWebhook) Size() (n int) { var l int 
_ = l l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) l = m.ClientConfig.Size() n += 1 + l + sovGenerated(uint64(l)) if len(m.Rules) > 0 { for _, e := range m.Rules { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } if m.FailurePolicy != nil { l = len(*m.FailurePolicy) n += 1 + l + sovGenerated(uint64(l)) } if m.NamespaceSelector != nil { l = m.NamespaceSelector.Size() n += 1 + l + sovGenerated(uint64(l)) } if m.SideEffects != nil { l = len(*m.SideEffects) n += 1 + l + sovGenerated(uint64(l)) } if m.TimeoutSeconds != nil { n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) } if len(m.AdmissionReviewVersions) > 0 { for _, s := range m.AdmissionReviewVersions { l = len(s) n += 1 + l + sovGenerated(uint64(l)) } } if m.MatchPolicy != nil { l = len(*m.MatchPolicy) n += 1 + l + sovGenerated(uint64(l)) } if m.ReinvocationPolicy != nil { l = len(*m.ReinvocationPolicy) n += 1 + l + sovGenerated(uint64(l)) } if m.ObjectSelector != nil { l = m.ObjectSelector.Size() n += 1 + l + sovGenerated(uint64(l)) } return n } func (m *MutatingWebhookConfiguration) Size() (n int) { var l int _ = l l = m.ObjectMeta.Size() n += 1 + l + sovGenerated(uint64(l)) if len(m.Webhooks) > 0 { for _, e := range m.Webhooks { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } return n } func (m *MutatingWebhookConfigurationList) Size() (n int) { var l int _ = l l = m.ListMeta.Size() n += 1 + l + sovGenerated(uint64(l)) if len(m.Items) > 0 { for _, e := range m.Items { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } return n } func (m *Rule) Size() (n int) { var l int _ = l if len(m.APIGroups) > 0 { for _, s := range m.APIGroups { l = len(s) n += 1 + l + sovGenerated(uint64(l)) } } if len(m.APIVersions) > 0 { for _, s := range m.APIVersions { l = len(s) n += 1 + l + sovGenerated(uint64(l)) } } if len(m.Resources) > 0 { for _, s := range m.Resources { l = len(s) n += 1 + l + sovGenerated(uint64(l)) } } if m.Scope != nil { l = len(*m.Scope) n += 1 + l + sovGenerated(uint64(l)) } return n 
}

// NOTE(review): everything below appears to be machine-generated protobuf
// marshaling code (protoc-gen-gogo style: sovGenerated, dAtA, iNdEx naming).
// Do not hand-edit; regenerate from the .proto source instead. Comments here
// are reading aids only; the code is unchanged.

// Size returns the serialized protobuf size in bytes of m.
// Each field contributes: 1 tag byte + payload length + varint length prefix.
func (m *RuleWithOperations) Size() (n int) {
	var l int
	_ = l
	if len(m.Operations) > 0 {
		for _, s := range m.Operations {
			l = len(s)
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	l = m.Rule.Size()
	n += 1 + l + sovGenerated(uint64(l))
	return n
}

// Size returns the serialized protobuf size in bytes of m.
// Optional fields (Path, Port) are counted only when non-nil.
func (m *ServiceReference) Size() (n int) {
	var l int
	_ = l
	l = len(m.Namespace)
	n += 1 + l + sovGenerated(uint64(l))
	l = len(m.Name)
	n += 1 + l + sovGenerated(uint64(l))
	if m.Path != nil {
		l = len(*m.Path)
		n += 1 + l + sovGenerated(uint64(l))
	}
	if m.Port != nil {
		// Port is a varint scalar: 1 tag byte + varint payload, no length prefix.
		n += 1 + sovGenerated(uint64(*m.Port))
	}
	return n
}

// Size returns the serialized protobuf size in bytes of m.
func (m *ValidatingWebhook) Size() (n int) {
	var l int
	_ = l
	l = len(m.Name)
	n += 1 + l + sovGenerated(uint64(l))
	l = m.ClientConfig.Size()
	n += 1 + l + sovGenerated(uint64(l))
	if len(m.Rules) > 0 {
		for _, e := range m.Rules {
			l = e.Size()
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	if m.FailurePolicy != nil {
		l = len(*m.FailurePolicy)
		n += 1 + l + sovGenerated(uint64(l))
	}
	if m.NamespaceSelector != nil {
		l = m.NamespaceSelector.Size()
		n += 1 + l + sovGenerated(uint64(l))
	}
	if m.SideEffects != nil {
		l = len(*m.SideEffects)
		n += 1 + l + sovGenerated(uint64(l))
	}
	if m.TimeoutSeconds != nil {
		n += 1 + sovGenerated(uint64(*m.TimeoutSeconds))
	}
	if len(m.AdmissionReviewVersions) > 0 {
		for _, s := range m.AdmissionReviewVersions {
			l = len(s)
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	if m.MatchPolicy != nil {
		l = len(*m.MatchPolicy)
		n += 1 + l + sovGenerated(uint64(l))
	}
	if m.ObjectSelector != nil {
		l = m.ObjectSelector.Size()
		n += 1 + l + sovGenerated(uint64(l))
	}
	return n
}

// Size returns the serialized protobuf size in bytes of m.
func (m *ValidatingWebhookConfiguration) Size() (n int) {
	var l int
	_ = l
	l = m.ObjectMeta.Size()
	n += 1 + l + sovGenerated(uint64(l))
	if len(m.Webhooks) > 0 {
		for _, e := range m.Webhooks {
			l = e.Size()
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	return n
}

// Size returns the serialized protobuf size in bytes of m.
func (m *ValidatingWebhookConfigurationList) Size() (n int) {
	var l int
	_ = l
	l = m.ListMeta.Size()
	n += 1 + l + sovGenerated(uint64(l))
	if len(m.Items) > 0 {
		for _, e := range m.Items {
			l = e.Size()
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	return n
}

// Size returns the serialized protobuf size in bytes of m.
// All three fields are optional and counted only when non-nil.
func (m *WebhookClientConfig) Size() (n int) {
	var l int
	_ = l
	if m.Service != nil {
		l = m.Service.Size()
		n += 1 + l + sovGenerated(uint64(l))
	}
	if m.CABundle != nil {
		l = len(m.CABundle)
		n += 1 + l + sovGenerated(uint64(l))
	}
	if m.URL != nil {
		l = len(*m.URL)
		n += 1 + l + sovGenerated(uint64(l))
	}
	return n
}

// sovGenerated returns the number of bytes needed to encode x as a
// protobuf base-128 varint (one byte per 7 bits, minimum one byte).
func sovGenerated(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			break
		}
	}
	return n
}

// sozGenerated returns the varint size of x after zig-zag encoding
// (used for sint32/sint64 fields).
func sozGenerated(x uint64) (n int) {
	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}

// String renders this as a debug string resembling a Go struct literal.
// The strings.Replace calls rewrite embedded type names to their
// qualified forms and strip a leading '&' from nested message output.
func (this *MutatingWebhook) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&MutatingWebhook{`,
		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
		`ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`,
		`Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + `,`,
		`FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`,
		`NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`,
		`SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`,
		`TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`,
		`AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`,
		`MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`,
		`ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`,
		`ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`,
		`}`,
	}, "")
	return s
}

// String renders this as a debug string resembling a Go struct literal.
func (this *MutatingWebhookConfiguration) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&MutatingWebhookConfiguration{`,
		`ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
		`Webhooks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Webhooks), "MutatingWebhook", "MutatingWebhook", 1), `&`, ``, 1) + `,`,
		`}`,
	}, "")
	return s
}

// String renders this as a debug string resembling a Go struct literal.
func (this *MutatingWebhookConfigurationList) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&MutatingWebhookConfigurationList{`,
		`ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`,
		`Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + `,`,
		`}`,
	}, "")
	return s
}

// String renders this as a debug string resembling a Go struct literal.
func (this *Rule) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&Rule{`,
		`APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`,
		`APIVersions:` + fmt.Sprintf("%v", this.APIVersions) + `,`,
		`Resources:` + fmt.Sprintf("%v", this.Resources) + `,`,
		`Scope:` + valueToStringGenerated(this.Scope) + `,`,
		`}`,
	}, "")
	return s
}

// String renders this as a debug string resembling a Go struct literal.
func (this *RuleWithOperations) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&RuleWithOperations{`,
		`Operations:` + fmt.Sprintf("%v", this.Operations) + `,`,
		`Rule:` + strings.Replace(strings.Replace(this.Rule.String(), "Rule", "Rule", 1), `&`, ``, 1) + `,`,
		`}`,
	}, "")
	return s
}

// String renders this as a debug string resembling a Go struct literal.
func (this *ServiceReference) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&ServiceReference{`,
		`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
		`Path:` + valueToStringGenerated(this.Path) + `,`,
		`Port:` + valueToStringGenerated(this.Port) + `,`,
		`}`,
	}, "")
	return s
}

// String renders this as a debug string resembling a Go struct literal.
func (this *ValidatingWebhook) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&ValidatingWebhook{`,
		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
		`ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`,
		`Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + `,`,
		`FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`,
		`NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`,
		`SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`,
		`TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`,
		`AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`,
		`MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`,
		`ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`,
		`}`,
	}, "")
	return s
}

// String renders this as a debug string resembling a Go struct literal.
func (this *ValidatingWebhookConfiguration) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&ValidatingWebhookConfiguration{`,
		`ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
		`Webhooks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Webhooks), "ValidatingWebhook", "ValidatingWebhook", 1), `&`, ``, 1) + `,`,
		`}`,
	}, "")
	return s
}

// String renders this as a debug string resembling a Go struct literal.
func (this *ValidatingWebhookConfigurationList) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`,
		`ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`,
		`Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + `,`,
		`}`,
	}, "")
	return s
}
// NOTE(review): generated protobuf wire-format decoders (protoc-gen-gogo
// style). Do not hand-edit; regenerate from the .proto source. Comments are
// reading aids only; the code is unchanged. Common decode pattern per field:
// read a varint tag, split it into field number and wire type, then decode a
// length-delimited payload (wire type 2) or a varint scalar (wire type 0).

// String renders this as a debug string resembling a Go struct literal.
func (this *WebhookClientConfig) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&WebhookClientConfig{`,
		`Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "ServiceReference", "ServiceReference", 1) + `,`,
		`CABundle:` + valueToStringGenerated(this.CABundle) + `,`,
		`URL:` + valueToStringGenerated(this.URL) + `,`,
		`}`,
	}, "")
	return s
}

// valueToStringGenerated formats a pointer-ish value for String() output:
// "nil" for a nil pointer, otherwise "*<dereferenced value>".
func valueToStringGenerated(v interface{}) string {
	rv := reflect.ValueOf(v)
	if rv.IsNil() {
		return "nil"
	}
	pv := reflect.Indirect(rv).Interface()
	return fmt.Sprintf("*%v", pv)
}

// Unmarshal decodes the protobuf wire format in dAtA into m.
func (m *MutatingWebhook) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field tag (varint: fieldNum<<3 | wireType).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: MutatingWebhook: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: MutatingWebhook: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1: // Name (string)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Name = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 2: // ClientConfig (embedded message)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3: // Rules (repeated message; one element appended per occurrence)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Rules = append(m.Rules, RuleWithOperations{})
			if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 4: // FailurePolicy (optional string enum)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			s := FailurePolicyType(dAtA[iNdEx:postIndex])
			m.FailurePolicy = &s
			iNdEx = postIndex
		case 5: // NamespaceSelector (optional message; allocated on first use)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.NamespaceSelector == nil {
				m.NamespaceSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}
			}
			if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 6: // SideEffects (optional string enum)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			s := SideEffectClass(dAtA[iNdEx:postIndex])
			m.SideEffects = &s
			iNdEx = postIndex
		case 7: // TimeoutSeconds (optional int32, varint wire type)
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType)
			}
			var v int32
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.TimeoutSeconds = &v
		case 8: // AdmissionReviewVersions (repeated string)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex]))
			iNdEx = postIndex
		case 9: // MatchPolicy (optional string enum)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			s := MatchPolicyType(dAtA[iNdEx:postIndex])
			m.MatchPolicy = &s
			iNdEx = postIndex
		case 10: // ReinvocationPolicy (optional string enum)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ReinvocationPolicy", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			s := ReinvocationPolicyType(dAtA[iNdEx:postIndex])
			m.ReinvocationPolicy = &s
			iNdEx = postIndex
		case 11: // ObjectSelector (optional message; allocated on first use)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.ObjectSelector == nil {
				m.ObjectSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}
			}
			if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it so newer writers stay compatible.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}

// Unmarshal decodes the protobuf wire format in dAtA into m.
func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: MutatingWebhookConfiguration: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: MutatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1: // ObjectMeta (embedded message)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2: // Webhooks (repeated message)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Webhooks = append(m.Webhooks, MutatingWebhook{})
			if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}

// Unmarshal decodes the protobuf wire format in dAtA into m.
func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: MutatingWebhookConfigurationList: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: MutatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1: // ListMeta (embedded message)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2: // Items (repeated message)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Items = append(m.Items, MutatingWebhookConfiguration{})
			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}

// Unmarshal decodes the protobuf wire format in dAtA into m.
func (m *Rule) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Rule: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1: // APIGroups (repeated string)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex]))
			iNdEx = postIndex
		case 2: // APIVersions (repeated string)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field APIVersions", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.APIVersions = append(m.APIVersions, string(dAtA[iNdEx:postIndex]))
			iNdEx = postIndex
		case 3: // Resources (repeated string)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex]))
			iNdEx = postIndex
		case 4: // Scope (optional string enum)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			s := ScopeType(dAtA[iNdEx:postIndex])
			m.Scope = &s
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}

// Unmarshal decodes the protobuf wire format in dAtA into m.
func (m *RuleWithOperations) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: RuleWithOperations: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: RuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1: // Operations (repeated string enum)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Operations", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Operations = append(m.Operations, OperationType(dAtA[iNdEx:postIndex]))
			iNdEx = postIndex
		case 2: // Rule (embedded message)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.Rule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}

// Unmarshal decodes the protobuf wire format in dAtA into m.
func (m *ServiceReference) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1: // Namespace (string)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Namespace = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 2: // Name (string)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Name = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 3: // Path (optional string)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			s := string(dAtA[iNdEx:postIndex])
			m.Path = &s
			iNdEx = postIndex
		case 4: // Port (optional int32, varint wire type)
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
			}
			var v int32
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.Port = &v
		default:
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}

// Unmarshal decodes the protobuf wire format in dAtA into m.
// Field layout mirrors MutatingWebhook minus ReinvocationPolicy;
// ObjectSelector is field 10 here (11 there).
func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ValidatingWebhook: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ValidatingWebhook: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1: // Name (string)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Name = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 2: // ClientConfig (embedded message)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3: // Rules (repeated message)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Rules = append(m.Rules, RuleWithOperations{})
			if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 4: // FailurePolicy (optional string enum)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			s := FailurePolicyType(dAtA[iNdEx:postIndex])
			m.FailurePolicy = &s
			iNdEx = postIndex
		case 5: // NamespaceSelector (optional message; allocated on first use)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.NamespaceSelector == nil {
				m.NamespaceSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}
			}
			if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 6: // SideEffects (optional string enum)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			s := SideEffectClass(dAtA[iNdEx:postIndex])
			m.SideEffects = &s
			iNdEx = postIndex
		case 7: // TimeoutSeconds (optional int32, varint wire type)
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType)
			}
			var v int32
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.TimeoutSeconds = &v
		case 8: // AdmissionReviewVersions (repeated string)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex]))
			iNdEx = postIndex
		case 9: // MatchPolicy (optional string enum)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			s := MatchPolicyType(dAtA[iNdEx:postIndex])
			m.MatchPolicy = &s
			iNdEx = postIndex
		case 10: // ObjectSelector (optional message; allocated on first use)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.ObjectSelector == nil {
				m.ObjectSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}
			}
			if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}

// Unmarshal decodes the protobuf wire format in dAtA into m.
func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ValidatingWebhookConfiguration: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ValidatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1: // ObjectMeta (embedded message)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2: // Webhooks (repeated message)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Webhooks = append(m.Webhooks, ValidatingWebhook{})
			if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}

// Unmarshal decodes the protobuf wire format in dAtA into m.
// (Body continues beyond this chunk of the file.)
func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l
{ preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ValidatingWebhookConfigurationList: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ValidatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.Items = append(m.Items, ValidatingWebhookConfiguration{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if 
iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: WebhookClientConfig: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: WebhookClientConfig: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Service == nil { m.Service = &ServiceReference{} } if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.CABundle = append(m.CABundle[:0], dAtA[iNdEx:postIndex]...) 
if m.CABundle == nil { m.CABundle = []byte{} } iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) m.URL = &s iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowGenerated } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } wireType := int(wire & 0x7) switch wireType { case 0: for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowGenerated } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } iNdEx++ if dAtA[iNdEx-1] < 0x80 { break } } return iNdEx, nil case 1: iNdEx += 8 return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowGenerated } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { break } } iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } return iNdEx, nil case 3: for { var innerWire uint64 var start int = iNdEx for shift := uint(0); ; shift += 7 { if shift >= 64 { return 
0, ErrIntOverflowGenerated } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } innerWireType := int(innerWire & 0x7) if innerWireType == 4 { break } next, err := skipGenerated(dAtA[start:]) if err != nil { return 0, err } iNdEx = start + next } return iNdEx, nil case 4: return iNdEx, nil case 5: iNdEx += 4 return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } } panic("unreachable") } var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) func init() { proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto", fileDescriptorGenerated) } var fileDescriptorGenerated = []byte{ // 1113 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0x4d, 0x6f, 0x1b, 0xc5, 0x1b, 0xcf, 0xc6, 0x76, 0x6d, 0x8f, 0x93, 0xa6, 0x99, 0xff, 0x9f, 0xd6, 0x84, 0xca, 0x6b, 0xf9, 0x80, 0x2c, 0x41, 0x77, 0x9b, 0x80, 0x10, 0x14, 0x10, 0xca, 0x06, 0x0a, 0x91, 0x92, 0x36, 0x4c, 0xfa, 0x22, 0xf1, 0x22, 0x75, 0xbc, 0x1e, 0xdb, 0x83, 0xed, 0x9d, 0xd5, 0xce, 0xac, 0x43, 0x6e, 0x7c, 0x04, 0xbe, 0x02, 0x27, 0x3e, 0x05, 0x07, 0x6e, 0xe1, 0xd6, 0x63, 0x2f, 0xac, 0xc8, 0x72, 0xe2, 0xc0, 0x81, 0x6b, 0x4e, 0x68, 0x66, 0xc7, 0xeb, 0x97, 0x4d, 0x8a, 0x29, 0xa2, 0x17, 0x7a, 0xdb, 0xf9, 0x3d, 0xf3, 0xfc, 0x9e, 0x97, 0xd9, 0xe7, 0xf9, 0x81, 0x4f, 0xfb, 0x6f, 0x73, 0x8b, 0x32, 0xbb, 0x1f, 0xb6, 0x48, 0xe0, 0x11, 0x41, 0xb8, 0x3d, 0x22, 0x5e, 0x9b, 0x05, 0xb6, 0x36, 0x60, 0x9f, 0xda, 0xb8, 0x3d, 0xa4, 0x9c, 0x53, 0xe6, 0x05, 0xa4, 0x4b, 0xb9, 0x08, 0xb0, 0xa0, 0xcc, 0xb3, 0x47, 0x9b, 0x2d, 0x22, 0xf0, 0xa6, 0xdd, 0x25, 0x1e, 0x09, 0xb0, 0x20, 0x6d, 0xcb, 0x0f, 0x98, 0x60, 0xb0, 0x99, 0x78, 0x5a, 0xd8, 0xa7, 0xd6, 0xb9, 0x9e, 0x96, 0xf6, 0xdc, 0xb8, 0xd1, 0xa5, 0xa2, 0x17, 0xb6, 0x2c, 
0x97, 0x0d, 0xed, 0x2e, 0xeb, 0x32, 0x5b, 0x11, 0xb4, 0xc2, 0x8e, 0x3a, 0xa9, 0x83, 0xfa, 0x4a, 0x88, 0x37, 0xde, 0x9c, 0xa4, 0x34, 0xc4, 0x6e, 0x8f, 0x7a, 0x24, 0x38, 0xb6, 0xfd, 0x7e, 0x57, 0x02, 0xdc, 0x1e, 0x12, 0x81, 0xed, 0x51, 0x26, 0x9d, 0x0d, 0xfb, 0x22, 0xaf, 0x20, 0xf4, 0x04, 0x1d, 0x92, 0x8c, 0xc3, 0x5b, 0x7f, 0xe5, 0xc0, 0xdd, 0x1e, 0x19, 0xe2, 0x79, 0xbf, 0xc6, 0x4f, 0x45, 0xb0, 0xb6, 0x1f, 0x0a, 0x2c, 0xa8, 0xd7, 0x7d, 0x48, 0x5a, 0x3d, 0xc6, 0xfa, 0xb0, 0x0e, 0xf2, 0x1e, 0x1e, 0x92, 0xaa, 0x51, 0x37, 0x9a, 0x65, 0x67, 0xe5, 0x24, 0x32, 0x97, 0xe2, 0xc8, 0xcc, 0xdf, 0xc1, 0x43, 0x82, 0x94, 0x05, 0x1e, 0x81, 0x15, 0x77, 0x40, 0x89, 0x27, 0x76, 0x98, 0xd7, 0xa1, 0xdd, 0xea, 0x72, 0xdd, 0x68, 0x56, 0xb6, 0xde, 0xb7, 0x16, 0x6d, 0xa2, 0xa5, 0x43, 0xed, 0x4c, 0x91, 0x38, 0xff, 0xd7, 0x81, 0x56, 0xa6, 0x51, 0x34, 0x13, 0x08, 0x62, 0x50, 0x08, 0xc2, 0x01, 0xe1, 0xd5, 0x5c, 0x3d, 0xd7, 0xac, 0x6c, 0xbd, 0xb7, 0x78, 0x44, 0x14, 0x0e, 0xc8, 0x43, 0x2a, 0x7a, 0x77, 0x7d, 0x92, 0x58, 0xb8, 0xb3, 0xaa, 0x03, 0x16, 0xa4, 0x8d, 0xa3, 0x84, 0x19, 0xee, 0x81, 0xd5, 0x0e, 0xa6, 0x83, 0x30, 0x20, 0x07, 0x6c, 0x40, 0xdd, 0xe3, 0x6a, 0x5e, 0xb5, 0xe1, 0xd5, 0x38, 0x32, 0x57, 0x6f, 0x4f, 0x1b, 0xce, 0x22, 0x73, 0x7d, 0x06, 0xb8, 0x77, 0xec, 0x13, 0x34, 0xeb, 0x0c, 0xbf, 0x06, 0xeb, 0xb2, 0x63, 0xdc, 0xc7, 0x2e, 0x39, 0x24, 0x03, 0xe2, 0x0a, 0x16, 0x54, 0x0b, 0xaa, 0x5d, 0x6f, 0x4c, 0x25, 0x9f, 0xbe, 0x99, 0xe5, 0xf7, 0xbb, 0x12, 0xe0, 0x96, 0xfc, 0x35, 0xac, 0xd1, 0xa6, 0xb5, 0x87, 0x5b, 0x64, 0x30, 0x76, 0x75, 0x5e, 0x8a, 0x23, 0x73, 0xfd, 0xce, 0x3c, 0x23, 0xca, 0x06, 0x81, 0x1f, 0x82, 0x0a, 0xa7, 0x6d, 0xf2, 0x51, 0xa7, 0x43, 0x5c, 0xc1, 0xab, 0x97, 0x54, 0x15, 0x8d, 0x38, 0x32, 0x2b, 0x87, 0x13, 0xf8, 0x2c, 0x32, 0xd7, 0x26, 0xc7, 0x9d, 0x01, 0xe6, 0x1c, 0x4d, 0xbb, 0xc1, 0x5b, 0xe0, 0xb2, 0xfc, 0x7d, 0x58, 0x28, 0x0e, 0x89, 0xcb, 0xbc, 0x36, 0xaf, 0x16, 0xeb, 0x46, 0xb3, 0xe0, 0xc0, 0x38, 0x32, 0x2f, 0xdf, 0x9b, 0xb1, 0xa0, 0xb9, 0x9b, 0xf0, 0x3e, 0xb8, 0x96, 0xbe, 
0x09, 0x22, 0x23, 0x4a, 0x8e, 0x1e, 0x90, 0x40, 0x1e, 0x78, 0xb5, 0x54, 0xcf, 0x35, 0xcb, 0xce, 0x2b, 0x71, 0x64, 0x5e, 0xdb, 0x3e, 0xff, 0x0a, 0xba, 0xc8, 0x57, 0x16, 0x36, 0xc4, 0xc2, 0xed, 0xe9, 0xe7, 0x29, 0x4f, 0x0a, 0xdb, 0x9f, 0xc0, 0xb2, 0xb0, 0xa9, 0xa3, 0x7a, 0x9a, 0x69, 0x37, 0xf8, 0x08, 0xc0, 0x80, 0x50, 0x6f, 0xc4, 0x5c, 0xf5, 0x37, 0x68, 0x32, 0xa0, 0xc8, 0x6e, 0xc6, 0x91, 0x09, 0x51, 0xc6, 0x7a, 0x16, 0x99, 0x57, 0xb3, 0xa8, 0xa2, 0x3e, 0x87, 0x0b, 0x32, 0x70, 0x99, 0xb5, 0xbe, 0x22, 0xae, 0x48, 0xdf, 0xbd, 0xf2, 0xec, 0xef, 0xae, 0xfa, 0x7d, 0x77, 0x86, 0x0e, 0xcd, 0xd1, 0x37, 0x7e, 0x36, 0xc0, 0xf5, 0xb9, 0x59, 0x4e, 0xc6, 0x26, 0x4c, 0xfe, 0x78, 0xf8, 0x08, 0x94, 0x24, 0x7b, 0x1b, 0x0b, 0xac, 0x86, 0xbb, 0xb2, 0x75, 0x73, 0xb1, 0x5c, 0x92, 0xc0, 0xfb, 0x44, 0x60, 0x07, 0xea, 0xa1, 0x01, 0x13, 0x0c, 0xa5, 0xac, 0xf0, 0x73, 0x50, 0xd2, 0x91, 0x79, 0x75, 0x59, 0x8d, 0xe8, 0x3b, 0x8b, 0x8f, 0xe8, 0x5c, 0xee, 0x4e, 0x5e, 0x86, 0x42, 0xa5, 0x23, 0x4d, 0xd8, 0xf8, 0xdd, 0x00, 0xf5, 0xa7, 0xd5, 0xb7, 0x47, 0xb9, 0x80, 0x5f, 0x64, 0x6a, 0xb4, 0x16, 0xec, 0x37, 0xe5, 0x49, 0x85, 0x57, 0x74, 0x85, 0xa5, 0x31, 0x32, 0x55, 0x5f, 0x1f, 0x14, 0xa8, 0x20, 0xc3, 0x71, 0x71, 0xb7, 0x9f, 0xb9, 0xb8, 0x99, 0xc4, 0x27, 0x9b, 0x68, 0x57, 0x92, 0xa3, 0x24, 0x46, 0xe3, 0x47, 0x03, 0xe4, 0xe5, 0x6a, 0x82, 0xaf, 0x81, 0x32, 0xf6, 0xe9, 0xc7, 0x01, 0x0b, 0x7d, 0x5e, 0x35, 0xd4, 0xe8, 0xac, 0xc6, 0x91, 0x59, 0xde, 0x3e, 0xd8, 0x4d, 0x40, 0x34, 0xb1, 0xc3, 0x4d, 0x50, 0xc1, 0x3e, 0x4d, 0x27, 0x6d, 0x59, 0x5d, 0x5f, 0x93, 0xe3, 0xb1, 0x7d, 0xb0, 0x9b, 0x4e, 0xd7, 0xf4, 0x1d, 0xc9, 0x1f, 0x10, 0xce, 0xc2, 0xc0, 0xd5, 0x9b, 0x55, 0xf3, 0xa3, 0x31, 0x88, 0x26, 0x76, 0xf8, 0x3a, 0x28, 0x70, 0x97, 0xf9, 0x44, 0xef, 0xc5, 0xab, 0x32, 0xed, 0x43, 0x09, 0x9c, 0x45, 0x66, 0x59, 0x7d, 0xa8, 0x89, 0x48, 0x2e, 0x35, 0xbe, 0x37, 0x00, 0xcc, 0xae, 0x5e, 0xf8, 0x01, 0x00, 0x2c, 0x3d, 0xe9, 0x92, 0x4c, 0xf5, 0x57, 0xa5, 0xe8, 0x59, 0x64, 0xae, 0xa6, 0x27, 0x45, 0x39, 0xe5, 0x02, 0x0f, 0x40, 
0x5e, 0xae, 0x6b, 0xad, 0x3c, 0xd6, 0xdf, 0xd3, 0x81, 0x89, 0xa6, 0xc9, 0x13, 0x52, 0x4c, 0x8d, 0xef, 0x0c, 0x70, 0xe5, 0x90, 0x04, 0x23, 0xea, 0x12, 0x44, 0x3a, 0x24, 0x20, 0x9e, 0x4b, 0xa0, 0x0d, 0xca, 0xe9, 0x66, 0xd5, 0x7a, 0xb8, 0xae, 0x7d, 0xcb, 0xe9, 0x16, 0x46, 0x93, 0x3b, 0xa9, 0x76, 0x2e, 0x5f, 0xa8, 0x9d, 0xd7, 0x41, 0xde, 0xc7, 0xa2, 0x57, 0xcd, 0xa9, 0x1b, 0x25, 0x69, 0x3d, 0xc0, 0xa2, 0x87, 0x14, 0xaa, 0xac, 0x2c, 0x10, 0xaa, 0xb9, 0x05, 0x6d, 0x65, 0x81, 0x40, 0x0a, 0x6d, 0xfc, 0x76, 0x09, 0xac, 0x3f, 0xc0, 0x03, 0xda, 0x7e, 0xa1, 0xd7, 0x2f, 0xf4, 0xfa, 0xbf, 0xa5, 0xd7, 0x59, 0x35, 0x05, 0xff, 0xae, 0x9a, 0x9e, 0x1a, 0xa0, 0x96, 0x99, 0xb5, 0xe7, 0xad, 0xa7, 0x5f, 0x66, 0xf4, 0xf4, 0xdd, 0xc5, 0x47, 0x28, 0x93, 0x7d, 0x46, 0x51, 0xff, 0x30, 0x40, 0xe3, 0xe9, 0x35, 0x3e, 0x07, 0x4d, 0x1d, 0xce, 0x6a, 0xea, 0x27, 0xff, 0xa0, 0xc0, 0x45, 0x54, 0xf5, 0x07, 0x03, 0xfc, 0xef, 0x9c, 0x75, 0x06, 0x31, 0x28, 0xf2, 0x64, 0xfd, 0xeb, 0x1a, 0x6f, 0x2d, 0x9e, 0xc8, 0xbc, 0x6e, 0x38, 0x95, 0x38, 0x32, 0x8b, 0x63, 0x74, 0xcc, 0x0b, 0x9b, 0xa0, 0xe4, 0x62, 0x27, 0xf4, 0xda, 0x5a, 0xb8, 0x56, 0x9c, 0x15, 0xd9, 0x93, 0x9d, 0xed, 0x04, 0x43, 0xa9, 0x15, 0xbe, 0x0c, 0x72, 0x61, 0x30, 0xd0, 0x1a, 0x51, 0x8c, 0x23, 0x33, 0x77, 0x1f, 0xed, 0x21, 0x89, 0x39, 0x37, 0x4e, 0x4e, 0x6b, 0x4b, 0x8f, 0x4f, 0x6b, 0x4b, 0x4f, 0x4e, 0x6b, 0x4b, 0xdf, 0xc4, 0x35, 0xe3, 0x24, 0xae, 0x19, 0x8f, 0xe3, 0x9a, 0xf1, 0x24, 0xae, 0x19, 0xbf, 0xc4, 0x35, 0xe3, 0xdb, 0x5f, 0x6b, 0x4b, 0x9f, 0x15, 0x75, 0x6a, 0x7f, 0x06, 0x00, 0x00, 0xff, 0xff, 0xc3, 0x6f, 0x8b, 0x7e, 0x2c, 0x0f, 0x00, 0x00, }
// --
// Core.Agent.TableFilters.js - provides the special module functions for the dashboard
// Copyright (C) 2001-2011 OTRS AG, http://otrs.org/
// --
// This software comes with ABSOLUTELY NO WARRANTY. For details, see
// the enclosed file COPYING for license information (AGPL). If you
// did not receive this file, see http://www.gnu.org/licenses/agpl.txt.
// --

"use strict";

var Core = Core || {};
Core.Agent = Core.Agent || {};

/**
 * @namespace
 * @exports TargetNS as Core.Agent.TableFilters
 * @description
 *      This namespace contains the special module functions for the Dashboard:
 *      autocomplete helpers for customer/user search fields and the drag&drop
 *      column allocation lists of the dashboard widget settings.
 */
Core.Agent.TableFilters = (function (TargetNS) {
    /*
     * check dependencies first
     * (second argument is the namespace string to resolve, third is the
     * human-readable name used in the error message)
     */
    if (!Core.Debug.CheckDependency('Core.Agent.TableFilters', 'Core.UI.AllocationList', 'Core.UI.AllocationList')) {
        return;
    }

    /**
     * @function
     * @param {jQueryObject} $Input Input element to add auto complete to
     * @return nothing
     * @description
     *      Attaches a jQuery UI autocomplete that searches customer IDs via the
     *      'AgentCustomerInformationCenterSearch' backend action. On select, the
     *      chosen value is appended as an <option> to the sibling <select> of the
     *      input's parent, selected, and a 'change' event is triggered.
     */
    TargetNS.InitCustomerIDAutocomplete = function ($Input) {
        $Input.autocomplete({
            minLength: Core.Config.Get('CustomerAutocomplete.MinQueryLength'),
            delay: Core.Config.Get('CustomerAutocomplete.QueryDelay'),
            open: function() {
                // force a higher z-index than the overlay/dialog
                $(this).autocomplete('widget').addClass('ui-overlay-autocomplete');
                return false;
            },
            source: function (Request, Response) {
                var URL = Core.Config.Get('Baselink'),
                    Data = {
                        Action: 'AgentCustomerInformationCenterSearch',
                        Subaction: 'SearchCustomerID',
                        Term: Request.term,
                        MaxResults: Core.Config.Get('CustomerAutocomplete.MaxResultsDisplayed')
                    };

                // if an old ajax request is already running, stop the old request and start the new one
                if ($Input.data('AutoCompleteXHR')) {
                    $Input.data('AutoCompleteXHR').abort();
                    $Input.removeData('AutoCompleteXHR');
                    // run the response function to hide the request animation
                    Response({});
                }

                // store the XHR handle on the input so a subsequent search can abort it
                $Input.data('AutoCompleteXHR', Core.AJAX.FunctionCall(URL, Data, function (Result) {
                    // note: this inner Data (result list) intentionally shadows the request Data above
                    var Data = [];
                    $Input.removeData('AutoCompleteXHR');
                    $.each(Result, function () {
                        Data.push({
                            label: this.Label + ' (' + this.Value + ')',
                            value: this.Value
                        });
                    });
                    Response(Data);
                }));
            },
            select: function (Event, UI) {
                $(Event.target)
                    .parent()
                    .find('select')
                    .append('<option value="' + UI.item.value + '">SelectedItem</option>')
                    .val(UI.item.value)
                    .trigger('change');
            }
        });
    };

    /**
     * @function
     * @param {jQueryObject} $Input Input element to add auto complete to
     * @return nothing
     * @description
     *      Attaches a jQuery UI autocomplete that searches customer users via the
     *      'AgentCustomerSearch' backend action. On select, the customer *key*
     *      (not the display value) is written into the sibling <select>.
     *      NOTE(review): an earlier doc block mentioned a "Subaction" parameter;
     *      this function takes none — the backend action needs no subaction here.
     */
    TargetNS.InitCustomerUserAutocomplete = function ($Input) {
        $Input.autocomplete({
            minLength: Core.Config.Get('CustomerUserAutocomplete.MinQueryLength'),
            delay: Core.Config.Get('CustomerUserAutocomplete.QueryDelay'),
            open: function() {
                // force a higher z-index than the overlay/dialog
                $(this).autocomplete('widget').addClass('ui-overlay-autocomplete');
                return false;
            },
            source: function (Request, Response) {
                var URL = Core.Config.Get('Baselink'),
                    Data = {
                        Action: 'AgentCustomerSearch',
                        Term: Request.term,
                        MaxResults: Core.Config.Get('CustomerUserAutocomplete.MaxResultsDisplayed')
                    };

                // if an old ajax request is already running, stop the old request and start the new one
                if ($Input.data('AutoCompleteXHR')) {
                    $Input.data('AutoCompleteXHR').abort();
                    $Input.removeData('AutoCompleteXHR');
                    // run the response function to hide the request animation
                    Response({});
                }

                $Input.data('AutoCompleteXHR', Core.AJAX.FunctionCall(URL, Data, function (Result) {
                    var Data = [];
                    $Input.removeData('AutoCompleteXHR');
                    $.each(Result, function () {
                        Data.push({
                            label: this.CustomerValue + " (" + this.CustomerKey + ")",
                            value: this.CustomerValue,
                            key: this.CustomerKey
                        });
                    });
                    Response(Data);
                }));
            },
            select: function (Event, UI) {
                $(Event.target)
                    .parent()
                    .find('select')
                    .append('<option value="' + UI.item.key + '">SelectedItem</option>')
                    .val(UI.item.key)
                    .trigger('change');
            }
        });
    };

    /**
     * @function
     * @param {jQueryObject} $Input Input element to add auto complete to
     * @param {String} Subaction Subaction to execute, "SearchCustomerID" or "SearchCustomerUser"
     * @return nothing
     * @description
     *      Attaches a jQuery UI autocomplete that searches agents/users via the
     *      'AgentUserSearch' backend action with the given Subaction. On select,
     *      the user key is written into the sibling <select>.
     */
    TargetNS.InitUserAutocomplete = function ($Input, Subaction) {
        $Input.autocomplete({
            minLength: Core.Config.Get('UserAutocomplete.MinQueryLength'),
            delay: Core.Config.Get('UserAutocomplete.QueryDelay'),
            open: function() {
                // force a higher z-index than the overlay/dialog
                $(this).autocomplete('widget').addClass('ui-overlay-autocomplete');
                return false;
            },
            source: function (Request, Response) {
                var URL = Core.Config.Get('Baselink'),
                    Data = {
                        Action: 'AgentUserSearch',
                        Subaction: Subaction,
                        Term: Request.term,
                        MaxResults: Core.Config.Get('UserAutocomplete.MaxResultsDisplayed')
                    };

                // if an old ajax request is already running, stop the old request and start the new one
                if ($Input.data('AutoCompleteXHR')) {
                    $Input.data('AutoCompleteXHR').abort();
                    $Input.removeData('AutoCompleteXHR');
                    // run the response function to hide the request animation
                    Response({});
                }

                $Input.data('AutoCompleteXHR', Core.AJAX.FunctionCall(URL, Data, function (Result) {
                    var Data = [];
                    $Input.removeData('AutoCompleteXHR');
                    $.each(Result, function () {
                        Data.push({
                            label: this.UserValue + " (" + this.UserKey + ")",
                            value: this.UserValue,
                            key: this.UserKey
                        });
                    });
                    Response(Data);
                }));
            },
            select: function (Event, UI) {
                $(Event.target)
                    .parent()
                    .find('select')
                    .append('<option value="' + UI.item.key + '">SelectedItem</option>')
                    .val(UI.item.key)
                    .trigger('change');
            }
        });
    };

    /**
     * @function
     * @return nothing
     *      This function initializes the special module functions
     */
    TargetNS.Init = function () {
        // Initiate allocation list
        TargetNS.SetAllocationList();
    };

    /**
     * @function
     * @private
     * @param {Object} Event The jQuery event object ('sortstop' when a list item was dragged)
     * @param {Object} UI The jQuery UI object of the event (UI.sender or UI.item locates the list)
     * @return nothing
     * @description
     *      Serializes the current column allocation of the container (0 = available,
     *      1 = assigned, plus the assigned order) and writes it as JSON into the
     *      enclosing form's hidden '.ColumnsJSON' input.
     *      NOTE(review): the original doc block here described an ajax-loader helper;
     *      it was a copy-paste mismatch and has been corrected to the actual behavior.
     */
    function UpdateAllocationList(Event, UI) {
        var $ContainerObj = $(UI.sender).closest('.AllocationListContainer'),
            Data = {},
            FieldName;

        // on drag&drop sorting the container must be looked up from the moved item
        if (Event.type === 'sortstop') {
            $ContainerObj = $(UI.item).closest('.AllocationListContainer');
        }

        Data.Columns = {};
        Data.Order = [];

        $ContainerObj.find('.AvailableFields').find('li').each(function() {
            FieldName = $(this).attr('data-fieldname');
            Data.Columns[FieldName] = 0;
        });

        $ContainerObj.find('.AssignedFields').find('li').each(function() {
            FieldName = $(this).attr('data-fieldname');
            Data.Columns[FieldName] = 1;
            Data.Order.push(FieldName);
        });

        $ContainerObj.closest('form').find('.ColumnsJSON').val(Core.JSON.Stringify(Data));
    }

    /**
     * @function
     * @return nothing
     * @description
     *      Builds the two allocation lists (assigned/available columns) of every
     *      '.AllocationListContainer' on the page from the JSON stored in the
     *      widget settings form, initializes drag&drop via Core.UI.AllocationList
     *      (changes are serialized by UpdateAllocationList) and wires the
     *      table filter for the available fields.
     */
    TargetNS.SetAllocationList = function (Event, UI) {
        $('.AllocationListContainer').each(function() {
            var $ContainerObj = $(this),
                DataEnabledJSON = $ContainerObj.closest('form.WidgetSettingsForm').find('input.ColumnsEnabledJSON').val(),
                DataAvailableJSON = $ContainerObj.closest('form.WidgetSettingsForm').find('input.ColumnsAvailableJSON').val(),
                DataEnabled,
                DataAvailable,
                Translation,
                $FieldObj,
                IDString = '#' + $ContainerObj.find('.AssignedFields').attr('id') + ', #' + $ContainerObj.find('.AvailableFields').attr('id');

            if (DataEnabledJSON) {
                DataEnabled = Core.JSON.Parse(DataEnabledJSON);
            }
            if (DataAvailableJSON) {
                DataAvailable = Core.JSON.Parse(DataAvailableJSON);
            }

            $.each(DataEnabled, function(Index, Field) {
                // get field translation
                Translation = Core.Config.Get('Column' + Field) || Field;
                $FieldObj = $('<li />').attr('title', Field).attr('data-fieldname', Field).text(Translation);
                $ContainerObj.find('.AssignedFields').append($FieldObj);
            });
            $.each(DataAvailable, function(Index, Field) {
                // get field translation
                Translation = Core.Config.Get('Column' + Field) || Field;
                $FieldObj = $('<li />').attr('title', Field).attr('data-fieldname', Field).text(Translation);
                $ContainerObj.find('.AvailableFields').append($FieldObj);
            });

            Core.UI.AllocationList.Init(IDString, $ContainerObj.find('.AllocationList'), 'UpdateAllocationList', '', UpdateAllocationList);
            Core.UI.Table.InitTableFilter($ContainerObj.find('.FilterAvailableFields'), $ContainerObj.find('.AvailableFields'));
        });
    };

    /**
     * @function
     * @return nothing
     *      This function binds a click event on an html element to update the preferences of the given dahsboard widget
     * @param {jQueryObject} $ClickedElement The jQuery object of the element(s) that get the event listener
     * @param {string} ElementID The ID of the element whose content should be updated with the server answer
     * @param {jQueryObject} $Form The jQuery object of the form with the data for the server request
     */
    TargetNS.RegisterUpdatePreferences = function ($ClickedElement, ElementID, $Form) {
        if (isJQueryObject($ClickedElement) && $ClickedElement.length) {
            $ClickedElement.click(function () {
                var URL = Core.Config.Get('Baselink') + Core.AJAX.SerializeForm($Form);
                Core.AJAX.ContentUpdate($('#' + ElementID), URL, function () {
                    // toggle back from the settings panel to the widget content
                    Core.UI.ToggleTwoContainer($('#' + ElementID + '-setting'), $('#' + ElementID));
                    Core.UI.Table.InitCSSPseudoClasses();
                });
                // prevent the default link/button action
                return false;
            });
        }
    };

    return TargetNS;
}(Core.Agent.TableFilters || {}));
<html>
<head>
<title>
  FreeType&nbsp;2 Project Files for VS.NET&nbsp;2010
</title>
</head>
<body>
<h1>
  FreeType&nbsp;2 Project Files for VS.NET&nbsp;2010
</h1>

<p>This directory contains a project file for Visual C++, named
<tt>freetype.vcxproj</tt>, and a solution file for Visual Studio, called
<tt>freetype.sln</tt>.  It compiles the following libraries from the
FreeType 2.4.11 sources:</p>

<ul>
  <pre>
    freetype2411.lib     - release build; single threaded
    freetype2411_D.lib   - debug build;   single threaded
    freetype2411MT.lib   - release build; multi-threaded
    freetype2411MT_D.lib - debug build;   multi-threaded</pre>
</ul>

<p>Be sure to extract the files with the Windows (CR+LF) line endings.  ZIP
archives are already stored this way, so no further action is required.  If
you use some <tt>.tar.*z</tt> archives, be sure to configure your extracting
tool to convert the line endings.  For example, with
<a href="http://www.winzip.com">WinZip</a>, you should activate the <i>TAR
file smart CR/LF Conversion</i> option.  Alternatively, you may consider
using the <tt>unix2dos</tt> or <tt>u2d</tt> utilities that are floating
around, which specifically deal with this particular problem.</p>

<p>Build directories are placed in the top-level <tt>objs</tt>
directory.</p>

</body>
</html>
<?php
/**
 * @copyright Copyright (c) 2016, ownCloud, Inc.
 *
 * @author Joas Schilling <coding@schilljs.com>
 * @author Thomas Müller <thomas.mueller@tmit.eu>
 *
 * @license AGPL-3.0
 *
 * This code is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License, version 3,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License, version 3,
 * along with this program. If not, see <http://www.gnu.org/licenses/>
 *
 */

namespace OC\DB\QueryBuilder\ExpressionBuilder;

use OC\DB\QueryBuilder\QueryFunction;
use OCP\DB\QueryBuilder\IQueryBuilder;

/**
 * Expression builder with PostgreSQL-specific overrides.
 */
class PgSqlExpressionBuilder extends ExpressionBuilder {
	/**
	 * Returns a IQueryFunction that casts the column to the given type
	 *
	 * Integer casts are wrapped in an explicit `CAST(... AS INT)` function;
	 * all other types are delegated to the generic parent implementation.
	 *
	 * @param string $column
	 * @param mixed $type One of IQueryBuilder::PARAM_*
	 * @return QueryFunction|string QueryFunction for PARAM_INT, otherwise whatever
	 *                              the parent implementation returns
	 *                              (original docblock claimed `string` only)
	 */
	public function castColumn($column, $type) {
		if ($type === IQueryBuilder::PARAM_INT) {
			$column = $this->helper->quoteColumnName($column);
			return new QueryFunction('CAST(' . $column . ' AS INT)');
		}
		return parent::castColumn($column, $type);
	}

	/**
	 * Case-insensitive LIKE comparison using PostgreSQL's native ILIKE operator.
	 *
	 * @inheritdoc
	 */
	public function iLike($x, $y, $type = null) {
		$x = $this->helper->quoteColumnName($x);
		$y = $this->helper->quoteColumnName($y);
		return $this->expressionBuilder->comparison($x, 'ILIKE', $y);
	}
}
<?php
/**
 * Shopware 5
 * Copyright (c) shopware AG
 *
 * According to our dual licensing model, this program can be used either
 * under the terms of the GNU Affero General Public License, version 3,
 * or under a proprietary license.
 *
 * The texts of the GNU Affero General Public License with an additional
 * permission and of our proprietary license can be found at and
 * in the LICENSE file you have received along with this program.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * "Shopware" is a registered trademark of shopware AG.
 * The licensing of the program under the AGPLv3 does not imply a
 * trademark license. Therefore any rights, title and interest in
 * our trademarks remain entirely with us.
 */

namespace ShopwarePlugins\SwagUpdate\Components\Steps;

/**
 * Immutable result object describing a failed update step.
 *
 * Carries a human-readable message, the optional exception that caused the
 * failure, and optional extra arguments for the caller.
 *
 * @category Shopware
 *
 * @copyright Copyright (c) shopware AG (http://www.shopware.de)
 */
class ErrorResult
{
    /**
     * Human-readable error description.
     *
     * @var string
     */
    private $errorMessage;

    /**
     * Exception that triggered the error, if any.
     *
     * @var \Exception|null
     */
    private $cause;

    /**
     * Additional context supplied by the failing step.
     *
     * @var array
     */
    private $arguments;

    /**
     * @param string     $message   human-readable error description
     * @param \Exception $exception optional originating exception
     * @param array      $args      optional additional context
     */
    public function __construct($message, \Exception $exception = null, $args = [])
    {
        $this->errorMessage = $message;
        $this->cause = $exception;
        $this->arguments = $args;
    }

    /**
     * @return string
     */
    public function getMessage()
    {
        return $this->errorMessage;
    }

    /**
     * @return \Exception
     */
    public function getException()
    {
        return $this->cause;
    }

    /**
     * @return array
     */
    public function getArgs()
    {
        return $this->arguments;
    }
}
/***************************************************************************
 *   Copyright (c) 2015 FreeCAD Developers                                 *
 *   Author: WandererFan <wandererfan@gmail.com>                           *
 *   Based on src/Mod/FEM/Gui/DlgSettingsFEMImp.cpp                        *
 *                                                                         *
 *   This file is part of the FreeCAD CAx development system.              *
 *                                                                         *
 *   This library is free software; you can redistribute it and/or         *
 *   modify it under the terms of the GNU Library General Public           *
 *   License as published by the Free Software Foundation; either          *
 *   version 2 of the License, or (at your option) any later version.      *
 *                                                                         *
 *   This library is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU Library General Public License for more details.                  *
 *                                                                         *
 *   You should have received a copy of the GNU Library General Public     *
 *   License along with this library; see the file COPYING.LIB. If not,    *
 *   write to the Free Software Foundation, Inc., 59 Temple Place,         *
 *   Suite 330, Boston, MA  02111-1307, USA                                *
 *                                                                         *
 ***************************************************************************/

#include "PreCompiled.h"

#include <App/Application.h>
#include <Base/Parameter.h>
#include <Base/Console.h>

#include "DrawGuiUtil.h"
#include "PreferencesGui.h"

#include "DlgPrefsTechDrawDimensionsImp.h"
#include "ui_DlgPrefsTechDrawDimensions.h"

using namespace TechDrawGui;
using namespace TechDraw;

// Preference page for TechDraw dimension settings (fonts, arrows, decimals,
// tolerance scale, standard/style).  Widgets are Gui::Pref* widgets, so
// saving/restoring is delegated to their onSave()/onRestore() methods.
DlgPrefsTechDrawDimensionsImp::DlgPrefsTechDrawDimensionsImp( QWidget* parent )
  : PreferencePage( parent )
  , ui(new Ui_DlgPrefsTechDrawDimensionsImp)
{
    ui->setupUi(this);
    // font and arrow sizes are physical lengths; negative values make no sense
    ui->plsb_FontSize->setUnit(Base::Unit::Length);
    ui->plsb_FontSize->setMinimum(0);
    ui->plsb_ArrowSize->setUnit(Base::Unit::Length);
    ui->plsb_ArrowSize->setMinimum(0);
}

DlgPrefsTechDrawDimensionsImp::~DlgPrefsTechDrawDimensionsImp()
{
    // no need to delete child widgets, Qt does it all for us
}

// Persist every preference widget on this page to the parameter store.
void DlgPrefsTechDrawDimensionsImp::saveSettings()
{
    ui->pcbStandardAndStyle->onSave();
    ui->cbGlobalDecimals->onSave();
    ui->cbShowUnits->onSave();
    ui->sbAltDecimals->onSave();
    ui->plsb_FontSize->onSave();
    ui->pdsbToleranceScale->onSave();
    ui->leDiameter->onSave();
    ui->pcbArrow->onSave();
    ui->plsb_ArrowSize->onSave();
}

// Load stored preferences into the widgets, seeding Quantity widgets with
// explicit defaults first (they cannot carry a preset value themselves).
void DlgPrefsTechDrawDimensionsImp::loadSettings()
{
    //set defaults for Quantity widgets if property not found
    //Quantity widgets do not use preset value since they are based on
    //QAbstractSpinBox
    double fontDefault = Preferences::dimFontSizeMM();
    ui->plsb_FontSize->setValue(fontDefault);
//    double arrowDefault = 5.0;
//    plsb_ArrowSize->setValue(arrowDefault);
    // arrow size deliberately defaults to the dimension font size
    // (old fixed 5.0 mm default kept above for reference)
    ui->plsb_ArrowSize->setValue(fontDefault);

    ui->pcbStandardAndStyle->onRestore();
    ui->cbGlobalDecimals->onRestore();
    ui->cbShowUnits->onRestore();
    ui->sbAltDecimals->onRestore();
    ui->plsb_FontSize->onRestore();
    ui->pdsbToleranceScale->onRestore();
    ui->leDiameter->onRestore();
    ui->pcbArrow->onRestore();
    ui->plsb_ArrowSize->onRestore();

    // the arrow-style combo is populated dynamically, then set to the stored style
    DrawGuiUtil::loadArrowBox(ui->pcbArrow);
    ui->pcbArrow->setCurrentIndex(prefArrowStyle());
}

/**
 * Sets the strings of the subwidgets using the current language.
 */
// On a language switch: save current widget state, retranslate the UI,
// then reload so the (re-created) widget texts show the stored values.
void DlgPrefsTechDrawDimensionsImp::changeEvent(QEvent *e)
{
    if (e->type() == QEvent::LanguageChange) {
        saveSettings();
        ui->retranslateUi(this);
        loadSettings();
    }
    else {
        QWidget::changeEvent(e);
    }
}

// Currently stored arrow-style index, read from the TechDraw GUI preferences.
int DlgPrefsTechDrawDimensionsImp::prefArrowStyle(void) const
{
    return PreferencesGui::dimArrowStyle();
}

#include <Mod/TechDraw/Gui/moc_DlgPrefsTechDrawDimensionsImp.cpp>
/***************************************************************************
    copyright            : (C) 2002 - 2008 by Scott Wheeler
    email                : wheeler@kde.org
 ***************************************************************************/

/***************************************************************************
 *   This library is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU Lesser General Public License version   *
 *   2.1 as published by the Free Software Foundation.                     *
 *                                                                         *
 *   This library is distributed in the hope that it will be useful, but   *
 *   WITHOUT ANY WARRANTY; without even the implied warranty of            *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU     *
 *   Lesser General Public License for more details.                       *
 *                                                                         *
 *   You should have received a copy of the GNU Lesser General Public      *
 *   License along with this library; if not, write to the Free Software   *
 *   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA         *
 *   02110-1301  USA                                                       *
 *                                                                         *
 *   Alternatively, this file is available under the Mozilla Public        *
 *   License Version 1.1.  You may obtain a copy of the License at         *
 *   http://www.mozilla.org/MPL/                                           *
 ***************************************************************************/

#include <tbytevectorlist.h>
#include <tpropertymap.h>
#include <tdebug.h>

#include "id3v2tag.h"
#include "uniquefileidentifierframe.h"

using namespace TagLib;
using namespace ID3v2;

// Private data for the ID3v2 "UFID" (unique file identifier) frame:
// an owner URL/email plus an opaque binary identifier.
class UniqueFileIdentifierFrame::UniqueFileIdentifierFramePrivate
{
public:
  String owner;
  ByteVector identifier;
};

////////////////////////////////////////////////////////////////////////////////
// public methods
////////////////////////////////////////////////////////////////////////////////

// Parse a UFID frame from raw frame data (header + fields).
UniqueFileIdentifierFrame::UniqueFileIdentifierFrame(const ByteVector &data) :
    ID3v2::Frame(data),
    d(new UniqueFileIdentifierFramePrivate())
{
  setData(data);
}

// Create an empty UFID frame with the given owner and identifier.
UniqueFileIdentifierFrame::UniqueFileIdentifierFrame(const String &owner, const ByteVector &id) :
    ID3v2::Frame("UFID"),
    d(new UniqueFileIdentifierFramePrivate())
{
  d->owner = owner;
  d->identifier = id;
}

UniqueFileIdentifierFrame::~UniqueFileIdentifierFrame()
{
  delete d;
}

String UniqueFileIdentifierFrame::owner() const
{
  return d->owner;
}

ByteVector UniqueFileIdentifierFrame::identifier() const
{
  return d->identifier;
}

void UniqueFileIdentifierFrame::setOwner(const String &s)
{
  d->owner = s;
}

void UniqueFileIdentifierFrame::setIdentifier(const ByteVector &v)
{
  d->identifier = v;
}

// UFID frames have no human-readable text representation.
String UniqueFileIdentifierFrame::toString() const
{
  return String();
}

// Map a MusicBrainz-owned UFID to the MUSICBRAINZ_TRACKID property;
// any other owner is reported as unsupported data ("UFID/<owner>").
PropertyMap UniqueFileIdentifierFrame::asProperties() const
{
  PropertyMap map;
  if(d->owner == "http://musicbrainz.org") {
    map.insert("MUSICBRAINZ_TRACKID", String(d->identifier));
  }
  else {
    map.unsupportedData().append(frameID() + String("/") + d->owner);
  }
  return map;
}

// Return the first UFID frame in \a tag whose owner equals \a o, or 0.
UniqueFileIdentifierFrame *UniqueFileIdentifierFrame::findByOwner(const ID3v2::Tag *tag, const String &o) // static
{
  ID3v2::FrameList comments = tag->frameList("UFID");

  for(ID3v2::FrameList::ConstIterator it = comments.begin();
      it != comments.end();
      ++it)
  {
    UniqueFileIdentifierFrame *frame = dynamic_cast<UniqueFileIdentifierFrame *>(*it);
    if(frame && frame->owner() == o)
      return frame;
  }

  return 0;
}

// Wire format: NUL-terminated Latin-1 owner string followed by the raw
// identifier bytes (everything after the terminator).
void UniqueFileIdentifierFrame::parseFields(const ByteVector &data)
{
  if(data.size() < 1) {
    debug("An UFID frame must contain at least 1 byte.");
    return;
  }

  int pos = 0;
  d->owner = readStringField(data, String::Latin1, &pos);
  d->identifier = data.mid(pos);
}

ByteVector UniqueFileIdentifierFrame::renderFields() const
{
  ByteVector data;

  data.append(d->owner.data(String::Latin1));
  data.append(char(0));
  data.append(d->identifier);

  return data;
}

// Internal: construct with a pre-parsed header (used by the frame factory).
UniqueFileIdentifierFrame::UniqueFileIdentifierFrame(const ByteVector &data, Header *h) :
  Frame(h),
  d(new UniqueFileIdentifierFramePrivate())
{
  parseFields(fieldData(data));
}
/***************************************************************************
 *   Copyright (c) 2013 Werner Mayer <wmayer[at]users.sourceforge.net>     *
 *                                                                         *
 *   This file is part of the FreeCAD CAx development system.              *
 *                                                                         *
 *   This library is free software; you can redistribute it and/or         *
 *   modify it under the terms of the GNU Library General Public           *
 *   License as published by the Free Software Foundation; either          *
 *   version 2 of the License, or (at your option) any later version.      *
 *                                                                         *
 *   This library is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU Library General Public License for more details.                  *
 *                                                                         *
 *   You should have received a copy of the GNU Library General Public     *
 *   License along with this library; see the file COPYING.LIB. If not,    *
 *   write to the Free Software Foundation, Inc., 59 Temple Place,         *
 *   Suite 330, Boston, MA  02111-1307, USA                                *
 *                                                                         *
 ***************************************************************************/

#include "PreCompiled.h"
#if defined(__MINGW32__)
# define WNT // avoid conflict with GUID
#endif

#ifndef _PreComp_
# include <climits>
# include <sstream>
# include <Standard_Version.hxx>
# include <BRep_Builder.hxx>
# include <Handle_TDocStd_Document.hxx>
# include <Handle_XCAFApp_Application.hxx>
# include <TDocStd_Document.hxx>
# include <XCAFApp_Application.hxx>
# include <XCAFDoc_DocumentTool.hxx>
# include <XCAFDoc_ShapeTool.hxx>
# include <XCAFDoc_ColorTool.hxx>
# include <XCAFDoc_Location.hxx>
# include <TDF_Label.hxx>
# include <TDF_LabelSequence.hxx>
# include <TDF_ChildIterator.hxx>
# include <TDataStd_Name.hxx>
# include <Quantity_Color.hxx>
# include <STEPCAFControl_Reader.hxx>
# include <STEPCAFControl_Writer.hxx>
# include <STEPControl_Writer.hxx>
# include <IGESCAFControl_Reader.hxx>
# include <IGESCAFControl_Writer.hxx>
# include <IGESControl_Controller.hxx>
# include <Interface_Static.hxx>
# include <Transfer_TransientProcess.hxx>
# include <XSControl_WorkSession.hxx>
# include <TopTools_IndexedMapOfShape.hxx>
# include <TopTools_MapOfShape.hxx>
# include <TopExp_Explorer.hxx>
# include <TopoDS_Iterator.hxx>
# include <APIHeaderSection_MakeHeader.hxx>
# include <OSD_Exception.hxx>
#if OCC_VERSION_HEX >= 0x060500
# include <TDataXtd_Shape.hxx>
# else
# include <TDataStd_Shape.hxx>
# endif
#endif

#include "ImportOCAFAssembly.h"
#include <Base/Console.h>
#include <App/Application.h>
#include <App/Document.h>
#include <App/DocumentObjectPy.h>
#include <Mod/Part/App/PartFeature.h>
#include <Mod/Part/App/ProgressIndicator.h>
#include <Mod/Part/App/ImportIges.h>
#include <Mod/Part/App/ImportStep.h>

using namespace Import;

/**
 * Walks an OCAF (XDE) document and creates Part features in a FreeCAD
 * document for the shapes it contains.
 *
 * @param h      OCAF document holding the imported STEP/IGES data.
 * @param d      FreeCAD document that receives the created objects.
 * @param name   Fallback name used when a label carries no name attribute.
 * @param target target object stored for later use (see header — not used
 *               in the methods visible here).
 */
ImportOCAFAssembly::ImportOCAFAssembly(Handle_TDocStd_Document h, App::Document* d, const std::string& name, App::DocumentObject *target)
    : pDoc(h), doc(d), default_name(name), targetObj(target)
{
    // Shape and color tools give access to the document's XDE structure.
    aShapeTool = XCAFDoc_DocumentTool::ShapeTool (pDoc->Main());
    aColorTool = XCAFDoc_DocumentTool::ColorTool(pDoc->Main());
}

ImportOCAFAssembly::~ImportOCAFAssembly()
{
}

/// Import all shapes, starting the recursive walk at the document root.
void ImportOCAFAssembly::loadShapes()
{
    myRefShapes.clear();
    loadShapes(pDoc->Main(), TopLoc_Location(), default_name, "", false,0);
}

/// Currently identical to loadShapes() — both walk from the root label.
void ImportOCAFAssembly::loadAssembly()
{
    myRefShapes.clear();
    loadShapes(pDoc->Main(), TopLoc_Location(), default_name, "", false,0);
}

/**
 * Return the UTF-8 name attached to \a label, or an empty string when the
 * label has no TDataStd_Name attribute.
 */
std::string ImportOCAFAssembly::getName(const TDF_Label& label)
{
    Handle(TDataStd_Name) name;
    std::string part_name;
    if (label.FindAttribute(TDataStd_Name::GetID(),name)) {
        // Convert the OCC extended string to UTF-8 via a temporary buffer.
        TCollection_ExtendedString extstr = name->Get();
        char* str = new char[extstr.LengthOfCString()+1];
        extstr.ToUTF8CString(str);
        part_name = str;
        delete [] str;
        return part_name;
        //if (part_name.empty()) {
        //    return "";
        //}
        //else {
        //    bool ws=true;
        //    for (std::string::iterator it = part_name.begin(); it != part_name.end(); ++it) {
        //        if (*it != ' ') {
        //            ws = false;
        //            break;
        //        }
        //    }
        //    if (ws)
        //        part_name = defaultname;
        //}
    }
    return "";
}

/**
 * Recursively walk the label hierarchy and create shapes.
 *
 * @param label       current OCAF label.
 * @param loc         accumulated placement of the enclosing assembly.
 * @param defaultname name used when the label has none (or only whitespace).
 * @param assembly    name of the innermost enclosing assembly, if any.
 * @param isRef       true when \a label was reached through a reference
 *                    (instance) rather than by child iteration.
 * @param dep         recursion depth, used only for debug logging.
 */
void ImportOCAFAssembly::loadShapes(const TDF_Label& label, const TopLoc_Location& loc, const std::string& defaultname, const std::string& assembly, bool isRef, int dep)
{
    // Hash of the label's shape; used with myRefShapes to avoid importing
    // the same referenced shape twice.
    int hash = 0;
    TopoDS_Shape aShape;
    if (aShapeTool->GetShape(label,aShape)) {
        hash = aShape.HashCode(HashUpper);
    }

    // Prefer the label's own (non-blank) name over the inherited default.
    Handle(TDataStd_Name) name;
    std::string part_name = defaultname;
    if (label.FindAttribute(TDataStd_Name::GetID(),name)) {
        TCollection_ExtendedString extstr = name->Get();
        char* str = new char[extstr.LengthOfCString()+1];
        extstr.ToUTF8CString(str);
        part_name = str;
        delete [] str;
        if (part_name.empty()) {
            part_name = defaultname;
        }
        else {
            bool ws=true;
            for (std::string::iterator it = part_name.begin(); it != part_name.end(); ++it) {
                if (*it != ' ') {
                    ws = false;
                    break;
                }
            }
            if (ws)
                part_name = defaultname;
        }
    }

    // A reference's local placement composes with the parent placement;
    // a plain label's placement replaces it.
    TopLoc_Location part_loc = loc;
    Handle(XCAFDoc_Location) hLoc;
    if (label.FindAttribute(XCAFDoc_Location::GetID(), hLoc)) {
        if (isRef)
            part_loc = part_loc * hLoc->Get();
        else
            part_loc = hLoc->Get();
    }

#ifdef FC_DEBUG
    const char *s;
    if( !hLoc.IsNull() )
        s = hLoc->Get().IsIdentity()?"0":"1";
    else
        s = "0";
    std::stringstream str;
    Base::Console().Log("H:%-9d \tN:%-30s \tTop:%d, Asm:%d, Shape:%d, Compound:%d, Simple:%d, Free:%d, Ref:%d, Component:%d, SubShape:%d\tTrf:%s-- Dep:%d \n",
        hash,
        part_name.c_str(),
        aShapeTool->IsTopLevel(label),
        aShapeTool->IsAssembly(label),
        aShapeTool->IsShape(label),
        aShapeTool->IsCompound(label),
        aShapeTool->IsSimpleShape(label),
        aShapeTool->IsFree(label),
        aShapeTool->IsReference(label),
        aShapeTool->IsComponent(label),
        aShapeTool->IsSubShape(label),
        s,
        dep
        );
    label.Dump(str);
    Base::Console().Message(str.str().c_str() );
#endif

    std::string asm_name = assembly;
    if (aShapeTool->IsAssembly(label)) {
        asm_name = part_name;
    }

    // Follow references to the shape they instantiate.
    TDF_Label ref;
    if (aShapeTool->IsReference(label) && aShapeTool->GetReferredShape(label, ref)) {
        loadShapes(ref, part_loc, part_name, asm_name, true,dep + 1);
    }

    if (isRef || myRefShapes.find(hash) == myRefShapes.end()) {
        TopoDS_Shape aShape;
        if (isRef && aShapeTool->GetShape(label, aShape))
            myRefShapes.insert(aShape.HashCode(HashUpper));

        if (aShapeTool->IsSimpleShape(label) && (isRef || aShapeTool->IsFree(label))) {
            if (!asm_name.empty())
                part_name = asm_name;
            // For a reference the parent placement applies as-is; otherwise
            // use the placement accumulated for this label.
            if (isRef)
                createShape(label, loc, part_name);
            else
                createShape(label, part_loc, part_name);
        }
        else {
            // Not a simple shape: recurse into the children.
            for (TDF_ChildIterator it(label); it.More(); it.Next()) {
                loadShapes(it.Value(), part_loc, part_name, asm_name, isRef, dep+1);
            }
        }
    }
}

/**
 * Create Part features for the shape stored at \a label.
 *
 * Compounds are split into their solids and shells, each becoming its own
 * feature; only if the compound contains neither is it imported whole.
 */
void ImportOCAFAssembly::createShape(const TDF_Label& label, const TopLoc_Location& loc, const std::string& name)
{
    Base::Console().Log("-create Shape\n");
    const TopoDS_Shape& aShape = aShapeTool->GetShape(label);
    if (!aShape.IsNull() && aShape.ShapeType() == TopAbs_COMPOUND) {
        TopExp_Explorer xp;
        int ctSolids = 0, ctShells = 0;
        for (xp.Init(aShape, TopAbs_SOLID); xp.More(); xp.Next(), ctSolids++) {
            createShape(xp.Current(), loc, name);
        }
        // TopAbs_SOLID as the "avoid" argument skips shells already owned
        // by a solid.
        for (xp.Init(aShape, TopAbs_SHELL, TopAbs_SOLID); xp.More(); xp.Next(), ctShells++) {
            createShape(xp.Current(), loc, name);
        }
        if (ctSolids > 0 || ctShells > 0)
            return;
    }
    createShape(aShape, loc, name);
}

/**
 * Create a single Part::Feature for \a aShape (moved by \a loc if needed)
 * and apply shape-level and per-face colors from the OCAF color tool.
 * Faces without an explicit color get the shape color (default light grey).
 */
void ImportOCAFAssembly::createShape(const TopoDS_Shape& aShape, const TopLoc_Location& loc, const std::string& name)
{
    Part::Feature* part = static_cast<Part::Feature*>(doc->addObject("Part::Feature"));
    if (!loc.IsIdentity())
        part->Shape.setValue(aShape.Moved(loc));
    else
        part->Shape.setValue(aShape);
    part->Label.setValue(name);

    // Overall shape color: first match among generic/surface/curve colors.
    Quantity_Color aColor;
    App::Color color(0.8f,0.8f,0.8f);
    if (aColorTool->GetColor(aShape, XCAFDoc_ColorGen, aColor) ||
        aColorTool->GetColor(aShape, XCAFDoc_ColorSurf, aColor) ||
        aColorTool->GetColor(aShape, XCAFDoc_ColorCurv, aColor)) {
        color.r = (float)aColor.Red();
        color.g = (float)aColor.Green();
        color.b = (float)aColor.Blue();
        std::vector<App::Color> colors;
        colors.push_back(color);
        applyColors(part, colors);
#if 0//TODO
        Gui::ViewProvider* vp = Gui::Application::Instance->getViewProvider(part);
        if (vp && vp->isDerivedFrom(PartGui::ViewProviderPart::getClassTypeId())) {
            color.r = aColor.Red();
            color.g = aColor.Green();
            color.b = aColor.Blue();
            static_cast<PartGui::ViewProviderPart*>(vp)->ShapeColor.setValue(color);
        }
#endif
    }

    // Collect the faces so per-face colors can be addressed by index.
    TopTools_IndexedMapOfShape faces;
    TopExp_Explorer xp(aShape,TopAbs_FACE);
    while (xp.More()) {
        faces.Add(xp.Current());
        xp.Next();
    }
    bool found_face_color = false;
    std::vector<App::Color> faceColors;
    faceColors.resize(faces.Extent(), color);
    xp.Init(aShape,TopAbs_FACE);
    while (xp.More()) {
        if (aColorTool->GetColor(xp.Current(), XCAFDoc_ColorGen, aColor) ||
            aColorTool->GetColor(xp.Current(), XCAFDoc_ColorSurf, aColor) ||
            aColorTool->GetColor(xp.Current(), XCAFDoc_ColorCurv, aColor)) {
            // FindIndex is 1-based, the color vector is 0-based.
            int index = faces.FindIndex(xp.Current());
            color.r = (float)aColor.Red();
            color.g = (float)aColor.Green();
            color.b = (float)aColor.Blue();
            faceColors[index-1] = color;
            found_face_color = true;
        }
        xp.Next();
    }

    if (found_face_color) {
        applyColors(part, faceColors);
    }
}
using System;

namespace NHibernate.Test.NHSpecificTest.NH1289
{
	/// <summary>
	/// Minimal persistent entity for the NH1289 regression test.
	/// Members are virtual so NHibernate can generate a lazy proxy.
	/// </summary>
	[Serializable]
	public class WorkflowItem
	{
		/// <summary>Surrogate identifier (primary key).</summary>
		public virtual int Id { get; set; }
	}
}
// ---------------------------------------------------------------------
//
// Copyright (C) 1999 - 2016 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE at
// the top level of the deal.II distribution.
//
// ---------------------------------------------------------------------

#ifndef dealii__grid_out_h
#define dealii__grid_out_h


#include <deal.II/base/config.h>
#include <deal.II/base/exceptions.h>
#include <deal.II/base/point.h>
#include <deal.II/base/data_out_base.h>

#include <string>

DEAL_II_NAMESPACE_OPEN

class ParameterHandler;
template <int dim, int spacedim> class Triangulation;
template <int dim, int spacedim> class Mapping;


/**
 * Within this namespace, we define several structures that are used to
 * describe flags that can be given to grid output routines to modify the
 * default outfit of the grids written into a file. See the different
 * subclasses and the documentation of the GridOut class for more details.
 *
 * @ingroup output
 */
namespace GridOutFlags
{
  /**
   * Flags for grid output in OpenDX format.
   *
   * @ingroup output
   */
  struct DX
  {
    /**
     * Write cells.
     */
    bool write_cells;

    /**
     * Write faces.
     */
    bool write_faces;

    /**
     * Write field with diameters.
     */
    bool write_diameter;

    /**
     * Write field with area/volume.
     */
    bool write_measure;

    /**
     * Write all faces, including interior faces. If <tt>false</tt>, only
     * boundary faces are written.
     */
    bool write_all_faces;

    /**
     * Constructor.
     */
    DX (const bool write_cells = true,
        const bool write_faces = false,
        const bool write_diameter = false,
        const bool write_measure = false,
        const bool write_all_faces = true);

    /**
     * Declare parameters in ParameterHandler.
     */
    static void declare_parameters (ParameterHandler &param);

    /**
     * Parse parameters of ParameterHandler.
     */
    void parse_parameters (ParameterHandler &param);
  };

  /**
   * Flags describing the details of output in MSH format.
   *
   * @ingroup output
   */
  struct Msh
  {
    /**
     * When writing a mesh, write boundary faces explicitly if their boundary
     * indicator is not the default boundary indicator, which is zero. This
     * is necessary if you later want to re-read the grid and want to get the
     * same boundary indicators for the different parts of the boundary of
     * the triangulation.
     *
     * It is not necessary if you only want to write the triangulation to
     * view or print it.
     *
     * Default: @p false.
     */
    bool write_faces;

    /**
     * Same as #write_faces, but for boundary lines. This is used only if
     * <tt>dim==3</tt>, and ignored in all other cases.
     *
     * Default: @p false.
     */
    bool write_lines;

    /**
     * Constructor.
     */
    Msh (const bool write_faces = false,
         const bool write_lines = false);

    /**
     * Declare parameters in ParameterHandler.
     */
    static void declare_parameters (ParameterHandler &param);

    /**
     * Parse parameters of ParameterHandler.
     */
    void parse_parameters (ParameterHandler &param);
  };


  /**
   * Flags describing the details of output in UCD format.
   *
   * @ingroup output
   */
  struct Ucd
  {
    /**
     * Write a comment at the beginning of the file stating the date of
     * creation and some other data. While this is supported by the UCD
     * format (and the AVS program), some other programs get confused by
     * this, so the default is to not write a preamble.
     *
     * Default: <code>false</code>.
     */
    bool write_preamble;

    /**
     * When writing a mesh, write boundary faces explicitly if their boundary
     * indicator is not the default (zero). Needed to preserve boundary
     * indicators when the grid is re-read later; not needed for mere
     * viewing/printing.
     *
     * Default: @p false.
     */
    bool write_faces;

    /**
     * Same as #write_faces, but for boundary lines. This directive is
     * ignored if <tt>dim!=3</tt>.
     *
     * Default: @p false.
     */
    bool write_lines;

    /**
     * Constructor.
     */
    Ucd (const bool write_preamble = false,
         const bool write_faces = false,
         const bool write_lines = false);

    /**
     * Declare parameters in ParameterHandler.
     */
    static void declare_parameters (ParameterHandler &param);

    /**
     * Parse parameters of ParameterHandler.
     */
    void parse_parameters (ParameterHandler &param);
  };


  /**
   * Flags describing the details of output in GNUPLOT format.
   *
   * @ingroup output
   */
  struct Gnuplot
  {
    /**
     * Write the number of each cell into the output file before starting
     * with the lines it is composed of, as a comment. Useful to locate
     * specific cells, but enlarges the output significantly.
     *
     * Default: @p false.
     */
    bool write_cell_numbers;

    /**
     * Based on the vertices of the face and #n_boundary_face_points
     * additional points a tensor product mesh (transformed to the real
     * space) of (#n_boundary_face_points+2)<sup>dim-1</sup> points is
     * plotted on each boundary face.
     */
    unsigned int n_boundary_face_points;

    /**
     * Flag. If true also inner cells are plotted with curved boundaries.
     * This is useful when for e.g. MappingQEulerian with
     * #n_boundary_face_points&gt;.
     */
    bool curved_inner_cells;

    /**
     * Constructor.
     */
    // NOTE(review): the parameter is named write_cell_number (singular)
    // while the member it presumably initializes is write_cell_numbers —
    // harmless for callers, but confirm the .cc definition matches.
    Gnuplot (const bool write_cell_number = false,
             const unsigned int n_boundary_face_points = 2,
             const bool curved_inner_cells = false);

    /**
     * Declare parameters in ParameterHandler.
     */
    static void declare_parameters (ParameterHandler &param);

    /**
     * Parse parameters of ParameterHandler.
     */
    void parse_parameters (ParameterHandler &param);
  };

  /**
   * Flags describing the details of output for encapsulated postscript. In
   * this structure, the flags common to all dimensions are listed. Flags
   * which are specific to one space dimension only are listed in derived
   * classes.
   *
   * By default, the size of the picture is scaled such that the width
   * equals 300 units.
   *
   * @ingroup output
   */
  struct EpsFlagsBase
  {
    /**
     * Enum denoting the possibilities whether the scaling should be done
     * such that the given @p size equals the width or the height of the
     * resulting picture.
     */
    enum SizeType
    {
      width,
      height
    };

    /**
     * See above. Default is @p width.
     */
    SizeType size_type;

    /**
     * Width or height of the output as given in postscript units (1/72
     * inch). Whether this is height or width is specified by @p size_type.
     *
     * Default is 300.
     */
    unsigned int size;

    /**
     * Width of a line in postscript units. Default is 0.5.
     */
    double line_width;

    /**
     * Should lines with a set @p user_flag be drawn in a different color
     * (red)? See
     * @ref GlossUserFlags
     * for information about user flags.
     */
    bool color_lines_on_user_flag;

    /**
     * The number of points on a boundary face that are plotted in addition
     * to the vertices of the face. Only used when the mapping is not the
     * standard $Q_1$ mapping (i.e., an object of kind MappingQGeneric(1))
     * and edges are curved.
     */
    unsigned int n_boundary_face_points;

    /**
     * Should lines be colored according to their refinement level? This
     * overrides color_lines_on_user_flag for all levels except level 0.
     * Colors are: level 0: black, other levels: rainbow scale from blue to
     * red.
     */
    bool color_lines_level;

    /**
     * Constructor.
     */
    EpsFlagsBase (const SizeType size_type = width,
                  const unsigned int size = 300,
                  const double line_width = 0.5,
                  const bool color_lines_on_user_flag = false,
                  const unsigned int n_boundary_face_points = 2,
                  const bool color_lines_level = false);

    /**
     * Declare parameters in ParameterHandler.
     */
    static void declare_parameters (ParameterHandler &param);

    /**
     * Parse parameters of ParameterHandler.
     */
    void parse_parameters (ParameterHandler &param);
  };


  /**
   * Flags describing the details of output for encapsulated postscript for
   * all dimensions not explicitly specialized below. Some flags that are
   * common to all dimensions are listed in the base class.
   *
   * This class does not actually exist, we only here declare the general
   * template and declare explicit specializations below.
   *
   * @ingroup output
   */
  template <int dim>
  struct Eps
  {};

  /**
   * Flags specific to the output of grids in one space dimensions.
   *
   * @ingroup output
   */
  template <>
  struct Eps<1> : public EpsFlagsBase
  {
    /**
     * Constructor.
     */
    Eps (const SizeType size_type = width,
         const unsigned int size = 300,
         const double line_width = 0.5,
         const bool color_lines_on_user_flag = false,
         const unsigned int n_boundary_face_points = 2);

    /**
     * Declare parameters in ParameterHandler.
     */
    static void declare_parameters (ParameterHandler &param);

    /**
     * Parse parameters of ParameterHandler.
     */
    void parse_parameters (ParameterHandler &param);
  };


  /**
   * Flags specific to the output of grids in two space dimensions.
   *
   * @ingroup output
   */
  template <>
  struct Eps<2> : public EpsFlagsBase
  {
    /**
     * If this flag is set, then we place the number of the cell into the
     * middle of each cell. The default value is to not do this.
     *
     * The format of the cell number written is <tt>level.index</tt>, or
     * simply @p index, depending on the value of the following flag.
     */
    bool write_cell_numbers;

    /**
     * If the cell numbers shall be written, using the above flag, then the
     * value of this flag determines whether the format shall be
     * <tt>level.index</tt>, or simply @p index. If @p true, the first
     * format is taken. Default is @p true.
     *
     * The flag has obviously no effect if @p write_cell_numbers is @p
     * false.
     */
    bool write_cell_number_level;

    /**
     * Vertex numbers can be written onto the vertices. This is controlled
     * by the following flag. Default is @p false.
     */
    bool write_vertex_numbers;

    /**
     * Constructor.
     */
    Eps (const SizeType size_type = width,
         const unsigned int size = 300,
         const double line_width = 0.5,
         const bool color_lines_on_user_flag = false,
         const unsigned int n_boundary_face_points = 2,
         const bool write_cell_numbers = false,
         const bool write_cell_number_level = true,
         const bool write_vertex_numbers = false,
         const bool color_lines_level = false);

    /**
     * Declare parameters in ParameterHandler.
     */
    static void declare_parameters (ParameterHandler &param);

    /**
     * Parse parameters of ParameterHandler.
     */
    void parse_parameters (ParameterHandler &param);
  };

  /**
   * Flags specific to the output of grids in three space dimensions.
   *
   * @ingroup output
   */
  template <>
  struct Eps<3> : public EpsFlagsBase
  {
    /**
     * Angle of the line origin-viewer against the z-axis in degrees.
     *
     * Default is the Gnuplot-default of 60.
     */
    double azimut_angle;

    /**
     * Angle by which the viewers position projected onto the x-y-plane is
     * rotated around the z-axis, in positive sense when viewed from above.
     * The unit are degrees, and zero equals a position above or below the
     * negative y-axis.
     *
     * Default is the Gnuplot-default of 30.
     */
    double turn_angle;

    /**
     * Constructor.
     */
    Eps (const SizeType size_type = width,
         const unsigned int size = 300,
         const double line_width = 0.5,
         const bool color_lines_on_user_flag = false,
         const unsigned int n_boundary_face_points = 2,
         const double azimut_angle = 60,
         const double turn_angle = 30);

    /**
     * Declare parameters in ParameterHandler.
     */
    static void declare_parameters (ParameterHandler &param);

    /**
     * Parse parameters of ParameterHandler.
     */
    void parse_parameters (ParameterHandler &param);
  };

  /**
   * Flags for XFig output.
   *
   * @ingroup output
   */
  struct XFig
  {
    /**
     * Draw boundary lines. Default is true.
     */
    bool draw_boundary;

    /**
     * An enum used for deciding which field is used for coloring the cells.
     */
    enum Coloring
    {
      /// Convert the material id into the cell color
      material_id,
      /// Convert the level into the cell color
      level_number,
      /// Convert the global subdomain id into the cell color
      subdomain_id,
      /// Convert the level subdomain id into the cell color
      level_subdomain_id
    } color_by;

    /**
     * Code level to depth. Default is true. If false, color depends on
     * material or boundary id.
     *
     * Depth of the object is 900-level, if this value is true.
     */
    bool level_depth;

    /**
     * Additional points for curved boundaries. Default is none.
     */
    unsigned int n_boundary_face_points;

    /**
     * Scaling of graph. The default is a unit length of one inch.
     */
    Point<2> scaling;

    /**
     * Offset of the graph. Before scaling, the coordinates are shifted by
     * this value. Default is zero in each direction.
     */
    Point<2> offset;

    /**
     * Style for filling cells. Default is solid fill (20). This value is
     * forwarded unchanged into the corresponding field <tt>fill_style</tt>
     * of the polyline object of XFig.
     */
    int fill_style;

    /**
     * Style for drawing border lines of polygons. Defaults to solid (0)
     * and is forwarded to XFig.
     */
    int line_style;

    /**
     * Thickness of border lines of polygons. Default is 1.
     *
     * Set this to zero to avoid border lines for very fine meshes.
     */
    int line_thickness;

    /**
     * Style for drawing lines at the boundary. Defaults to solid (0).
     */
    int boundary_style;

    /**
     * Thickness of boundary lines. Default is 3.
     */
    int boundary_thickness;

    /**
     * Constructor.
     */
    XFig();

    /**
     * Declare parameters in ParameterHandler.
     */
    static void declare_parameters (ParameterHandler &param);

    /**
     * Parse parameters of ParameterHandler.
     */
    void parse_parameters (ParameterHandler &param);
  };


  /**
   * Flags controlling SVG output.
   *
   * @ingroup output
   */
  struct Svg
  {
    /// Height of the plot in SVG units, computed from width if zero. Defaults to 1000
    unsigned int height;

    /// The width of the plot. Computed automatically from height if zero (default)
    unsigned int width;

    /// Thickness of the lines between cells
    unsigned int line_thickness;
    /// Thickness of lines at the boundary
    unsigned int boundary_line_thickness;

    /// Margin around the plotted area
    bool margin;

    /**
     * Background style.
     */
    enum Background
    {
      /// Use transparent value of SVG
      transparent,
      /// Use white background
      white,
      /// Use a gradient from white (top) to steelblue (bottom), and add
      /// date and time plus a deal.II logo. Automatically draws a margin.
      dealii
    };

    Background background;

    // View angles for the perspective view of the grid; Default is 0, 0
    // (top view).

    /**
     * Azimuth angle of the view point, in degrees. Default is 0.
     * (NOTE(review): the reference direction the angle is measured from
     * was left as "???" in the original docs — confirm against the SVG
     * writer implementation.)
     */
    int azimuth_angle;

    /**
     * The angle from vertically above the xy-plane. Default is 0.
     */
    int polar_angle;

    /**
     * Cell coloring.
     */
    enum Coloring
    {
      /// No cell coloring
      none,
      /// Convert the material id into the cell color (default)
      material_id,
      /// Convert the level number into the cell color
      level_number,
      /// Convert the subdomain id into the cell color
      subdomain_id,
      /// Convert the level subdomain id into the cell color
      level_subdomain_id
    };

    Coloring coloring;

    /// Interpret the level number of the cells as altitude over the
    /// x-y-plane (useful in the perspective view).
    bool convert_level_number_to_height;

    /// The factor determining the vertical distance between levels
    /// (default = 0.3)
    float level_height_factor;

    /// Scaling of the font for cell annotations. Defaults to 1.
    float cell_font_scaling;
    /// Write level number into each cell. Defaults to true
    bool label_level_number;
    /// Write cell index into each cell. Defaults to true
    bool label_cell_index;
    /// Write material id of each cell. Defaults to false
    bool label_material_id;
    /// Write subdomain id of each cell. Defaults to false
    bool label_subdomain_id;
    /// Write level subdomain id of each cell. Defaults to false
    bool label_level_subdomain_id;

    /// Draw a colorbar next to the plotted grid with respect to the chosen
    /// coloring of the cells
    bool draw_colorbar;
    /// Draw a legend next to the plotted grid, explaining the label of the
    /// cells
    bool draw_legend;

    /**
     * Constructor.
     */
    Svg(const unsigned int line_thickness = 2,
        const unsigned int boundary_line_thickness = 4,
        bool margin = true,
        const Background background = white,
        const int azimuth_angle = 0,
        const int polar_angle = 0,
        const Coloring coloring = level_number,
        const bool convert_level_number_to_height = false,
        const bool label_level_number = true,
        const bool label_cell_index = true,
        const bool label_material_id = false,
        const bool label_subdomain_id = false,
        const bool draw_colorbar = true,
        const bool draw_legend = true);
  };

  /**
   * Flags for grid output in MathGL format.
   *
   * @ingroup output
   */
  struct MathGL
  {
    /**
     * Constructor.
     */
    MathGL ();

    /**
     * Draw a bounding box around the graph.
     */
    bool draw_bounding_box;

    /**
     * Declare parameters in ParameterHandler.
     */
    static void declare_parameters (ParameterHandler &param);

    /**
     * Parse parameters of ParameterHandler.
     */
    void parse_parameters (ParameterHandler &param);
  };


  /**
   * Flags for grid output in Vtk format. These flags are the same as those
   * declared in DataOutBase::VtkFlags.
   *
   * @ingroup output
   */
  struct Vtk : public DataOutBase::VtkFlags
  {};


  /**
   * Flags for grid output in Vtu format. These flags are the same as those
   * declared in DataOutBase::VtuFlags.
   *
   * @ingroup output
   */
  struct Vtu : public DataOutBase::VtkFlags
  {};
}



/**
 * This class provides a means to output a triangulation to a file in
 * different formats. See the enum GridOut::OutputFormat for a list of formats
 * and the corresponding output function names.
 *
 * Usage is simple: either you use the direct form
 * @code
 *   ofstream output_file("some_filename");
 *   GridOut().write_gnuplot (tria, output_file);
 * @endcode
 * if you know which format you want to have, or if you want the format to be
 * a runtime parameter, you can write
 * @code
 *   GridOut::OutputFormat grid_format =
 *     GridOut::parse_output_format(get_format_name_from_somewhere());
 *   ofstream output_file("some_filename" + GridOut::default_suffix(output_format));
 *   GridOut().write (tria, output_file, output_format);
 * @endcode
 * The function <tt>get_output_format_names()</tt> provides a list of possible
 * names of output formats in a string that is understandable by the
 * ParameterHandler class.
 *
 * Note that here, we have created an unnamed object of type GridOut and
 * called one of its <tt>write_*</tt> functions. This looks like as if the
 * respective function could really be made @p static. This was not done in
 * order to allow for parameters to be passed to the different output
 * functions in a way compatible with the scheme of allowing the right output
 * format to be selected at run-time through the generic @p write function.
 *
 * In order to explain this, consider each function had one or more additional
 * parameters giving the details of output, for example position of the
 * spectator for 3d meshed, line thicknesses, etc. While this would allow each
 * output function any flexibility it needs, it would not allow us to use the
 * generic function @p write which is given a parameter determining the output
 * format, since it is impractical to give it a list of parameters for each
 * and every output format supported which it may then pass on to the
 * respective output function.
 *
 * Rather, we have chosen to let each object of this class GridOut have a set
 * of parameters for each supported output format. These are collected in
 * structures GridOutFlags::Eps(), GridOutFlags::Gnuplot(), etc declared in
 * the GridOutFlags namespace, and you can set your preferred flags like this:
 * @code
 *   GridOut grid_out;
 *   GridOutFlags::Ucd ucd_flags;
 *   ...    // set some fields in ucd_flags
 *   grid_out.set_flags (ucd_flags);
 *   ...
 *   ...    // write some file with data_out
 * @endcode
 * The respective output function then use the so-set flags. By default, they
 * are set to reasonable values as described above and in the documentation of
 * the different flags structures. Resetting the flags can be done by calling
 * <tt>grid_out.set_flags (GridOutFlags::Ucd());</tt>, since the default
 * constructor of each of the flags structures sets the parameters to their
 * initial values.
 *
 * The advantage of this approach is that it is possible to change the flags
 * of one or more output formats according to your needs and later use the
 * generic @p write function; the actual output function then called will use
 * the flags as set before.
 *
 * Note that some of the structures describing the flags of the different
 * output formats are empty since the respective format does not support any
 * flags. The structure and the @p set_flags function are provided anyway.
* * In order to explain this, consider that each function had one or more additional * parameters giving the details of output, for example position of the * spectator for 3d meshes, line thicknesses, etc. While this would allow each * output function any flexibility it needs, it would not allow us to use the * generic function @p write which is given a parameter determining the output * format, since it is impractical to give it a list of parameters for each * and every output format supported which it may then pass on to the * respective output function. * * Rather, we have chosen to let each object of this class GridOut have a set * of parameters for each supported output format. These are collected in * structures GridOutFlags::Eps(), GridOutFlags::Gnuplot(), etc declared in * the GridOutFlags namespace, and you can set your preferred flags like this: * @code * GridOut grid_out; * GridOutFlags::Ucd ucd_flags; * ... // set some fields in ucd_flags * grid_out.set_flags (ucd_flags); * ... * ... // write some file with data_out * @endcode * The respective output functions then use the so-set flags. By default, they * are set to reasonable values as described above and in the documentation of * the different flags structures. Resetting the flags can be done by calling * <tt>grid_out.set_flags (GridOutFlags::Ucd());</tt>, since the default * constructor of each of the flags structures sets the parameters to their * initial values. * * The advantage of this approach is that it is possible to change the flags * of one or more output formats according to your needs and later use the * generic @p write function; the actual output function then called will use * the flags as set before. * * Note that some of the structures describing the flags of the different * output formats are empty since the respective format does not support any * flags. The structure and the @p set_flags function are provided anyway. 
* Note also that some of the structures may differ between the dimensions * supported by this class; they then have a template parameter, as usual. * * @ingroup grid * @ingroup output * @author Wolfgang Bangerth, Guido Kanschat, Luca Heltai, Stefan Nauber, * Christian Wülker * @date 1999 - 2013 */ class GridOut { public: /** * Declaration of a name for each of the different output formats. These are * used by the generic output function write() to determine the actual * output format. */ enum OutputFormat { /// Do nothing in write() none, /// write() calls write_dx() dx, /// write() calls write_gnuplot() gnuplot, /// write() calls write_eps() eps, /// write() calls write_ucd() ucd, /// write() calls write_xfig() xfig, /// write() calls write_msh() msh, /// write() calls write_svg() svg, /// write() calls write_mathgl() mathgl, /// write() calls write_vtk() vtk, /// write() calls write_vtu() vtu }; /** * Constructor. */ GridOut (); /** * Write triangulation in OpenDX format. * * Cells or faces are written together with their level and their material * id or boundary indicator, resp. * * Not implemented for the codimension one case. */ template <int dim, int spacedim> void write_dx (const Triangulation<dim,spacedim> &tria, std::ostream &out) const; /** * Write the triangulation in the gnuplot format. * * In GNUPLOT format, each cell is written as a sequence of its confining * lines. Apart from the coordinates of the line's end points, the level and * the material of the cell are appended to each line of output. Therefore, * if you let GNUPLOT draw a 2d grid as a 3d plot, you will see more refined * cells being raised against cells with less refinement. Also, if you draw * a cut through a 3d grid, you can extrude the refinement level in the * direction orthogonal to the cut plane. The same can be done with the * material id, which is plotted after the level. 
* * A more useful application of this feature is the following: if you use * the GNUPLOT command (for a 2d grid here) * @verbatim * splot [:][:][2.5:3.5] "grid_file.gnuplot" * * @endverbatim * then the whole x- and y-range will be plotted, i.e. the whole grid, but * only those lines with a z-value between 2.5 and 3.5. Since the z-values * were chosen to be the level to which a cell belongs, this results in a * plot of those cells only that belong to level 3 in this example. This * way, it is easy to produce plots of the different levels of grid. * * @p mapping is a pointer to a mapping used for the transformation of cells * at the boundary. If zero, then use standard Q1 mapping. * * Names and values of additional flags controlling the output can be found * in the documentation of the GridOutFlags::Gnuplot() class. * * Not implemented for the codimension one case. */ template <int dim, int spacedim> void write_gnuplot (const Triangulation<dim,spacedim> &tria, std::ostream &out, const Mapping<dim,spacedim> *mapping=0) const; /** * Write the triangulation in the msh format. * * Msh is the format used by Gmsh and it is described in the gmsh user's * guide. Besides the usual output of the grid only, you can decide through * additional flags (see below, and the documentation of the * GridOutFlags::Msh() class) whether boundary faces with non-zero boundary * indicator shall be written to the file explicitly. This is useful, if you * want to re-read the grid later on, since <tt>deal.II</tt> sets the * boundary indicator to zero by default; therefore, to obtain the same * triangulation as before, you have to specify faces with differing * boundary indicators explicitly, which is done by this flag. * * Names and values of further flags controlling the output can be found in * the documentation of the GridOutFlags::Msh() class. * * Works also in the codimension one case. 
*/ template <int dim, int spacedim> void write_msh (const Triangulation<dim,spacedim> &tria, std::ostream &out) const; /** * Write the triangulation in the ucd format. * * UCD (unstructured cell data) is the format used by AVS and some other * programs. It is described in the AVS developer's guide. Besides the usual * output of the grid only, you can decide through additional flags (see * below, and the documentation of the GridOutFlags::Ucd() class) whether * boundary faces with non-zero boundary indicator shall be written to the * file explicitly. This is useful, if you want to re-read the grid later * on, since <tt>deal.II</tt> sets the boundary indicator to zero by * default; therefore, to obtain the same triangulation as before, you have * to specify faces with differing boundary indicators explicitly, which is * done by this flag. * * Names and values of further flags controlling the output can be found in * the documentation of the GridOutFlags::Ucd() class. * * Works also for the codimension one case. */ template <int dim, int spacedim> void write_ucd (const Triangulation<dim,spacedim> &tria, std::ostream &out) const; /** * Write the triangulation in the encapsulated postscript format. * * In this format, each line of the triangulation is written separately. We * scale the picture such that either x-values or y-values range between * zero and a fixed size. The other axis is scaled by the same factor. Which * axis is taken to compute the scale and the size of the box it shall fit * into is determined by the output flags (see below, and the documentation * of the GridOutFlags::Eps() class). * * The bounding box is close to the triangulation on all four sides, without * an extra frame. The line width is chosen to be 0.5 by default, but can be * changed. The line width is to be compared with the extension of the * picture, of which the default is 300. * * The flag @p color_lines_on_user_flag allows to draw lines with the @p * user_flag set to be drawn in red. 
The colors black and red are defined as * @p b and @p r in the preamble of the output file and can be changed there * according to need. * * @p mapping is a pointer to a mapping used for the transformation of cells * at the boundary. If zero, then use standard Q1 mapping. * * Names and values of additional flags controlling the output can be found * in the documentation of the GridOutFlags::Eps() class. Especially the * viewpoint for three dimensional grids is of importance here. * * Not implemented for the codimension one case. */ template <int dim, int spacedim> void write_eps (const Triangulation<dim, spacedim> &tria, std::ostream &out, const Mapping<dim, spacedim> *mapping=0) const; /** * Write two-dimensional XFig-file. * * This function writes all grid cells as polygons and optionally boundary * lines. Several parameters can be adjusted by the XFigFlags control * object. * * If levels are coded to depth, the complete grid hierarchy is plotted with * fine cells before their parents. This way, levels can be switched on and * off in xfig by selecting levels. * * Polygons are either at depth 900-level or at 900+@p material_id, * depending on the flag @p level_depth. Accordingly, boundary edges are at * depth 800-level or at 800+@p boundary_id. Therefore, boundary edges are * always in front of cells. * * Not implemented for the codimension one case. */ template <int dim, int spacedim> void write_xfig (const Triangulation<dim, spacedim> &tria, std::ostream &out, const Mapping<dim, spacedim> *mapping=0) const; /** * Write the triangulation in the SVG format. * * SVG (Scalable Vector Graphics) is an XML-based vector image format * developed and maintained by the World Wide Web Consortium (W3C). This * function conforms to the latest specification SVG 1.1, released on August * 16, 2011. * * The cells of the triangulation are written as polygons with additional * lines at the boundary of the triangulation. 
A coloring of the cells is * further possible in order to visualize a certain property of the cells * such as their level or material id. A colorbar can be drawn to encode the * chosen coloring. Moreover, a cell label can be added, showing level * index, etc. * * @note This function is currently only implemented for two-dimensional * grids in two space dimensions. */ void write_svg (const Triangulation<2,2> &tria, std::ostream &out) const; /** * Declaration of the same function as above for all other dimensions and * space dimensions. This function is not currently implemented and is only * declared to exist to support dimension independent programming. */ template <int dim, int spacedim> void write_svg (const Triangulation<dim,spacedim> &tria, std::ostream &out) const; /** * Write triangulation in MathGL script format. To interpret this file a * version of MathGL>=2.0.0 is required. * * To get a handle on the resultant MathGL script within a graphical * environment an interpreter is needed. A suggestion to start with is * <code>mglview</code>, which is bundled with MathGL. <code>mglview</code> * can interpret and display small-to-medium MathGL scripts in a graphical * window and enables conversion to other formats such as EPS, PNG, JPG, * SVG, as well as view/display animations. Some minor editing, such as * modifying the lighting or alpha channels, can also be done. * * @note Not implemented for the codimension one case. */ template <int dim, int spacedim> void write_mathgl (const Triangulation<dim, spacedim> &tria, std::ostream &out) const; /** * Write triangulation in VTK format. */ template <int dim, int spacedim> void write_vtk (const Triangulation<dim,spacedim> &tria, std::ostream &out) const; /** * Write triangulation in VTU format. 
*/ template <int dim, int spacedim> void write_vtu (const Triangulation<dim,spacedim> &tria, std::ostream &out) const; /** * Write triangulation in VTU format for each processor, and add a .pvtu file for * visualization in Visit or Paraview that describes the collection of VTU files * as all part of the same simulation. The output is in the form * <tt>filename_without_extension.proc000*.vtu</tt> where * is 0,1,...,n_proc-1 and * <tt>filename_without_extension.pvtu</tt>. The input <tt>view_levels</tt> can be * set as true to view each level of a multilevel method. The input * <tt>include_artificial</tt> can be set as true to view the artificial cells for * each processor. Each .vtu and .pvtu file will have the attributes subdomain, * level_subdomain, level, and proc_writing. The level value can be used to separate the * image into the view of the grid on each level of a multilevel method and the * proc_writing value can be used to separate the image into each processor's owned and * ghost cells. * This is accomplished by applying the "warp by scalar" filter in paraview * to each of the values. After opening the .pvtu file of a mesh where the input * <tt>view_levels</tt> is set to true, select the "warp by scalar" * filter. For the "Scalars" input select <tt>proc_writing</tt> and for the "Normal" input * enter in 1 0 0, then click Apply. Next select the "warp by scalar" filter again. For the * "Scalars" input select <tt>level</tt> and for the "Normal" input enter in 0 1 0, * then click Apply. This will give you the following image. * @image html write_mesh_vtu_levels.png * If the <tt>view_levels</tt> remains at false, thereby only giving the mesh for the active * level, it is enough to separate the image into the views written by different processors. * This is shown in the following image where the <tt>include_artificial</tt> input is set as true. 
* @image html write_mesh_vtu_active.png * Note: Depending on the size of the mesh you may need to increase the "Scale Factor" input * so that each piece does not overlap. */ template <int dim, int spacedim> void write_mesh_per_processor_as_vtu (const Triangulation<dim,spacedim> &tria, const std::string &filename_without_extension, const bool view_levels=false, const bool include_artificial=false) const; /** * Write grid to @p out according to the given data format. This function * simply calls the appropriate <tt>write_*</tt> function. */ template <int dim, int spacedim> void write (const Triangulation<dim,spacedim> &tria, std::ostream &out, const OutputFormat output_format, const Mapping<dim,spacedim> *mapping=0) const; /** * Write mesh in default format set by ParameterHandler. */ template <int dim, int spacedim> void write (const Triangulation<dim,spacedim> &tria, std::ostream &out, const Mapping<dim,spacedim> *mapping=0) const; /** * Set flags for DX output */ void set_flags (const GridOutFlags::DX &flags); /** * Set flags for GMSH output */ void set_flags (const GridOutFlags::Msh &flags); /** * Set flags for UCD output */ void set_flags (const GridOutFlags::Ucd &flags); /** * Set flags for GNUPLOT output */ void set_flags (const GridOutFlags::Gnuplot &flags); /** * Set flags for EPS output of a one-dimensional triangulation */ void set_flags (const GridOutFlags::Eps<1> &flags); /** * Set flags for EPS output of a two-dimensional triangulation */ void set_flags (const GridOutFlags::Eps<2> &flags); /** * Set flags for EPS output of a three-dimensional triangulation */ void set_flags (const GridOutFlags::Eps<3> &flags); /** * Set flags for EPS output of a three-dimensional triangulation */ void set_flags (const GridOutFlags::XFig &flags); /** * Set flags for SVG output */ void set_flags (const GridOutFlags::Svg &flags); /** * Set flags for MathGL output */ void set_flags (const GridOutFlags::MathGL &flags); /** * Set flags for VTK output */ void set_flags (const 
GridOutFlags::Vtk &flags); /** * Set flags for VTU output */ void set_flags (const GridOutFlags::Vtu &flags); /** * Provide a function that can tell us which suffix a given output format * usually has. For example, it defines the following mappings: * <ul> * <li> @p OpenDX: <tt>.dx</tt> * <li> @p gnuplot: <tt>.gnuplot</tt> * <li> @p ucd: <tt>.inp</tt> * <li> @p eps: <tt>.eps</tt>. * </ul> * Similar mappings are provided for all implemented formats. * * Since this function does not need data from this object, it is static and * can thus be called without creating an object of this class. */ static std::string default_suffix (const OutputFormat output_format); /** * Default suffix for the default output format selected through * ParameterHandler. */ std::string default_suffix () const; /** * Return the @p OutputFormat value corresponding to the given string. If * the string does not match any known format, an exception is thrown. * * Since this function does not need data from this object, it is static and * can thus be called without creating an object of this class. Its main * purpose is to allow a program to use any implemented output format * without the need to extend the program's parser each time a new format is * implemented. * * To get a list of presently available format names, e.g. to give it to the * ParameterHandler class, use the function get_output_format_names(). */ static OutputFormat parse_output_format (const std::string &format_name); /** * Return a list of implemented output formats. The different names are * separated by vertical bar signs (<tt>`|'</tt>) as used by the * ParameterHandler classes. */ static std::string get_output_format_names (); /** * Declare parameters in ParameterHandler. */ static void declare_parameters (ParameterHandler &param); /** * Parse parameters of ParameterHandler. */ void parse_parameters (ParameterHandler &param); /** * Determine an estimate for the memory consumption (in bytes) of this * object. 
*/ std::size_t memory_consumption () const; /** * Exception */ DeclException0 (ExcInvalidState); private: /** * The default output format, set by a ParameterHandler. */ OutputFormat default_format; /** * Flags for OpenDX output. */ GridOutFlags::DX dx_flags; /** * Flags for GMSH output. Can be changed by using the set_flags(const * GridOutFlags::Msh&) function. */ GridOutFlags::Msh msh_flags; /** * Flags for UCD output. Can be changed by using the set_flags(const * GridOutFlags::Ucd&) function. */ GridOutFlags::Ucd ucd_flags; /** * Flags to be used upon output of GNUPLOT data. Can be changed by using the * set_flags(const GridOutFlags::Gnuplot&) function. */ GridOutFlags::Gnuplot gnuplot_flags; /** * Flags to be used upon output of EPS data in one space dimension. Can be * changed by using the set_flags(const GridOutFlags::Eps<1>&) function. */ GridOutFlags::Eps<1> eps_flags_1; /** * Flags to be used upon output of EPS data in two space dimensions. Can be * changed by using the @p set_flags function. */ GridOutFlags::Eps<2> eps_flags_2; /** * Flags to be used upon output of EPS data in three space dimensions. Can * be changed by using the @p set_flags function. */ GridOutFlags::Eps<3> eps_flags_3; /** * Flags used for XFig output. */ GridOutFlags::XFig xfig_flags; /** * Flags used for Svg output. */ GridOutFlags::Svg svg_flags; /** * Flags for MathGL output. */ GridOutFlags::MathGL mathgl_flags; /** * Flags for VTK output. */ GridOutFlags::Vtk vtk_flags; /** * Flags for VTU output. */ GridOutFlags::Vtu vtu_flags; /** * Write the grid information about faces to @p out. Only those faces are * printed which are on the boundary and which have a boundary indicator not * equal to zero, since the latter is the default for boundary faces. * * Since, in GMSH, geometric elements are continuously numbered, this * function requires a parameter @p next_element_index providing the next * geometric element number. 
This index should have a numerical value equal * to one more than the index previously used to write a geometric element * to @p out. * * @returns The next unused geometric element index. * * @warning @p next_element_index should be (at least) one larger than the * current number of triangulation elements (lines, cells, faces) that have * been written to @p out. GMSH will not load the saved file correctly if * there are repeated indices. * * This function unfortunately can not be included in the regular @p * write_msh function, since it needs special treatment for the case * <tt>dim==1</tt>, in which case the face iterators are <tt>void*</tt>'s * and lack the member functions which are called. We would not actually * call these functions, but the compiler would complain anyway when * compiling the function for <tt>dim==1</tt>. Bad luck. */ template <int dim, int spacedim> unsigned int write_msh_faces (const Triangulation<dim,spacedim> &tria, const unsigned int next_element_index, std::ostream &out) const; /** * Declaration of the specialization of above function for 1d. Does nothing. */ unsigned int write_msh_faces (const Triangulation<1,1> &tria, const unsigned int next_element_index, std::ostream &out) const; /** * Declaration of the specialization of above function for 1d, 2sd. Does * nothing. */ unsigned int write_msh_faces (const Triangulation<1,2> &tria, const unsigned int next_element_index, std::ostream &out) const; unsigned int write_msh_faces (const Triangulation<1,3> &tria, const unsigned int next_element_index, std::ostream &out) const; /** * Write the grid information about lines to @p out. Only those lines are * printed which are on the boundary and which have a boundary indicator not * equal to zero, since the latter is the default for boundary faces. * * Since, in GMSH, geometric elements are continuously numbered, this * function requires a parameter @p next_element_index providing the next * geometric element number. 
This index should have a numerical value equal * to one more than the index previously used to write a geometric element * to @p out. * * @returns The next unused geometric element index. * * @warning @p next_element_index should be (at least) one larger than the * current number of triangulation elements (lines, cells, faces) that have * been written to @p out. GMSH will not load the saved file correctly if * there are repeated indices. * * This function unfortunately can not be included in the regular @p * write_msh function, since it needs special treatment for the case * <tt>dim==1</tt> and <tt>dim==2</tt>, in which case the edge iterators are * <tt>void*</tt>'s and lack the member functions which are called. We would * not actually call these functions, but the compiler would complain anyway * when compiling the function for <tt>dim==1/2</tt>. Bad luck. */ template <int dim, int spacedim> unsigned int write_msh_lines (const Triangulation<dim,spacedim> &tria, const unsigned int next_element_index, std::ostream &out) const; /** * Declaration of the specialization of above function for 1d. Does nothing. */ unsigned int write_msh_lines (const Triangulation<1,1> &tria, const unsigned int next_element_index, std::ostream &out) const; /** * Declaration of the specialization of above function for 1d, 2sd. Does * nothing. */ unsigned int write_msh_lines (const Triangulation<1,2> &tria, const unsigned int next_element_index, std::ostream &out) const; unsigned int write_msh_lines (const Triangulation<1,3> &tria, const unsigned int next_element_index, std::ostream &out) const; /** * Declaration of the specialization of above function for 2d. Does nothing. */ unsigned int write_msh_lines (const Triangulation<2,2> &tria, const unsigned int next_element_index, std::ostream &out) const; /** * Declaration of the specialization of above function for 2d, 3sd. Does * nothing. 
*/ unsigned int write_msh_lines (const Triangulation<2,3> &tria, const unsigned int next_element_index, std::ostream &out) const; /** * Write the grid information about faces to @p out. Only those faces are * printed which are on the boundary and which have a boundary indicator not * equal to zero, since the latter is the default for boundary faces. * * Since (in the UCD format) geometric elements are continuously numbered, * this function requires a parameter @p next_element_index providing the * next geometric element number. This index should have a numerical value * equal to one more than the index previously used to write a geometric * element to @p out. * * @returns The next unused geometric element index. * * @warning @p next_element_index should be (at least) one larger than the * current number of triangulation elements (lines, cells, faces) that have * been written to @p out. Visualization programs may not load the saved * file correctly if there are repeated indices. * * This function unfortunately can not be included in the regular @p * write_ucd function, since it needs special treatment for the case * <tt>dim==1</tt>, in which case the face iterators are <tt>void*</tt>'s * and lack the member functions which are called. We would not actually * call these functions, but the compiler would complain anyway when * compiling the function for <tt>dim==1</tt>. Bad luck. */ template <int dim, int spacedim> unsigned int write_ucd_faces (const Triangulation<dim,spacedim> &tria, const unsigned int next_element_index, std::ostream &out) const; /** * Declaration of the specialization of above function for 1d. Does nothing. */ unsigned int write_ucd_faces (const Triangulation<1,1> &tria, const unsigned int next_element_index, std::ostream &out) const; /** * Declaration of the specialization of above function for 1d, 2sd. Does * nothing. 
*/ unsigned int write_ucd_faces (const Triangulation<1,2> &tria, const unsigned int next_element_index, std::ostream &out) const; unsigned int write_ucd_faces (const Triangulation<1,3> &tria, const unsigned int next_element_index, std::ostream &out) const; /** * Write the grid information about lines to @p out. Only those lines are * printed which are on the boundary and which have a boundary indicator not * equal to zero, since the latter is the default for boundary lines. * * Since (in the UCD format) geometric elements are continuously numbered, * this function requires a parameter @p next_element_index providing the * next geometric element number. This index should have a numerical value * equal to one more than the index previously used to write a geometric * element to @p out. * * @returns The next unused geometric element index. * * @warning @p next_element_index should be (at least) one larger than the * current number of triangulation elements (lines, cells, faces) that have * been written to @p out. Visualization programs may not load the saved * file correctly if there are repeated indices. * * This function unfortunately can not be included in the regular @p * write_ucd function, since it needs special treatment for the case * <tt>dim==1/2</tt>, in which case the edge iterators are <tt>void*</tt>'s * and lack the member functions which are called. We would not actually * call these functions, but the compiler would complain anyway when * compiling the function for <tt>dim==1/2</tt>. Bad luck. */ template <int dim, int spacedim> unsigned int write_ucd_lines (const Triangulation<dim,spacedim> &tria, const unsigned int next_element_index, std::ostream &out) const; /** * Declaration of the specialization of above function for 1d. Does nothing. */ unsigned int write_ucd_lines (const Triangulation<1,1> &tria, const unsigned int next_element_index, std::ostream &out) const; /** * Declaration of the specialization of above function for 1d, 2sd. Does * nothing. 
*/ unsigned int write_ucd_lines (const Triangulation<1,2> &tria, const unsigned int next_element_index, std::ostream &out) const; unsigned int write_ucd_lines (const Triangulation<1,3> &tria, const unsigned int next_element_index, std::ostream &out) const; /** * Declaration of the specialization of above function for 2d. Does nothing. */ unsigned int write_ucd_lines (const Triangulation<2,2> &tria, const unsigned int next_element_index, std::ostream &out) const; /** * Declaration of the specialization of above function for 2d, 3sd. Does * nothing. */ unsigned int write_ucd_lines (const Triangulation<2,3> &tria, const unsigned int next_element_index, std::ostream &out) const; /** * This function projects a three-dimensional point (Point<3> point) onto a * two-dimensional image plane, specified by the position of the camera * viewing system (Point<3> camera_position), camera direction (Point<3> * camera_position), camera horizontal (Point<3> camera_horizontal, * necessary for the correct alignment of the later images), and the focus * of the camera (float camera_focus). * * For SVG output of grids. */ static Point<2> svg_project_point(Point<3> point, Point<3> camera_position, Point<3> camera_direction, Point<3> camera_horizontal, float camera_focus); /** * Return the number of faces in the triangulation which have a boundary * indicator not equal to zero. Only these faces are explicitly printed in * the <tt>write_*</tt> functions; all faces with indicator * numbers::internal_face_boundary_id are interior ones and an indicator * with value zero for faces at the boundary are considered default. * * This function always returns an empty list in one dimension. * * The reason for this function is the same as for write_ucd_faces(). See * there for more information. */ template <int dim, int spacedim> unsigned int n_boundary_faces (const Triangulation<dim,spacedim> &tria) const; /** * Declaration of the specialization of above function for 1d. Simply * returns zero. 
*/ unsigned int n_boundary_faces (const Triangulation<1,1> &tria) const; /** * Declaration of the specialization of above function for 1d, 2sd. Simply * returns zero. */ unsigned int n_boundary_faces (const Triangulation<1,2> &tria) const; unsigned int n_boundary_faces (const Triangulation<1,3> &tria) const; /** * Return the number of lines in the triangulation which have a boundary * indicator not equal to zero. Only these lines are explicitly printed in * the <tt>write_*</tt> functions; all lines with indicator * numbers::internal_face_boundary_id are interior ones and an indicator * with value zero for faces at the boundary are considered default. * * This function always returns an empty list in one and two dimensions. * * The reason for this function is the same as for write_ucd_faces(). See * there for more information. */ template <int dim, int spacedim> unsigned int n_boundary_lines (const Triangulation<dim,spacedim> &tria) const; /** * Declaration of the specialization of above function for 1d. Simply * returns zero. */ unsigned int n_boundary_lines (const Triangulation<1,1> &tria) const; /** * Declaration of the specialization of above function for 1d, 2sd. Simply * returns zero. */ unsigned int n_boundary_lines (const Triangulation<1,2> &tria) const; unsigned int n_boundary_lines (const Triangulation<1,3> &tria) const; /** * Declaration of the specialization of above function for 2d. Simply * returns zero. */ unsigned int n_boundary_lines (const Triangulation<2,2> &tria) const; /** * Declaration of the specialization of above function for 2d, 3sd. Simply * returns zero. */ unsigned int n_boundary_lines (const Triangulation<2,3> &tria) const; }; DEAL_II_NAMESPACE_CLOSE #endif
package net.sourceforge.jsocks.test;

import net.sourceforge.jsocks.socks.*;
import net.sourceforge.jsocks.socks.server.*;

import java.net.Socket;

/**
 * Manual test driver for UserPasswordAuthenticator: runs a SOCKS proxy on
 * port 1080 that accepts exactly one username/password pair.
 */
public class UPSOCKS implements UserValidation {

    String user, password;

    UPSOCKS(String user, String password) {
        this.user = user;
        this.password = password;
    }

    /**
     * Logs the attempted credentials and the connecting socket, then checks
     * the attempt against the expected pair.
     */
    public boolean isUserValid(String user, String password, Socket s) {
        System.err.println("User:" + user + "\tPassword:" + password);
        System.err.println("Socket:" + s);
        // Guard clause keeps the original short-circuit order: the password
        // is only compared once the user name has matched.
        if (!user.equals(this.user)) {
            return false;
        }
        return password.equals(this.password);
    }

    /**
     * Starts the proxy. Expects "user password" on the command line; falls
     * back to the defaults "user"/"password" otherwise.
     */
    public static void main(String[] args) {
        boolean haveCredentials = args.length == 2;
        String user = haveCredentials ? args[0] : "user";
        String password = haveCredentials ? args[1] : "password";

        UPSOCKS validator = new UPSOCKS(user, password);
        UserPasswordAuthenticator authenticator = new UserPasswordAuthenticator(validator);
        ProxyServer proxy = new ProxyServer(authenticator);
        proxy.start(1080);
    }
}
package barqsoft.footballscores;

import android.content.Intent;
import android.os.Bundle;
import android.support.v7.app.ActionBarActivity;
import android.util.Log;
import android.view.Menu;
import android.view.MenuItem;

// Entry activity: hosts a single PagerFragment and persists/restores the
// pager position and the currently selected match across recreation.
public class MainActivity extends ActionBarActivity
{
    // Shared statically so fragments can read/write the selection without a
    // callback interface.
    public static int selected_match_id;
    // Pager page to show; 2 is the initial default page.
    public static int current_fragment = 2;
    public static String LOG_TAG = "MainActivity";
    private final String save_tag = "Save Test";
    // The hosted pager fragment; re-attached in onRestoreInstanceState when
    // the activity is recreated.
    private PagerFragment my_main;

    @Override
    protected void onCreate(Bundle savedInstanceState)
    {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);
        Log.d(LOG_TAG, "Reached MainActivity onCreate");
        // Only create a fresh fragment on first launch; on recreation the
        // fragment is recovered from the saved state instead.
        if (savedInstanceState == null)
        {
            my_main = new PagerFragment();
            getSupportFragmentManager().beginTransaction()
                    .add(R.id.container, my_main)
                    .commit();
        }
    }

    @Override
    public boolean onCreateOptionsMenu(Menu menu)
    {
        // Inflate the menu; this adds items to the action bar if it is present.
        getMenuInflater().inflate(R.menu.menu_main, menu);
        return true;
    }

    @Override
    public boolean onOptionsItemSelected(MenuItem item)
    {
        // Handle action bar item clicks here. The action bar will
        // automatically handle clicks on the Home/Up button, so long
        // as you specify a parent activity in AndroidManifest.xml.
        int id = item.getItemId();
        //noinspection SimplifiableIfStatement
        if (id == R.id.action_about)
        {
            // Launch the About screen.
            Intent start_about = new Intent(this, AboutActivity.class);
            startActivity(start_about);
            return true;
        }
        return super.onOptionsItemSelected(item);
    }

    @Override
    protected void onSaveInstanceState(Bundle outState)
    {
        Log.v(save_tag, "will save");
        Log.v(save_tag, "fragment: " + String.valueOf(my_main.mPagerHandler.getCurrentItem()));
        Log.v(save_tag, "selected id: " + selected_match_id);
        // Persist the visible pager page and the selected match so both
        // survive rotation / process recreation.
        outState.putInt("Pager_Current", my_main.mPagerHandler.getCurrentItem());
        outState.putInt("Selected_match", selected_match_id);
        getSupportFragmentManager().putFragment(outState, "my_main", my_main);
        super.onSaveInstanceState(outState);
    }

    @Override
    protected void onRestoreInstanceState(Bundle savedInstanceState)
    {
        Log.v(save_tag, "will retrive");
        Log.v(save_tag, "fragment: " + String.valueOf(savedInstanceState.getInt("Pager_Current")));
        Log.v(save_tag, "selected id: " + savedInstanceState.getInt("Selected_match"));
        current_fragment = savedInstanceState.getInt("Pager_Current");
        selected_match_id = savedInstanceState.getInt("Selected_match");
        // Re-attach the fragment instance the framework saved in
        // onSaveInstanceState rather than creating a new one.
        my_main = (PagerFragment) getSupportFragmentManager().getFragment(savedInstanceState, "my_main");
        super.onRestoreInstanceState(savedInstanceState);
    }
}
# Elasticsearch for Kubernetes

This directory contains the source for a Docker image that creates an instance of [Elasticsearch](https://www.elastic.co/products/elasticsearch) 1.5.2 which can be used to automatically form clusters when used with [replication controllers](../../docs/replication-controller.md). This will not work with the library Elasticsearch image because multicast discovery will not find the other pod IPs needed to form a cluster. This image detects other Elasticsearch [pods](../../docs/pods.md) running in a specified [namespace](../../docs/namespaces.md) with a given label selector. The detected instances are used to form a list of peer hosts which are used as part of the unicast discovery mechanism for Elasticsearch. The detection of the peer nodes is done by a program which communicates with the Kubernetes API server to get a list of matching Elasticsearch pods. To enable authenticated communication this image needs a [secret](../../docs/secrets.md) to be mounted at `/etc/apiserver-secret` containing the bearer authentication token.

Here is an example replication controller specification that creates 4 instances of Elasticsearch which is in the file [music-rc.yaml](music-rc.yaml).

```
apiVersion: v1
kind: ReplicationController
metadata:
  labels:
    name: music-db
  namespace: mytunes
  name: music-db
spec:
  replicas: 4
  selector:
    name: music-db
  template:
    metadata:
      labels:
        name: music-db
    spec:
      containers:
      - name: es
        image: kubernetes/elasticsearch:1.0
        env:
          - name: "CLUSTER_NAME"
            value: "mytunes-db"
          - name: "SELECTOR"
            value: "name=music-db"
          - name: "NAMESPACE"
            value: "mytunes"
        ports:
        - name: es
          containerPort: 9200
        - name: es-transport
          containerPort: 9300
        volumeMounts:
        - name: apiserver-secret
          mountPath: /etc/apiserver-secret
          readOnly: true
      volumes:
      - name: apiserver-secret
        secret:
          secretName: apiserver-secret
```

The `CLUSTER_NAME` variable gives a name to the cluster and allows multiple separate clusters to exist in the same namespace.
The `SELECTOR` variable should be set to a label query that identifies the Elasticsearch nodes that should participate in this cluster. For our example we specify `name=music-db` to match all pods that have the label `name` set to the value `music-db`. The `NAMESPACE` variable identifies the namespace to be used to search for Elasticsearch pods and this should be the same as the namespace specified for the replication controller (in this case `mytunes`).

Before creating pods with the replication controller a secret containing the bearer authentication token should be set up. A template is provided in the file [apiserver-secret.yaml](apiserver-secret.yaml):

```
apiVersion: v1
kind: Secret
metadata:
  name: apiserver-secret
  namespace: NAMESPACE
data:
  token: "TOKEN"
```

Replace `NAMESPACE` with the actual namespace to be used and `TOKEN` with the base64 encoded version of the bearer token reported by `kubectl config view` e.g.

```
$ kubectl config view
...
- name: kubernetes-logging_kubernetes-basic-auth
...
    token: yGlDcMvSZPX4PyP0Q5bHgAYgi1iyEHv2
...
$ echo yGlDcMvSZPX4PyP0Q5bHgAYgi1iyEHv2 | base64
eUdsRGNNdlNaUFg0UHlQMFE1YkhnQVlnaTFpeUVIdjIK=
```

resulting in the file:

```
apiVersion: v1
kind: Secret
metadata:
  name: apiserver-secret
  namespace: mytunes
data:
  token: "eUdsRGNNdlNaUFg0UHlQMFE1YkhnQVlnaTFpeUVIdjIK="
```

which can be used to create the secret in your namespace:

```
kubectl create -f apiserver-secret.yaml --namespace=mytunes
secrets/apiserver-secret
```

Now you are ready to create the replication controller which will then create the pods:

```
$ kubectl create -f music-rc.yaml --namespace=mytunes
replicationcontrollers/music-db
```

It's also useful to have a [service](../../docs/services.md) with a load balancer for accessing the Elasticsearch cluster which can be found in the file [music-service.yaml](music-service.yaml).
``` apiVersion: v1 kind: Service metadata: name: music-server namespace: mytunes labels: name: music-db spec: selector: name: music-db ports: - name: db port: 9200 targetPort: es type: LoadBalancer ``` Let's create the service with an external load balancer: ``` $ kubectl create -f music-service.yaml --namespace=mytunes services/music-server ``` Let's see what we've got: ``` $ kubectl get pods,rc,services,secrets --namespace=mytunes POD IP CONTAINER(S) IMAGE(S) HOST LABELS STATUS CREATED MESSAGE music-db-0fwsu 10.244.2.48 kubernetes-minion-m49b/104.197.35.221 name=music-db Running 6 minutes es kubernetes/elasticsearch:1.0 Running 29 seconds music-db-5pc2e 10.244.0.24 kubernetes-minion-3c8c/146.148.41.184 name=music-db Running 6 minutes es kubernetes/elasticsearch:1.0 Running 6 minutes music-db-bjqmv 10.244.3.31 kubernetes-minion-zey5/104.154.59.10 name=music-db Running 6 minutes es kubernetes/elasticsearch:1.0 Running 19 seconds music-db-swtrs 10.244.1.37 kubernetes-minion-f9dw/130.211.159.230 name=music-db Running 6 minutes es kubernetes/elasticsearch:1.0 Running 6 minutes CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS music-db es kubernetes/elasticsearch:1.0 name=music-db 4 NAME LABELS SELECTOR IP(S) PORT(S) music-server name=music-db name=music-db 10.0.138.61 9200/TCP 104.197.12.157 NAME TYPE DATA apiserver-secret Opaque 2 ``` This shows 4 instances of Elasticsearch running. After making sure that port 9200 is accessible for this cluster (e.g. using a firewall rule for GCE) we can make queries via the service which will be fielded by the matching Elasticsearch pods. 
``` $ curl 104.197.12.157:9200 { "status" : 200, "name" : "Warpath", "cluster_name" : "mytunes-db", "version" : { "number" : "1.5.2", "build_hash" : "62ff9868b4c8a0c45860bebb259e21980778ab1c", "build_timestamp" : "2015-04-27T09:21:06Z", "build_snapshot" : false, "lucene_version" : "4.10.4" }, "tagline" : "You Know, for Search" } $ curl 104.197.12.157:9200 { "status" : 200, "name" : "Callisto", "cluster_name" : "mytunes-db", "version" : { "number" : "1.5.2", "build_hash" : "62ff9868b4c8a0c45860bebb259e21980778ab1c", "build_timestamp" : "2015-04-27T09:21:06Z", "build_snapshot" : false, "lucene_version" : "4.10.4" }, "tagline" : "You Know, for Search" } ``` We can query the nodes to confirm that an Elasticsearch cluster has been formed. ``` $ curl 104.197.12.157:9200/_nodes?pretty=true { "cluster_name" : "mytunes-db", "nodes" : { "u-KrvywFQmyaH5BulSclsA" : { "name" : "Jonas Harrow", ... "discovery" : { "zen" : { "ping" : { "unicast" : { "hosts" : [ "10.244.2.48", "10.244.0.24", "10.244.3.31", "10.244.1.37" ] }, ... "name" : "Warpath", ... "discovery" : { "zen" : { "ping" : { "unicast" : { "hosts" : [ "10.244.2.48", "10.244.0.24", "10.244.3.31", "10.244.1.37" ] }, ... "name" : "Callisto", ... "discovery" : { "zen" : { "ping" : { "unicast" : { "hosts" : [ "10.244.2.48", "10.244.0.24", "10.244.3.31", "10.244.1.37" ] }, ... "name" : "Vapor", ... "discovery" : { "zen" : { "ping" : { "unicast" : { "hosts" : [ "10.244.2.48", "10.244.0.24", "10.244.3.31", "10.244.1.37" ] ... 
``` Let's ramp up the number of Elasticsearch nodes from 4 to 10: ``` $ kubectl scale --replicas=10 replicationcontrollers music-db --namespace=mytunes scaled $ kubectl get pods --namespace=mytunes POD IP CONTAINER(S) IMAGE(S) HOST LABELS STATUS CREATED MESSAGE music-db-0fwsu 10.244.2.48 kubernetes-minion-m49b/104.197.35.221 name=music-db Running 33 minutes es kubernetes/elasticsearch:1.0 Running 26 minutes music-db-2erje 10.244.2.50 kubernetes-minion-m49b/104.197.35.221 name=music-db Running 48 seconds es kubernetes/elasticsearch:1.0 Running 46 seconds music-db-5pc2e 10.244.0.24 kubernetes-minion-3c8c/146.148.41.184 name=music-db Running 33 minutes es kubernetes/elasticsearch:1.0 Running 32 minutes music-db-8rkvp 10.244.3.33 kubernetes-minion-zey5/104.154.59.10 name=music-db Running 48 seconds es kubernetes/elasticsearch:1.0 Running 46 seconds music-db-bjqmv 10.244.3.31 kubernetes-minion-zey5/104.154.59.10 name=music-db Running 33 minutes es kubernetes/elasticsearch:1.0 Running 26 minutes music-db-efc46 10.244.2.49 kubernetes-minion-m49b/104.197.35.221 name=music-db Running 48 seconds es kubernetes/elasticsearch:1.0 Running 46 seconds music-db-fhqyg 10.244.0.25 kubernetes-minion-3c8c/146.148.41.184 name=music-db Running 48 seconds es kubernetes/elasticsearch:1.0 Running 47 seconds music-db-guxe4 10.244.3.32 kubernetes-minion-zey5/104.154.59.10 name=music-db Running 48 seconds es kubernetes/elasticsearch:1.0 Running 46 seconds music-db-pbiq1 10.244.1.38 kubernetes-minion-f9dw/130.211.159.230 name=music-db Running 48 seconds es kubernetes/elasticsearch:1.0 Running 47 seconds music-db-swtrs 10.244.1.37 kubernetes-minion-f9dw/130.211.159.230 name=music-db Running 33 minutes es kubernetes/elasticsearch:1.0 Running 32 minutes ``` Let's check to make sure that these 10 nodes are part of the same Elasticsearch cluster: ``` $ curl 104.197.12.157:9200/_nodes?pretty=true | grep name "cluster_name" : "mytunes-db", "name" : "Killraven", "name" : "Killraven", "name" : 
"mytunes-db" "vm_name" : "OpenJDK 64-Bit Server VM", "name" : "eth0", "name" : "Tefral the Surveyor", "name" : "Tefral the Surveyor", "name" : "mytunes-db" "vm_name" : "OpenJDK 64-Bit Server VM", "name" : "eth0", "name" : "Jonas Harrow", "name" : "Jonas Harrow", "name" : "mytunes-db" "vm_name" : "OpenJDK 64-Bit Server VM", "name" : "eth0", "name" : "Warpath", "name" : "Warpath", "name" : "mytunes-db" "vm_name" : "OpenJDK 64-Bit Server VM", "name" : "eth0", "name" : "Brute I", "name" : "Brute I", "name" : "mytunes-db" "vm_name" : "OpenJDK 64-Bit Server VM", "name" : "eth0", "name" : "Callisto", "name" : "Callisto", "name" : "mytunes-db" "vm_name" : "OpenJDK 64-Bit Server VM", "name" : "eth0", "name" : "Vapor", "name" : "Vapor", "name" : "mytunes-db" "vm_name" : "OpenJDK 64-Bit Server VM", "name" : "eth0", "name" : "Timeslip", "name" : "Timeslip", "name" : "mytunes-db" "vm_name" : "OpenJDK 64-Bit Server VM", "name" : "eth0", "name" : "Magik", "name" : "Magik", "name" : "mytunes-db" "vm_name" : "OpenJDK 64-Bit Server VM", "name" : "eth0", "name" : "Brother Voodoo", "name" : "Brother Voodoo", "name" : "mytunes-db" "vm_name" : "OpenJDK 64-Bit Server VM", "name" : "eth0", ``` [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/elasticsearch/README.md?pixel)]() [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/release-0.20.0/examples/elasticsearch/README.md?pixel)]()
import { PlannerListModule } from 'fabric8-planner';
import { NgModule } from '@angular/core';
import { CommonModule } from '@angular/common';
import { Http } from '@angular/http';

// Thin wrapper module that pulls the fabric8-planner list module (plus the
// common directives) into the application's module graph.
@NgModule({
  imports: [
    CommonModule,
    PlannerListModule
  ]
})
export class PlanListModule {
  // Http is injected but never used in the body; presumably this forces the
  // Http provider to be instantiated when the module loads — TODO confirm
  // against the planner's requirements before removing.
  constructor(http: Http) {}
}
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright (c) 2005, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // --- // Author: Sanjay Ghemawat // // This has the implementation details of malloc_hook that are needed // to use malloc-hook inside the tcmalloc system. It does not hold // any of the client-facing calls that are used to add new hooks. 
#ifndef _MALLOC_HOOK_INL_H_
#define _MALLOC_HOOK_INL_H_

#include <stddef.h>
#include <sys/types.h>
#include "base/atomicops.h"
#include "base/basictypes.h"
#include <gperftools/malloc_hook.h>

namespace base { namespace internal {

// The following (implementation) code is DEPRECATED.
// A simple atomic pointer class that can be initialized by the linker
// when you define a namespace-scope variable as:
//
//   AtomicPtr<Foo*> my_global = { &initial_value };
//
// This isn't suitable for a general atomic<> class because of the
// public access to data_.
template<typename PtrT> class AtomicPtr {
 public:
  // PtrT is stored inside a single AtomicWord, so it must fit in one.
  COMPILE_ASSERT(sizeof(PtrT) <= sizeof(AtomicWord),
                 PtrT_should_fit_in_AtomicWord);

  // Reads the stored pointer with no memory barrier.
  PtrT Get() const {
    // Depending on the system, Acquire_Load(AtomicWord*) may have
    // been defined to return an AtomicWord, Atomic32, or Atomic64.
    // We hide that implementation detail here with an explicit cast.
    // This prevents MSVC 2005, at least, from complaining (it has to
    // do with __wp64; AtomicWord is __wp64, but Atomic32/64 aren't).
    return reinterpret_cast<PtrT>(static_cast<AtomicWord>(
        base::subtle::NoBarrier_Load(&data_)));
  }

  // Sets the contained value to new_val and returns the old value,
  // atomically, with acquire and release semantics.
  // This is a full-barrier instruction.
  // NOTE(review): defined out of line — presumably in malloc_hook.cc,
  // alongside the hook globals below; confirm.
  PtrT Exchange(PtrT new_val);

  // Not private so that the class is an aggregate and can be
  // initialized by the linker.  Don't access this directly.
  AtomicWord data_;
};

// These are initialized in malloc_hook.cc
extern AtomicPtr<MallocHook::NewHook>     new_hook_;
extern AtomicPtr<MallocHook::DeleteHook>  delete_hook_;
extern AtomicPtr<MallocHook::PreMmapHook> premmap_hook_;
extern AtomicPtr<MallocHook::MmapHook>    mmap_hook_;
extern AtomicPtr<MallocHook::MunmapHook>  munmap_hook_;
extern AtomicPtr<MallocHook::MremapHook>  mremap_hook_;
extern AtomicPtr<MallocHook::PreSbrkHook> presbrk_hook_;
extern AtomicPtr<MallocHook::SbrkHook>    sbrk_hook_;
// End DEPRECATED code.
// Maximum of 7 hooks means that HookList is 8 words.
static const int kHookListMaxValues = 7;

// HookList: a class that provides synchronized insertions and removals and
// lockless traversal.  Most of the implementation is in malloc_hook.cc.
template <typename T> struct PERFTOOLS_DLL_DECL HookList {
  // Each entry is stored in an AtomicWord, so T must fit in one.
  COMPILE_ASSERT(sizeof(T) <= sizeof(AtomicWord), T_should_fit_in_AtomicWord);

  // Adds value to the list.  Note that duplicates are allowed.  Thread-safe
  // and blocking (acquires hooklist_spinlock).  Returns true on success;
  // false otherwise (failures include invalid value and no space left).
  bool Add(T value);

  // Removes the first entry matching value from the list.  Thread-safe and
  // blocking (acquires hooklist_spinlock).  Returns true on success; false
  // otherwise (failures include invalid value and no value found).
  bool Remove(T value);

  // Store up to n values of the list in output_array, and return the number
  // of elements stored.  Thread-safe and non-blocking.  This is fast (one
  // memory access) if the list is empty.
  int Traverse(T* output_array, int n) const;

  // Fast inline implementation for fast path of Invoke*Hook: the list is
  // empty iff priv_end is 0 (read with no barrier).
  bool empty() const {
    return base::subtle::NoBarrier_Load(&priv_end) == 0;
  }

  // This internal data is not private so that the class is an aggregate and
  // can be initialized by the linker.  Don't access this directly.  Use the
  // INIT_HOOK_LIST macro in malloc_hook.cc.

  // One more than the index of the last valid element in priv_data.  During
  // 'Remove' this may be past the last valid element in priv_data, but
  // subsequent values will be 0.
  AtomicWord priv_end;
  AtomicWord priv_data[kHookListMaxValues];
};

extern HookList<MallocHook::NewHook> new_hooks_;
extern HookList<MallocHook::DeleteHook> delete_hooks_;
extern HookList<MallocHook::PreMmapHook> premmap_hooks_;
extern HookList<MallocHook::MmapHook> mmap_hooks_;
extern HookList<MallocHook::MmapReplacement> mmap_replacement_;
extern HookList<MallocHook::MunmapHook> munmap_hooks_;
extern HookList<MallocHook::MunmapReplacement> munmap_replacement_;
extern HookList<MallocHook::MremapHook> mremap_hooks_;
extern HookList<MallocHook::PreSbrkHook> presbrk_hooks_;
extern HookList<MallocHook::SbrkHook> sbrk_hooks_;

} } // namespace base::internal

// The following method is DEPRECATED
inline MallocHook::NewHook MallocHook::GetNewHook() {
  return base::internal::new_hook_.Get();
}

// Calls the slow path only when at least one hook is registered; the empty
// check is a single barrier-free load.
inline void MallocHook::InvokeNewHook(const void* p, size_t s) {
  if (!base::internal::new_hooks_.empty()) {
    InvokeNewHookSlow(p, s);
  }
  // The following code is DEPRECATED.
  MallocHook::NewHook hook = MallocHook::GetNewHook();
  if (hook != NULL) (*hook)(p, s);
  // End DEPRECATED code.
}

// The following method is DEPRECATED
inline MallocHook::DeleteHook MallocHook::GetDeleteHook() {
  return base::internal::delete_hook_.Get();
}

inline void MallocHook::InvokeDeleteHook(const void* p) {
  if (!base::internal::delete_hooks_.empty()) {
    InvokeDeleteHookSlow(p);
  }
  // The following code is DEPRECATED.
  MallocHook::DeleteHook hook = MallocHook::GetDeleteHook();
  if (hook != NULL) (*hook)(p);
  // End DEPRECATED code.
}

// The following method is DEPRECATED
inline MallocHook::PreMmapHook MallocHook::GetPreMmapHook() {
  return base::internal::premmap_hook_.Get();
}

inline void MallocHook::InvokePreMmapHook(const void* start,
                                          size_t size,
                                          int protection,
                                          int flags,
                                          int fd,
                                          off_t offset) {
  if (!base::internal::premmap_hooks_.empty()) {
    InvokePreMmapHookSlow(start, size, protection, flags, fd, offset);
  }
  // The following code is DEPRECATED.
  MallocHook::PreMmapHook hook = MallocHook::GetPreMmapHook();
  if (hook != NULL) (*hook)(start, size, protection, flags, fd, offset);
  // End DEPRECATED code.
}

// The following method is DEPRECATED
inline MallocHook::MmapHook MallocHook::GetMmapHook() {
  return base::internal::mmap_hook_.Get();
}

inline void MallocHook::InvokeMmapHook(const void* result,
                                       const void* start,
                                       size_t size,
                                       int protection,
                                       int flags,
                                       int fd,
                                       off_t offset) {
  if (!base::internal::mmap_hooks_.empty()) {
    InvokeMmapHookSlow(result, start, size, protection, flags, fd, offset);
  }
  // The following code is DEPRECATED.
  MallocHook::MmapHook hook = MallocHook::GetMmapHook();
  if (hook != NULL) (*hook)(result, start, size, protection, flags, fd, offset);
  // End DEPRECATED code.
}

// Returns true (and fills *result) only when an mmap replacement hook is
// registered and handled the call; false means "perform the real mmap".
inline bool MallocHook::InvokeMmapReplacement(const void* start,
                                              size_t size,
                                              int protection,
                                              int flags,
                                              int fd,
                                              off_t offset,
                                              void** result) {
  if (!base::internal::mmap_replacement_.empty()) {
    return InvokeMmapReplacementSlow(start, size,
                                     protection, flags,
                                     fd, offset,
                                     result);
  }
  return false;
}

// The following method is DEPRECATED
inline MallocHook::MunmapHook MallocHook::GetMunmapHook() {
  return base::internal::munmap_hook_.Get();
}

inline void MallocHook::InvokeMunmapHook(const void* p, size_t size) {
  if (!base::internal::munmap_hooks_.empty()) {
    InvokeMunmapHookSlow(p, size);
  }
  // The following code is DEPRECATED.
  MallocHook::MunmapHook hook = MallocHook::GetMunmapHook();
  if (hook != NULL) (*hook)(p, size);
  // End DEPRECATED code.
}

// NOTE(review): this gates the munmap replacement on mmap_replacement_
// rather than munmap_replacement_ — looks suspicious; confirm against
// upstream gperftools before changing.
inline bool MallocHook::InvokeMunmapReplacement(
    const void* p, size_t size, int* result) {
  if (!base::internal::mmap_replacement_.empty()) {
    return InvokeMunmapReplacementSlow(p, size, result);
  }
  return false;
}

// The following method is DEPRECATED
inline MallocHook::MremapHook MallocHook::GetMremapHook() {
  return base::internal::mremap_hook_.Get();
}

inline void MallocHook::InvokeMremapHook(const void* result,
                                         const void* old_addr,
                                         size_t old_size,
                                         size_t new_size,
                                         int flags,
                                         const void* new_addr) {
  if (!base::internal::mremap_hooks_.empty()) {
    InvokeMremapHookSlow(result, old_addr, old_size, new_size, flags,
                         new_addr);
  }
  // The following code is DEPRECATED.
  MallocHook::MremapHook hook = MallocHook::GetMremapHook();
  if (hook != NULL) (*hook)(result, old_addr, old_size, new_size, flags,
                            new_addr);
  // End DEPRECATED code.
}

// The following method is DEPRECATED
inline MallocHook::PreSbrkHook MallocHook::GetPreSbrkHook() {
  return base::internal::presbrk_hook_.Get();
}

// Sbrk hooks are skipped entirely for zero-size increments.
inline void MallocHook::InvokePreSbrkHook(ptrdiff_t increment) {
  if (!base::internal::presbrk_hooks_.empty() && increment != 0) {
    InvokePreSbrkHookSlow(increment);
  }
  // The following code is DEPRECATED.
  MallocHook::PreSbrkHook hook = MallocHook::GetPreSbrkHook();
  if (hook != NULL && increment != 0) (*hook)(increment);
  // End DEPRECATED code.
}

// The following method is DEPRECATED
inline MallocHook::SbrkHook MallocHook::GetSbrkHook() {
  return base::internal::sbrk_hook_.Get();
}

inline void MallocHook::InvokeSbrkHook(const void* result,
                                       ptrdiff_t increment) {
  if (!base::internal::sbrk_hooks_.empty() && increment != 0) {
    InvokeSbrkHookSlow(result, increment);
  }
  // The following code is DEPRECATED.
  MallocHook::SbrkHook hook = MallocHook::GetSbrkHook();
  if (hook != NULL && increment != 0) (*hook)(result, increment);
  // End DEPRECATED code.
}

#endif /* _MALLOC_HOOK_INL_H_ */
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY $KUBEROOT/hack/update-generated-conversions.sh
// NOTE(review): regenerate via the script above rather than editing by hand.

package v1

import (
	reflect "reflect"

	api "k8s.io/kubernetes/pkg/api"
	resource "k8s.io/kubernetes/pkg/api/resource"
	conversion "k8s.io/kubernetes/pkg/conversion"
)

// Each converter below first applies any registered defaulting function for
// the input type, then field-by-field copies api.* into the v1 equivalent,
// deep-copying slices/maps/pointers so the output shares no mutable state
// with the input.

func convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *api.AWSElasticBlockStoreVolumeSource, out *AWSElasticBlockStoreVolumeSource, s conversion.Scope) error {
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*api.AWSElasticBlockStoreVolumeSource))(in)
	}
	out.VolumeID = in.VolumeID
	out.FSType = in.FSType
	out.Partition = in.Partition
	out.ReadOnly = in.ReadOnly
	return nil
}

func convert_api_Binding_To_v1_Binding(in *api.Binding, out *Binding, s conversion.Scope) error {
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*api.Binding))(in)
	}
	if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
		return err
	}
	if err := convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
		return err
	}
	if err := convert_api_ObjectReference_To_v1_ObjectReference(&in.Target, &out.Target, s); err != nil {
		return err
	}
	return nil
}

// Deep-copies the Add/Drop capability slices, converting each element type.
func convert_api_Capabilities_To_v1_Capabilities(in *api.Capabilities, out *Capabilities, s conversion.Scope) error {
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*api.Capabilities))(in)
	}
	if in.Add != nil {
		out.Add = make([]Capability, len(in.Add))
		for i := range in.Add {
			out.Add[i] = Capability(in.Add[i])
		}
	} else {
		out.Add = nil
	}
	if in.Drop != nil {
		out.Drop = make([]Capability, len(in.Drop))
		for i := range in.Drop {
			out.Drop[i] = Capability(in.Drop[i])
		}
	} else {
		out.Drop = nil
	}
	return nil
}

func convert_api_ComponentCondition_To_v1_ComponentCondition(in *api.ComponentCondition, out *ComponentCondition, s conversion.Scope) error {
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*api.ComponentCondition))(in)
	}
	out.Type = ComponentConditionType(in.Type)
	out.Status = ConditionStatus(in.Status)
	out.Message = in.Message
	out.Error = in.Error
	return nil
}

func convert_api_ComponentStatus_To_v1_ComponentStatus(in *api.ComponentStatus, out *ComponentStatus, s conversion.Scope) error {
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*api.ComponentStatus))(in)
	}
	if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
		return err
	}
	if err := convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
		return err
	}
	if in.Conditions != nil {
		out.Conditions = make([]ComponentCondition, len(in.Conditions))
		for i := range in.Conditions {
			if err := convert_api_ComponentCondition_To_v1_ComponentCondition(&in.Conditions[i], &out.Conditions[i], s); err != nil {
				return err
			}
		}
	} else {
		out.Conditions = nil
	}
	return nil
}

func convert_api_ComponentStatusList_To_v1_ComponentStatusList(in *api.ComponentStatusList, out *ComponentStatusList, s conversion.Scope) error {
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*api.ComponentStatusList))(in)
	}
	if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
		return err
	}
	if err := convert_api_ListMeta_To_v1_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
		return err
	}
	if in.Items != nil {
		out.Items = make([]ComponentStatus, len(in.Items))
		for i := range in.Items {
			if err := convert_api_ComponentStatus_To_v1_ComponentStatus(&in.Items[i], &out.Items[i], s); err != nil {
				return err
			}
		}
	} else {
		out.Items = nil
	}
	return nil
}

func convert_api_Container_To_v1_Container(in *api.Container, out *Container, s conversion.Scope) error {
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*api.Container))(in)
	}
	out.Name = in.Name
	out.Image = in.Image
	if in.Command != nil {
		out.Command = make([]string, len(in.Command))
		for i := range in.Command {
			out.Command[i] = in.Command[i]
		}
	} else {
		out.Command = nil
	}
	if in.Args != nil {
		out.Args = make([]string, len(in.Args))
		for i := range in.Args {
			out.Args[i] = in.Args[i]
		}
	} else {
		out.Args = nil
	}
	out.WorkingDir = in.WorkingDir
	if in.Ports != nil {
		out.Ports = make([]ContainerPort, len(in.Ports))
		for i := range in.Ports {
			if err := convert_api_ContainerPort_To_v1_ContainerPort(&in.Ports[i], &out.Ports[i], s); err != nil {
				return err
			}
		}
	} else {
		out.Ports = nil
	}
	if in.Env != nil {
		out.Env = make([]EnvVar, len(in.Env))
		for i := range in.Env {
			if err := convert_api_EnvVar_To_v1_EnvVar(&in.Env[i], &out.Env[i], s); err != nil {
				return err
			}
		}
	} else {
		out.Env = nil
	}
	if err := convert_api_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil {
		return err
	}
	if in.VolumeMounts != nil {
		out.VolumeMounts = make([]VolumeMount, len(in.VolumeMounts))
		for i := range in.VolumeMounts {
			if err := convert_api_VolumeMount_To_v1_VolumeMount(&in.VolumeMounts[i], &out.VolumeMounts[i], s); err != nil {
				return err
			}
		}
	} else {
		out.VolumeMounts = nil
	}
	if in.LivenessProbe != nil {
		out.LivenessProbe = new(Probe)
		if err := convert_api_Probe_To_v1_Probe(in.LivenessProbe, out.LivenessProbe, s); err != nil {
			return err
		}
	} else {
		out.LivenessProbe = nil
	}
	if in.ReadinessProbe != nil {
		out.ReadinessProbe = new(Probe)
		if err := convert_api_Probe_To_v1_Probe(in.ReadinessProbe, out.ReadinessProbe, s); err != nil {
			return err
		}
	} else {
		out.ReadinessProbe = nil
	}
	if in.Lifecycle != nil {
		out.Lifecycle = new(Lifecycle)
		if err := convert_api_Lifecycle_To_v1_Lifecycle(in.Lifecycle, out.Lifecycle, s); err != nil {
			return err
		}
	} else {
		out.Lifecycle = nil
	}
	out.TerminationMessagePath = in.TerminationMessagePath
	out.ImagePullPolicy = PullPolicy(in.ImagePullPolicy)
	if in.SecurityContext != nil {
		out.SecurityContext = new(SecurityContext)
		if err := convert_api_SecurityContext_To_v1_SecurityContext(in.SecurityContext, out.SecurityContext, s); err != nil {
			return err
		}
	} else {
		out.SecurityContext = nil
	}
	out.Stdin = in.Stdin
	out.TTY = in.TTY
	return nil
}

func convert_api_ContainerPort_To_v1_ContainerPort(in *api.ContainerPort, out *ContainerPort, s conversion.Scope) error {
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*api.ContainerPort))(in)
	}
	out.Name = in.Name
	out.HostPort = in.HostPort
	out.ContainerPort = in.ContainerPort
	out.Protocol = Protocol(in.Protocol)
	out.HostIP = in.HostIP
	return nil
}

func convert_api_ContainerState_To_v1_ContainerState(in *api.ContainerState, out *ContainerState, s conversion.Scope) error {
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*api.ContainerState))(in)
	}
	if in.Waiting != nil {
		out.Waiting = new(ContainerStateWaiting)
		if err := convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in.Waiting, out.Waiting, s); err != nil {
			return err
		}
	} else {
		out.Waiting = nil
	}
	if in.Running != nil {
		out.Running = new(ContainerStateRunning)
		if err := convert_api_ContainerStateRunning_To_v1_ContainerStateRunning(in.Running, out.Running, s); err != nil {
			return err
		}
	} else {
		out.Running = nil
	}
	if in.Terminated != nil {
		out.Terminated = new(ContainerStateTerminated)
		if err := convert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(in.Terminated, out.Terminated, s); err != nil {
			return err
		}
	} else {
		out.Terminated = nil
	}
	return nil
}

func convert_api_ContainerStateRunning_To_v1_ContainerStateRunning(in *api.ContainerStateRunning, out *ContainerStateRunning, s conversion.Scope) error {
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*api.ContainerStateRunning))(in)
	}
	// Timestamps go through the generic Scope converter rather than a
	// generated field copy.
	if err := s.Convert(&in.StartedAt, &out.StartedAt, 0); err != nil {
		return err
	}
	return nil
}

func convert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(in *api.ContainerStateTerminated, out *ContainerStateTerminated, s conversion.Scope) error {
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*api.ContainerStateTerminated))(in)
	}
	out.ExitCode = in.ExitCode
	out.Signal = in.Signal
	out.Reason = in.Reason
	out.Message = in.Message
	if err := s.Convert(&in.StartedAt, &out.StartedAt, 0); err != nil {
		return err
	}
	if err := s.Convert(&in.FinishedAt, &out.FinishedAt, 0); err != nil {
		return err
	}
	out.ContainerID = in.ContainerID
	return nil
}

// NOTE(review): only Reason is copied here; no other fields are converted in
// this generated version.
func convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *api.ContainerStateWaiting, out *ContainerStateWaiting, s conversion.Scope) error {
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*api.ContainerStateWaiting))(in)
	}
	out.Reason = in.Reason
	return nil
}

func convert_api_ContainerStatus_To_v1_ContainerStatus(in *api.ContainerStatus, out *ContainerStatus, s conversion.Scope) error {
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*api.ContainerStatus))(in)
	}
	out.Name = in.Name
	if err := convert_api_ContainerState_To_v1_ContainerState(&in.State, &out.State, s); err != nil {
		return err
	}
	if err := convert_api_ContainerState_To_v1_ContainerState(&in.LastTerminationState, &out.LastTerminationState, s); err != nil {
		return err
	}
	out.Ready = in.Ready
	out.RestartCount = in.RestartCount
	out.Image = in.Image
	out.ImageID = in.ImageID
	out.ContainerID = in.ContainerID
	return nil
}

func convert_api_Daemon_To_v1_Daemon(in *api.Daemon, out *Daemon, s conversion.Scope) error {
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*api.Daemon))(in)
	}
	if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
		return err
	}
	if err := convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
		return err
	}
	if err := convert_api_DaemonSpec_To_v1_DaemonSpec(&in.Spec, &out.Spec, s); err != nil {
		return err
	}
	if err := convert_api_DaemonStatus_To_v1_DaemonStatus(&in.Status, &out.Status, s); err != nil {
		return err
	}
	return nil
}

func convert_api_DaemonList_To_v1_DaemonList(in *api.DaemonList, out *DaemonList, s conversion.Scope) error {
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*api.DaemonList))(in)
	}
	if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
		return err
	}
	if err := convert_api_ListMeta_To_v1_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
		return err
	}
	if in.Items != nil {
		out.Items = make([]Daemon, len(in.Items))
		for i := range in.Items {
			if err := convert_api_Daemon_To_v1_Daemon(&in.Items[i], &out.Items[i], s); err != nil {
				return err
			}
		}
	} else {
		out.Items = nil
	}
	return nil
}

func convert_api_DaemonSpec_To_v1_DaemonSpec(in *api.DaemonSpec, out *DaemonSpec, s conversion.Scope) error {
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*api.DaemonSpec))(in)
	}
	if in.Selector != nil {
		out.Selector = make(map[string]string)
		for key, val := range in.Selector {
			out.Selector[key] = val
		}
	} else {
		out.Selector = nil
	}
	if in.Template != nil {
		out.Template = new(PodTemplateSpec)
		if err := convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in.Template, out.Template, s); err != nil {
			return err
		}
	} else {
		out.Template = nil
	}
	return nil
}

func convert_api_DaemonStatus_To_v1_DaemonStatus(in *api.DaemonStatus, out *DaemonStatus, s conversion.Scope) error {
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*api.DaemonStatus))(in)
	}
	out.CurrentNumberScheduled = in.CurrentNumberScheduled
	out.NumberMisscheduled = in.NumberMisscheduled
	out.DesiredNumberScheduled = in.DesiredNumberScheduled
	return nil
}

func convert_api_DeleteOptions_To_v1_DeleteOptions(in *api.DeleteOptions, out *DeleteOptions, s conversion.Scope) error {
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*api.DeleteOptions))(in)
	}
	if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
		return err
	}
	if in.GracePeriodSeconds != nil {
		out.GracePeriodSeconds = new(int64)
		*out.GracePeriodSeconds = *in.GracePeriodSeconds
	} else {
		out.GracePeriodSeconds = nil
	}
	return nil
}

func convert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *api.EmptyDirVolumeSource, out *EmptyDirVolumeSource, s conversion.Scope) error {
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*api.EmptyDirVolumeSource))(in)
	}
	out.Medium = StorageMedium(in.Medium)
	return nil
}

func convert_api_EndpointAddress_To_v1_EndpointAddress(in *api.EndpointAddress, out *EndpointAddress, s conversion.Scope) error {
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*api.EndpointAddress))(in)
	}
	out.IP = in.IP
	if in.TargetRef != nil {
		out.TargetRef = new(ObjectReference)
		if err := convert_api_ObjectReference_To_v1_ObjectReference(in.TargetRef, out.TargetRef, s); err != nil {
			return err
		}
	} else {
		out.TargetRef = nil
	}
	return nil
}

func convert_api_EndpointPort_To_v1_EndpointPort(in *api.EndpointPort, out *EndpointPort, s conversion.Scope) error {
	if defaulting, found :=
s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.EndpointPort))(in) } out.Name = in.Name out.Port = in.Port out.Protocol = Protocol(in.Protocol) return nil } func convert_api_EndpointSubset_To_v1_EndpointSubset(in *api.EndpointSubset, out *EndpointSubset, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.EndpointSubset))(in) } if in.Addresses != nil { out.Addresses = make([]EndpointAddress, len(in.Addresses)) for i := range in.Addresses { if err := convert_api_EndpointAddress_To_v1_EndpointAddress(&in.Addresses[i], &out.Addresses[i], s); err != nil { return err } } } else { out.Addresses = nil } if in.Ports != nil { out.Ports = make([]EndpointPort, len(in.Ports)) for i := range in.Ports { if err := convert_api_EndpointPort_To_v1_EndpointPort(&in.Ports[i], &out.Ports[i], s); err != nil { return err } } } else { out.Ports = nil } return nil } func convert_api_Endpoints_To_v1_Endpoints(in *api.Endpoints, out *Endpoints, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.Endpoints))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if in.Subsets != nil { out.Subsets = make([]EndpointSubset, len(in.Subsets)) for i := range in.Subsets { if err := convert_api_EndpointSubset_To_v1_EndpointSubset(&in.Subsets[i], &out.Subsets[i], s); err != nil { return err } } } else { out.Subsets = nil } return nil } func convert_api_EndpointsList_To_v1_EndpointsList(in *api.EndpointsList, out *EndpointsList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.EndpointsList))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { 
return err } if err := convert_api_ListMeta_To_v1_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { out.Items = make([]Endpoints, len(in.Items)) for i := range in.Items { if err := convert_api_Endpoints_To_v1_Endpoints(&in.Items[i], &out.Items[i], s); err != nil { return err } } } else { out.Items = nil } return nil } func convert_api_EnvVar_To_v1_EnvVar(in *api.EnvVar, out *EnvVar, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.EnvVar))(in) } out.Name = in.Name out.Value = in.Value if in.ValueFrom != nil { out.ValueFrom = new(EnvVarSource) if err := convert_api_EnvVarSource_To_v1_EnvVarSource(in.ValueFrom, out.ValueFrom, s); err != nil { return err } } else { out.ValueFrom = nil } return nil } func convert_api_EnvVarSource_To_v1_EnvVarSource(in *api.EnvVarSource, out *EnvVarSource, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.EnvVarSource))(in) } if in.FieldRef != nil { out.FieldRef = new(ObjectFieldSelector) if err := convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in.FieldRef, out.FieldRef, s); err != nil { return err } } else { out.FieldRef = nil } return nil } func convert_api_Event_To_v1_Event(in *api.Event, out *Event, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.Event))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if err := convert_api_ObjectReference_To_v1_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil { return err } out.Reason = in.Reason out.Message = in.Message if err := convert_api_EventSource_To_v1_EventSource(&in.Source, &out.Source, s); err != nil { return err } if 
err := s.Convert(&in.FirstTimestamp, &out.FirstTimestamp, 0); err != nil { return err } if err := s.Convert(&in.LastTimestamp, &out.LastTimestamp, 0); err != nil { return err } out.Count = in.Count return nil } func convert_api_EventList_To_v1_EventList(in *api.EventList, out *EventList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.EventList))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_api_ListMeta_To_v1_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { out.Items = make([]Event, len(in.Items)) for i := range in.Items { if err := convert_api_Event_To_v1_Event(&in.Items[i], &out.Items[i], s); err != nil { return err } } } else { out.Items = nil } return nil } func convert_api_EventSource_To_v1_EventSource(in *api.EventSource, out *EventSource, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.EventSource))(in) } out.Component = in.Component out.Host = in.Host return nil } func convert_api_ExecAction_To_v1_ExecAction(in *api.ExecAction, out *ExecAction, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.ExecAction))(in) } if in.Command != nil { out.Command = make([]string, len(in.Command)) for i := range in.Command { out.Command[i] = in.Command[i] } } else { out.Command = nil } return nil } func convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *api.GCEPersistentDiskVolumeSource, out *GCEPersistentDiskVolumeSource, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.GCEPersistentDiskVolumeSource))(in) } out.PDName = in.PDName out.FSType = in.FSType out.Partition = in.Partition out.ReadOnly = in.ReadOnly return nil 
} func convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *api.GitRepoVolumeSource, out *GitRepoVolumeSource, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.GitRepoVolumeSource))(in) } out.Repository = in.Repository out.Revision = in.Revision return nil } func convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *api.GlusterfsVolumeSource, out *GlusterfsVolumeSource, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.GlusterfsVolumeSource))(in) } out.EndpointsName = in.EndpointsName out.Path = in.Path out.ReadOnly = in.ReadOnly return nil } func convert_api_HTTPGetAction_To_v1_HTTPGetAction(in *api.HTTPGetAction, out *HTTPGetAction, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.HTTPGetAction))(in) } out.Path = in.Path if err := s.Convert(&in.Port, &out.Port, 0); err != nil { return err } out.Host = in.Host out.Scheme = URIScheme(in.Scheme) return nil } func convert_api_Handler_To_v1_Handler(in *api.Handler, out *Handler, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.Handler))(in) } if in.Exec != nil { out.Exec = new(ExecAction) if err := convert_api_ExecAction_To_v1_ExecAction(in.Exec, out.Exec, s); err != nil { return err } } else { out.Exec = nil } if in.HTTPGet != nil { out.HTTPGet = new(HTTPGetAction) if err := convert_api_HTTPGetAction_To_v1_HTTPGetAction(in.HTTPGet, out.HTTPGet, s); err != nil { return err } } else { out.HTTPGet = nil } if in.TCPSocket != nil { out.TCPSocket = new(TCPSocketAction) if err := convert_api_TCPSocketAction_To_v1_TCPSocketAction(in.TCPSocket, out.TCPSocket, s); err != nil { return err } } else { out.TCPSocket = nil } return nil } func convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in 
*api.HostPathVolumeSource, out *HostPathVolumeSource, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.HostPathVolumeSource))(in) } out.Path = in.Path return nil } func convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *api.ISCSIVolumeSource, out *ISCSIVolumeSource, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.ISCSIVolumeSource))(in) } out.TargetPortal = in.TargetPortal out.IQN = in.IQN out.Lun = in.Lun out.FSType = in.FSType out.ReadOnly = in.ReadOnly return nil } func convert_api_Lifecycle_To_v1_Lifecycle(in *api.Lifecycle, out *Lifecycle, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.Lifecycle))(in) } if in.PostStart != nil { out.PostStart = new(Handler) if err := convert_api_Handler_To_v1_Handler(in.PostStart, out.PostStart, s); err != nil { return err } } else { out.PostStart = nil } if in.PreStop != nil { out.PreStop = new(Handler) if err := convert_api_Handler_To_v1_Handler(in.PreStop, out.PreStop, s); err != nil { return err } } else { out.PreStop = nil } return nil } func convert_api_LimitRange_To_v1_LimitRange(in *api.LimitRange, out *LimitRange, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.LimitRange))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if err := convert_api_LimitRangeSpec_To_v1_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil { return err } return nil } func convert_api_LimitRangeItem_To_v1_LimitRangeItem(in *api.LimitRangeItem, out *LimitRangeItem, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); 
found { defaulting.(func(*api.LimitRangeItem))(in) } out.Type = LimitType(in.Type) if in.Max != nil { out.Max = make(ResourceList) for key, val := range in.Max { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { return err } out.Max[ResourceName(key)] = newVal } } else { out.Max = nil } if in.Min != nil { out.Min = make(ResourceList) for key, val := range in.Min { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { return err } out.Min[ResourceName(key)] = newVal } } else { out.Min = nil } if in.Default != nil { out.Default = make(ResourceList) for key, val := range in.Default { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { return err } out.Default[ResourceName(key)] = newVal } } else { out.Default = nil } return nil } func convert_api_LimitRangeList_To_v1_LimitRangeList(in *api.LimitRangeList, out *LimitRangeList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.LimitRangeList))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_api_ListMeta_To_v1_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { out.Items = make([]LimitRange, len(in.Items)) for i := range in.Items { if err := convert_api_LimitRange_To_v1_LimitRange(&in.Items[i], &out.Items[i], s); err != nil { return err } } } else { out.Items = nil } return nil } func convert_api_LimitRangeSpec_To_v1_LimitRangeSpec(in *api.LimitRangeSpec, out *LimitRangeSpec, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.LimitRangeSpec))(in) } if in.Limits != nil { out.Limits = make([]LimitRangeItem, len(in.Limits)) for i := range in.Limits { if err := convert_api_LimitRangeItem_To_v1_LimitRangeItem(&in.Limits[i], &out.Limits[i], s); err != nil { return err } } } 
else { out.Limits = nil } return nil } func convert_api_List_To_v1_List(in *api.List, out *List, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.List))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_api_ListMeta_To_v1_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if err := s.Convert(&in.Items, &out.Items, 0); err != nil { return err } return nil } func convert_api_ListMeta_To_v1_ListMeta(in *api.ListMeta, out *ListMeta, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.ListMeta))(in) } out.SelfLink = in.SelfLink out.ResourceVersion = in.ResourceVersion return nil } func convert_api_ListOptions_To_v1_ListOptions(in *api.ListOptions, out *ListOptions, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.ListOptions))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := s.Convert(&in.LabelSelector, &out.LabelSelector, 0); err != nil { return err } if err := s.Convert(&in.FieldSelector, &out.FieldSelector, 0); err != nil { return err } out.Watch = in.Watch out.ResourceVersion = in.ResourceVersion return nil } func convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *api.LoadBalancerIngress, out *LoadBalancerIngress, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.LoadBalancerIngress))(in) } out.IP = in.IP out.Hostname = in.Hostname return nil } func convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *api.LoadBalancerStatus, out *LoadBalancerStatus, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { 
defaulting.(func(*api.LoadBalancerStatus))(in) } if in.Ingress != nil { out.Ingress = make([]LoadBalancerIngress, len(in.Ingress)) for i := range in.Ingress { if err := convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(&in.Ingress[i], &out.Ingress[i], s); err != nil { return err } } } else { out.Ingress = nil } return nil } func convert_api_LocalObjectReference_To_v1_LocalObjectReference(in *api.LocalObjectReference, out *LocalObjectReference, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.LocalObjectReference))(in) } out.Name = in.Name return nil } func convert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in *api.NFSVolumeSource, out *NFSVolumeSource, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.NFSVolumeSource))(in) } out.Server = in.Server out.Path = in.Path out.ReadOnly = in.ReadOnly return nil } func convert_api_Namespace_To_v1_Namespace(in *api.Namespace, out *Namespace, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.Namespace))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if err := convert_api_NamespaceSpec_To_v1_NamespaceSpec(&in.Spec, &out.Spec, s); err != nil { return err } if err := convert_api_NamespaceStatus_To_v1_NamespaceStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } func convert_api_NamespaceList_To_v1_NamespaceList(in *api.NamespaceList, out *NamespaceList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.NamespaceList))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } 
if err := convert_api_ListMeta_To_v1_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { out.Items = make([]Namespace, len(in.Items)) for i := range in.Items { if err := convert_api_Namespace_To_v1_Namespace(&in.Items[i], &out.Items[i], s); err != nil { return err } } } else { out.Items = nil } return nil } func convert_api_NamespaceSpec_To_v1_NamespaceSpec(in *api.NamespaceSpec, out *NamespaceSpec, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.NamespaceSpec))(in) } if in.Finalizers != nil { out.Finalizers = make([]FinalizerName, len(in.Finalizers)) for i := range in.Finalizers { out.Finalizers[i] = FinalizerName(in.Finalizers[i]) } } else { out.Finalizers = nil } return nil } func convert_api_NamespaceStatus_To_v1_NamespaceStatus(in *api.NamespaceStatus, out *NamespaceStatus, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.NamespaceStatus))(in) } out.Phase = NamespacePhase(in.Phase) return nil } func convert_api_Node_To_v1_Node(in *api.Node, out *Node, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.Node))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if err := convert_api_NodeSpec_To_v1_NodeSpec(&in.Spec, &out.Spec, s); err != nil { return err } if err := convert_api_NodeStatus_To_v1_NodeStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } func convert_api_NodeAddress_To_v1_NodeAddress(in *api.NodeAddress, out *NodeAddress, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.NodeAddress))(in) } out.Type = 
NodeAddressType(in.Type) out.Address = in.Address return nil } func convert_api_NodeCondition_To_v1_NodeCondition(in *api.NodeCondition, out *NodeCondition, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.NodeCondition))(in) } out.Type = NodeConditionType(in.Type) out.Status = ConditionStatus(in.Status) if err := s.Convert(&in.LastHeartbeatTime, &out.LastHeartbeatTime, 0); err != nil { return err } if err := s.Convert(&in.LastTransitionTime, &out.LastTransitionTime, 0); err != nil { return err } out.Reason = in.Reason out.Message = in.Message return nil } func convert_api_NodeList_To_v1_NodeList(in *api.NodeList, out *NodeList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.NodeList))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_api_ListMeta_To_v1_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { out.Items = make([]Node, len(in.Items)) for i := range in.Items { if err := convert_api_Node_To_v1_Node(&in.Items[i], &out.Items[i], s); err != nil { return err } } } else { out.Items = nil } return nil } func convert_api_NodeSpec_To_v1_NodeSpec(in *api.NodeSpec, out *NodeSpec, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.NodeSpec))(in) } out.PodCIDR = in.PodCIDR out.ExternalID = in.ExternalID out.ProviderID = in.ProviderID out.Unschedulable = in.Unschedulable return nil } func convert_api_NodeStatus_To_v1_NodeStatus(in *api.NodeStatus, out *NodeStatus, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.NodeStatus))(in) } if in.Capacity != nil { out.Capacity = make(ResourceList) for key, val := range in.Capacity { newVal := resource.Quantity{} 
if err := s.Convert(&val, &newVal, 0); err != nil { return err } out.Capacity[ResourceName(key)] = newVal } } else { out.Capacity = nil } out.Phase = NodePhase(in.Phase) if in.Conditions != nil { out.Conditions = make([]NodeCondition, len(in.Conditions)) for i := range in.Conditions { if err := convert_api_NodeCondition_To_v1_NodeCondition(&in.Conditions[i], &out.Conditions[i], s); err != nil { return err } } } else { out.Conditions = nil } if in.Addresses != nil { out.Addresses = make([]NodeAddress, len(in.Addresses)) for i := range in.Addresses { if err := convert_api_NodeAddress_To_v1_NodeAddress(&in.Addresses[i], &out.Addresses[i], s); err != nil { return err } } } else { out.Addresses = nil } if err := convert_api_NodeSystemInfo_To_v1_NodeSystemInfo(&in.NodeInfo, &out.NodeInfo, s); err != nil { return err } return nil } func convert_api_NodeSystemInfo_To_v1_NodeSystemInfo(in *api.NodeSystemInfo, out *NodeSystemInfo, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.NodeSystemInfo))(in) } out.MachineID = in.MachineID out.SystemUUID = in.SystemUUID out.BootID = in.BootID out.KernelVersion = in.KernelVersion out.OsImage = in.OsImage out.ContainerRuntimeVersion = in.ContainerRuntimeVersion out.KubeletVersion = in.KubeletVersion out.KubeProxyVersion = in.KubeProxyVersion return nil } func convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *api.ObjectFieldSelector, out *ObjectFieldSelector, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.ObjectFieldSelector))(in) } out.APIVersion = in.APIVersion out.FieldPath = in.FieldPath return nil } func convert_api_ObjectMeta_To_v1_ObjectMeta(in *api.ObjectMeta, out *ObjectMeta, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.ObjectMeta))(in) } out.Name = in.Name out.GenerateName = 
in.GenerateName out.Namespace = in.Namespace out.SelfLink = in.SelfLink out.UID = in.UID out.ResourceVersion = in.ResourceVersion out.Generation = in.Generation if err := s.Convert(&in.CreationTimestamp, &out.CreationTimestamp, 0); err != nil { return err } if in.DeletionTimestamp != nil { if err := s.Convert(&in.DeletionTimestamp, &out.DeletionTimestamp, 0); err != nil { return err } } else { out.DeletionTimestamp = nil } if in.DeletionGracePeriodSeconds != nil { out.DeletionGracePeriodSeconds = new(int64) *out.DeletionGracePeriodSeconds = *in.DeletionGracePeriodSeconds } else { out.DeletionGracePeriodSeconds = nil } if in.Labels != nil { out.Labels = make(map[string]string) for key, val := range in.Labels { out.Labels[key] = val } } else { out.Labels = nil } if in.Annotations != nil { out.Annotations = make(map[string]string) for key, val := range in.Annotations { out.Annotations[key] = val } } else { out.Annotations = nil } return nil } func convert_api_ObjectReference_To_v1_ObjectReference(in *api.ObjectReference, out *ObjectReference, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.ObjectReference))(in) } out.Kind = in.Kind out.Namespace = in.Namespace out.Name = in.Name out.UID = in.UID out.APIVersion = in.APIVersion out.ResourceVersion = in.ResourceVersion out.FieldPath = in.FieldPath return nil } func convert_api_PersistentVolume_To_v1_PersistentVolume(in *api.PersistentVolume, out *PersistentVolume, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.PersistentVolume))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if err := convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(&in.Spec, &out.Spec, s); err != nil { return err } 
if err := convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } func convert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *api.PersistentVolumeClaim, out *PersistentVolumeClaim, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.PersistentVolumeClaim))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if err := convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil { return err } if err := convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } func convert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *api.PersistentVolumeClaimList, out *PersistentVolumeClaimList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.PersistentVolumeClaimList))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_api_ListMeta_To_v1_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { out.Items = make([]PersistentVolumeClaim, len(in.Items)) for i := range in.Items { if err := convert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(&in.Items[i], &out.Items[i], s); err != nil { return err } } } else { out.Items = nil } return nil } func convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *api.PersistentVolumeClaimSpec, out *PersistentVolumeClaimSpec, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { 
defaulting.(func(*api.PersistentVolumeClaimSpec))(in) } if in.AccessModes != nil { out.AccessModes = make([]PersistentVolumeAccessMode, len(in.AccessModes)) for i := range in.AccessModes { out.AccessModes[i] = PersistentVolumeAccessMode(in.AccessModes[i]) } } else { out.AccessModes = nil } if err := convert_api_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { return err } out.VolumeName = in.VolumeName return nil } func convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *api.PersistentVolumeClaimStatus, out *PersistentVolumeClaimStatus, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.PersistentVolumeClaimStatus))(in) } out.Phase = PersistentVolumeClaimPhase(in.Phase) if in.AccessModes != nil { out.AccessModes = make([]PersistentVolumeAccessMode, len(in.AccessModes)) for i := range in.AccessModes { out.AccessModes[i] = PersistentVolumeAccessMode(in.AccessModes[i]) } } else { out.AccessModes = nil } if in.Capacity != nil { out.Capacity = make(ResourceList) for key, val := range in.Capacity { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { return err } out.Capacity[ResourceName(key)] = newVal } } else { out.Capacity = nil } return nil } func convert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *api.PersistentVolumeClaimVolumeSource, out *PersistentVolumeClaimVolumeSource, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.PersistentVolumeClaimVolumeSource))(in) } out.ClaimName = in.ClaimName out.ReadOnly = in.ReadOnly return nil } func convert_api_PersistentVolumeList_To_v1_PersistentVolumeList(in *api.PersistentVolumeList, out *PersistentVolumeList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { 
defaulting.(func(*api.PersistentVolumeList))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_api_ListMeta_To_v1_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { out.Items = make([]PersistentVolume, len(in.Items)) for i := range in.Items { if err := convert_api_PersistentVolume_To_v1_PersistentVolume(&in.Items[i], &out.Items[i], s); err != nil { return err } } } else { out.Items = nil } return nil } func convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *api.PersistentVolumeSource, out *PersistentVolumeSource, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.PersistentVolumeSource))(in) } if in.GCEPersistentDisk != nil { out.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) if err := convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in.GCEPersistentDisk, out.GCEPersistentDisk, s); err != nil { return err } } else { out.GCEPersistentDisk = nil } if in.AWSElasticBlockStore != nil { out.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) if err := convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in.AWSElasticBlockStore, out.AWSElasticBlockStore, s); err != nil { return err } } else { out.AWSElasticBlockStore = nil } if in.HostPath != nil { out.HostPath = new(HostPathVolumeSource) if err := convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in.HostPath, out.HostPath, s); err != nil { return err } } else { out.HostPath = nil } if in.Glusterfs != nil { out.Glusterfs = new(GlusterfsVolumeSource) if err := convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in.Glusterfs, out.Glusterfs, s); err != nil { return err } } else { out.Glusterfs = nil } if in.NFS != nil { out.NFS = new(NFSVolumeSource) if err := convert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in.NFS, 
out.NFS, s); err != nil { return err } } else { out.NFS = nil } if in.RBD != nil { out.RBD = new(RBDVolumeSource) if err := convert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in.RBD, out.RBD, s); err != nil { return err } } else { out.RBD = nil } if in.ISCSI != nil { out.ISCSI = new(ISCSIVolumeSource) if err := convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in.ISCSI, out.ISCSI, s); err != nil { return err } } else { out.ISCSI = nil } return nil } /* generated conversion: api.PersistentVolumeSpec -> v1 (Capacity map, embedded PersistentVolumeSource, AccessModes, ClaimRef pointer, reclaim policy) */ func convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *api.PersistentVolumeSpec, out *PersistentVolumeSpec, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.PersistentVolumeSpec))(in) } if in.Capacity != nil { out.Capacity = make(ResourceList) for key, val := range in.Capacity { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { return err } out.Capacity[ResourceName(key)] = newVal } } else { out.Capacity = nil } if err := convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, s); err != nil { return err } if in.AccessModes != nil { out.AccessModes = make([]PersistentVolumeAccessMode, len(in.AccessModes)) for i := range in.AccessModes { out.AccessModes[i] = PersistentVolumeAccessMode(in.AccessModes[i]) } } else { out.AccessModes = nil } if in.ClaimRef != nil { out.ClaimRef = new(ObjectReference) if err := convert_api_ObjectReference_To_v1_ObjectReference(in.ClaimRef, out.ClaimRef, s); err != nil { return err } } else { out.ClaimRef = nil } out.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy) return nil } /* generated conversion: api.PersistentVolumeStatus -> v1 (Phase, Message, Reason) */ func convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *api.PersistentVolumeStatus, out *PersistentVolumeStatus, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.PersistentVolumeStatus))(in) }
out.Phase = PersistentVolumePhase(in.Phase) out.Message = in.Message out.Reason = in.Reason return nil } /* generated conversion: api.Pod -> v1 Pod (TypeMeta, ObjectMeta, Spec, Status via sub-converters) */ func convert_api_Pod_To_v1_Pod(in *api.Pod, out *Pod, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.Pod))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if err := convert_api_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil { return err } if err := convert_api_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } /* generated conversion: api.PodAttachOptions -> v1 (stream flags plus Container) */ func convert_api_PodAttachOptions_To_v1_PodAttachOptions(in *api.PodAttachOptions, out *PodAttachOptions, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.PodAttachOptions))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } out.Stdin = in.Stdin out.Stdout = in.Stdout out.Stderr = in.Stderr out.TTY = in.TTY out.Container = in.Container return nil } /* generated conversion: api.PodCondition -> v1 (Type, Status only — timestamp fields are not copied here) */ func convert_api_PodCondition_To_v1_PodCondition(in *api.PodCondition, out *PodCondition, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.PodCondition))(in) } out.Type = PodConditionType(in.Type) out.Status = ConditionStatus(in.Status) return nil } /* generated conversion: api.PodExecOptions -> v1 (stream flags, Container, Command slice) */ func convert_api_PodExecOptions_To_v1_PodExecOptions(in *api.PodExecOptions, out *PodExecOptions, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.PodExecOptions))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } out.Stdin = in.Stdin out.Stdout = in.Stdout out.Stderr = in.Stderr out.TTY = in.TTY out.Container =
in.Container if in.Command != nil { out.Command = make([]string, len(in.Command)) for i := range in.Command { out.Command[i] = in.Command[i] } } else { out.Command = nil } return nil } /* generated conversion: api.PodList -> v1 (TypeMeta, ListMeta, per-item Pod conversion) */ func convert_api_PodList_To_v1_PodList(in *api.PodList, out *PodList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.PodList))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_api_ListMeta_To_v1_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { out.Items = make([]Pod, len(in.Items)) for i := range in.Items { if err := convert_api_Pod_To_v1_Pod(&in.Items[i], &out.Items[i], s); err != nil { return err } } } else { out.Items = nil } return nil } /* generated conversion: api.PodLogOptions -> v1 (Container, Follow, Previous) */ func convert_api_PodLogOptions_To_v1_PodLogOptions(in *api.PodLogOptions, out *PodLogOptions, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.PodLogOptions))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } out.Container = in.Container out.Follow = in.Follow out.Previous = in.Previous return nil } /* generated conversion: api.PodProxyOptions -> v1 (Path) */ func convert_api_PodProxyOptions_To_v1_PodProxyOptions(in *api.PodProxyOptions, out *PodProxyOptions, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.PodProxyOptions))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } out.Path = in.Path return nil } /* generated conversion: api.PodStatus -> v1 (Phase, Conditions, IPs, StartTime via Scope.Convert, ContainerStatuses) */ func convert_api_PodStatus_To_v1_PodStatus(in *api.PodStatus, out *PodStatus, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.PodStatus))(in) } out.Phase = PodPhase(in.Phase) if in.Conditions != nil { out.Conditions = make([]PodCondition, len(in.Conditions)) for
i := range in.Conditions { if err := convert_api_PodCondition_To_v1_PodCondition(&in.Conditions[i], &out.Conditions[i], s); err != nil { return err } } } else { out.Conditions = nil } out.Message = in.Message out.Reason = in.Reason out.HostIP = in.HostIP out.PodIP = in.PodIP if in.StartTime != nil { if err := s.Convert(&in.StartTime, &out.StartTime, 0); err != nil { return err } } else { out.StartTime = nil } if in.ContainerStatuses != nil { out.ContainerStatuses = make([]ContainerStatus, len(in.ContainerStatuses)) for i := range in.ContainerStatuses { if err := convert_api_ContainerStatus_To_v1_ContainerStatus(&in.ContainerStatuses[i], &out.ContainerStatuses[i], s); err != nil { return err } } } else { out.ContainerStatuses = nil } return nil } /* generated conversion: api.PodStatusResult -> v1 (TypeMeta, ObjectMeta, Status) */ func convert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResult, out *PodStatusResult, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.PodStatusResult))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if err := convert_api_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } /* generated conversion: api.PodTemplate -> v1 (TypeMeta, ObjectMeta, Template) */ func convert_api_PodTemplate_To_v1_PodTemplate(in *api.PodTemplate, out *PodTemplate, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.PodTemplate))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if err := convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } return nil } /* generated conversion: api.PodTemplateList -> v1 (TypeMeta, ListMeta, per-item conversion) */ func convert_api_PodTemplateList_To_v1_PodTemplateList(in
*api.PodTemplateList, out *PodTemplateList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.PodTemplateList))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_api_ListMeta_To_v1_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { out.Items = make([]PodTemplate, len(in.Items)) for i := range in.Items { if err := convert_api_PodTemplate_To_v1_PodTemplate(&in.Items[i], &out.Items[i], s); err != nil { return err } } } else { out.Items = nil } return nil } /* generated conversion: api.PodTemplateSpec -> v1 (ObjectMeta, Spec) */ func convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *PodTemplateSpec, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.PodTemplateSpec))(in) } if err := convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if err := convert_api_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil { return err } return nil } /* generated conversion: api.Probe -> v1 (embedded Handler plus delay/timeout) */ func convert_api_Probe_To_v1_Probe(in *api.Probe, out *Probe, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.Probe))(in) } if err := convert_api_Handler_To_v1_Handler(&in.Handler, &out.Handler, s); err != nil { return err } out.InitialDelaySeconds = in.InitialDelaySeconds out.TimeoutSeconds = in.TimeoutSeconds return nil } /* generated conversion: api.RBDVolumeSource -> v1 (CephMonitors slice, image/pool/user fields, optional SecretRef) */ func convert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in *api.RBDVolumeSource, out *RBDVolumeSource, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.RBDVolumeSource))(in) } if in.CephMonitors != nil { out.CephMonitors = make([]string, len(in.CephMonitors)) for i := range in.CephMonitors { out.CephMonitors[i] = in.CephMonitors[i] } } else { out.CephMonitors = nil } out.RBDImage =
in.RBDImage out.FSType = in.FSType out.RBDPool = in.RBDPool out.RadosUser = in.RadosUser out.Keyring = in.Keyring if in.SecretRef != nil { out.SecretRef = new(LocalObjectReference) if err := convert_api_LocalObjectReference_To_v1_LocalObjectReference(in.SecretRef, out.SecretRef, s); err != nil { return err } } else { out.SecretRef = nil } out.ReadOnly = in.ReadOnly return nil } /* generated conversion: api.RangeAllocation -> v1 (TypeMeta, ObjectMeta, Range, Data via Scope.Convert) */ func convert_api_RangeAllocation_To_v1_RangeAllocation(in *api.RangeAllocation, out *RangeAllocation, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.RangeAllocation))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } out.Range = in.Range if err := s.Convert(&in.Data, &out.Data, 0); err != nil { return err } return nil } /* generated conversion: api.ReplicationController -> v1 (TypeMeta, ObjectMeta, Spec, Status) */ func convert_api_ReplicationController_To_v1_ReplicationController(in *api.ReplicationController, out *ReplicationController, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.ReplicationController))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if err := convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { return err } if err := convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } /* generated conversion: api.ReplicationControllerList -> v1 (TypeMeta, ListMeta, per-item conversion) */ func convert_api_ReplicationControllerList_To_v1_ReplicationControllerList(in *api.ReplicationControllerList, out *ReplicationControllerList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*api.ReplicationControllerList))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_api_ListMeta_To_v1_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { out.Items = make([]ReplicationController, len(in.Items)) for i := range in.Items { if err := convert_api_ReplicationController_To_v1_ReplicationController(&in.Items[i], &out.Items[i], s); err != nil { return err } } } else { out.Items = nil } return nil } /* generated conversion: api.ReplicationControllerStatus -> v1 (Replicas, ObservedGeneration) */ func convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *api.ReplicationControllerStatus, out *ReplicationControllerStatus, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.ReplicationControllerStatus))(in) } out.Replicas = in.Replicas out.ObservedGeneration = in.ObservedGeneration return nil } /* generated conversion: api.ResourceQuota -> v1 (TypeMeta, ObjectMeta, Spec, Status) */ func convert_api_ResourceQuota_To_v1_ResourceQuota(in *api.ResourceQuota, out *ResourceQuota, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.ResourceQuota))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if err := convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil { return err } if err := convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } /* generated conversion: api.ResourceQuotaList -> v1 (TypeMeta, ListMeta, per-item conversion) */ func convert_api_ResourceQuotaList_To_v1_ResourceQuotaList(in *api.ResourceQuotaList, out *ResourceQuotaList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.ResourceQuotaList))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err
!= nil { return err } if err := convert_api_ListMeta_To_v1_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { out.Items = make([]ResourceQuota, len(in.Items)) for i := range in.Items { if err := convert_api_ResourceQuota_To_v1_ResourceQuota(&in.Items[i], &out.Items[i], s); err != nil { return err } } } else { out.Items = nil } return nil } /* generated conversion: api.ResourceQuotaSpec -> v1 (Hard resource map via Scope.Convert) */ func convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *api.ResourceQuotaSpec, out *ResourceQuotaSpec, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.ResourceQuotaSpec))(in) } if in.Hard != nil { out.Hard = make(ResourceList) for key, val := range in.Hard { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { return err } out.Hard[ResourceName(key)] = newVal } } else { out.Hard = nil } return nil } /* generated conversion: api.ResourceQuotaStatus -> v1 (Hard and Used resource maps) */ func convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *api.ResourceQuotaStatus, out *ResourceQuotaStatus, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.ResourceQuotaStatus))(in) } if in.Hard != nil { out.Hard = make(ResourceList) for key, val := range in.Hard { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { return err } out.Hard[ResourceName(key)] = newVal } } else { out.Hard = nil } if in.Used != nil { out.Used = make(ResourceList) for key, val := range in.Used { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { return err } out.Used[ResourceName(key)] = newVal } } else { out.Used = nil } return nil } /* generated conversion: api.ResourceRequirements -> v1 (Limits and Requests resource maps) */ func convert_api_ResourceRequirements_To_v1_ResourceRequirements(in *api.ResourceRequirements, out *ResourceRequirements, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.ResourceRequirements))(in) } if in.Limits != nil { out.Limits = make(ResourceList)
for key, val := range in.Limits { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { return err } out.Limits[ResourceName(key)] = newVal } } else { out.Limits = nil } if in.Requests != nil { out.Requests = make(ResourceList) for key, val := range in.Requests { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { return err } out.Requests[ResourceName(key)] = newVal } } else { out.Requests = nil } return nil } /* generated conversion: api.SELinuxOptions -> v1 (User, Role, Type, Level) */ func convert_api_SELinuxOptions_To_v1_SELinuxOptions(in *api.SELinuxOptions, out *SELinuxOptions, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.SELinuxOptions))(in) } out.User = in.User out.Role = in.Role out.Type = in.Type out.Level = in.Level return nil } /* generated conversion: api.Secret -> v1 (TypeMeta, ObjectMeta, byte-slice Data map via Scope.Convert, Type) */ func convert_api_Secret_To_v1_Secret(in *api.Secret, out *Secret, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.Secret))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if in.Data != nil { out.Data = make(map[string][]uint8) for key, val := range in.Data { newVal := []uint8{} if err := s.Convert(&val, &newVal, 0); err != nil { return err } out.Data[key] = newVal } } else { out.Data = nil } out.Type = SecretType(in.Type) return nil } /* generated conversion: api.SecretList -> v1 (TypeMeta, ListMeta, per-item conversion) */ func convert_api_SecretList_To_v1_SecretList(in *api.SecretList, out *SecretList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.SecretList))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_api_ListMeta_To_v1_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { out.Items = make([]Secret,
len(in.Items)) for i := range in.Items { if err := convert_api_Secret_To_v1_Secret(&in.Items[i], &out.Items[i], s); err != nil { return err } } } else { out.Items = nil } return nil } /* generated conversion: api.SecretVolumeSource -> v1 (SecretName) */ func convert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in *api.SecretVolumeSource, out *SecretVolumeSource, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.SecretVolumeSource))(in) } out.SecretName = in.SecretName return nil } /* generated conversion: api.SecurityContext -> v1 (optional Capabilities, Privileged, SELinuxOptions, RunAsUser pointers; RunAsNonRoot) */ func convert_api_SecurityContext_To_v1_SecurityContext(in *api.SecurityContext, out *SecurityContext, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.SecurityContext))(in) } if in.Capabilities != nil { out.Capabilities = new(Capabilities) if err := convert_api_Capabilities_To_v1_Capabilities(in.Capabilities, out.Capabilities, s); err != nil { return err } } else { out.Capabilities = nil } if in.Privileged != nil { out.Privileged = new(bool) *out.Privileged = *in.Privileged } else { out.Privileged = nil } if in.SELinuxOptions != nil { out.SELinuxOptions = new(SELinuxOptions) if err := convert_api_SELinuxOptions_To_v1_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil { return err } } else { out.SELinuxOptions = nil } if in.RunAsUser != nil { out.RunAsUser = new(int64) *out.RunAsUser = *in.RunAsUser } else { out.RunAsUser = nil } out.RunAsNonRoot = in.RunAsNonRoot return nil } /* generated conversion: api.SerializedReference -> v1 (TypeMeta, Reference) */ func convert_api_SerializedReference_To_v1_SerializedReference(in *api.SerializedReference, out *SerializedReference, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.SerializedReference))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_api_ObjectReference_To_v1_ObjectReference(&in.Reference, &out.Reference, s); err != nil { return err } return nil } /* generated conversion: api.Service -> v1 Service */ func
convert_api_Service_To_v1_Service(in *api.Service, out *Service, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.Service))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if err := convert_api_ServiceSpec_To_v1_ServiceSpec(&in.Spec, &out.Spec, s); err != nil { return err } if err := convert_api_ServiceStatus_To_v1_ServiceStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } /* generated conversion: api.ServiceAccount -> v1 (TypeMeta, ObjectMeta, Secrets and ImagePullSecrets slices) */ func convert_api_ServiceAccount_To_v1_ServiceAccount(in *api.ServiceAccount, out *ServiceAccount, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.ServiceAccount))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if in.Secrets != nil { out.Secrets = make([]ObjectReference, len(in.Secrets)) for i := range in.Secrets { if err := convert_api_ObjectReference_To_v1_ObjectReference(&in.Secrets[i], &out.Secrets[i], s); err != nil { return err } } } else { out.Secrets = nil } if in.ImagePullSecrets != nil { out.ImagePullSecrets = make([]LocalObjectReference, len(in.ImagePullSecrets)) for i := range in.ImagePullSecrets { if err := convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.ImagePullSecrets[i], &out.ImagePullSecrets[i], s); err != nil { return err } } } else { out.ImagePullSecrets = nil } return nil } /* generated conversion: api.ServiceAccountList -> v1 (TypeMeta, ListMeta, per-item conversion) */ func convert_api_ServiceAccountList_To_v1_ServiceAccountList(in *api.ServiceAccountList, out *ServiceAccountList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.ServiceAccountList))(in) } if
err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_api_ListMeta_To_v1_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { out.Items = make([]ServiceAccount, len(in.Items)) for i := range in.Items { if err := convert_api_ServiceAccount_To_v1_ServiceAccount(&in.Items[i], &out.Items[i], s); err != nil { return err } } } else { out.Items = nil } return nil } /* generated conversion: api.ServiceList -> v1 (TypeMeta, ListMeta, per-item conversion) */ func convert_api_ServiceList_To_v1_ServiceList(in *api.ServiceList, out *ServiceList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.ServiceList))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_api_ListMeta_To_v1_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { out.Items = make([]Service, len(in.Items)) for i := range in.Items { if err := convert_api_Service_To_v1_Service(&in.Items[i], &out.Items[i], s); err != nil { return err } } } else { out.Items = nil } return nil } /* generated conversion: api.ServicePort -> v1 (Name, Protocol, Port, TargetPort via Scope.Convert, NodePort) */ func convert_api_ServicePort_To_v1_ServicePort(in *api.ServicePort, out *ServicePort, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.ServicePort))(in) } out.Name = in.Name out.Protocol = Protocol(in.Protocol) out.Port = in.Port if err := s.Convert(&in.TargetPort, &out.TargetPort, 0); err != nil { return err } out.NodePort = in.NodePort return nil } /* generated conversion: api.ServiceSpec -> v1 (Ports, Selector map, ClusterIP, Type, ExternalIPs, SessionAffinity) */ func convert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *ServiceSpec, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.ServiceSpec))(in) } if in.Ports != nil { out.Ports = make([]ServicePort, len(in.Ports)) for i := range in.Ports { if err := convert_api_ServicePort_To_v1_ServicePort(&in.Ports[i], &out.Ports[i], s); err != nil {
return err } } } else { out.Ports = nil } if in.Selector != nil { out.Selector = make(map[string]string) for key, val := range in.Selector { out.Selector[key] = val } } else { out.Selector = nil } out.ClusterIP = in.ClusterIP out.Type = ServiceType(in.Type) if in.ExternalIPs != nil { out.ExternalIPs = make([]string, len(in.ExternalIPs)) for i := range in.ExternalIPs { out.ExternalIPs[i] = in.ExternalIPs[i] } } else { out.ExternalIPs = nil } out.SessionAffinity = ServiceAffinity(in.SessionAffinity) return nil } /* generated conversion: api.ServiceStatus -> v1 (LoadBalancer) */ func convert_api_ServiceStatus_To_v1_ServiceStatus(in *api.ServiceStatus, out *ServiceStatus, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.ServiceStatus))(in) } if err := convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil { return err } return nil } /* generated conversion: api.Status -> v1 (TypeMeta, ListMeta, Status/Message/Reason, optional Details, Code) */ func convert_api_Status_To_v1_Status(in *api.Status, out *Status, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.Status))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_api_ListMeta_To_v1_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } out.Status = in.Status out.Message = in.Message out.Reason = StatusReason(in.Reason) if in.Details != nil { out.Details = new(StatusDetails) if err := convert_api_StatusDetails_To_v1_StatusDetails(in.Details, out.Details, s); err != nil { return err } } else { out.Details = nil } out.Code = in.Code return nil } /* generated conversion: api.StatusCause -> v1 (Type, Message, Field) */ func convert_api_StatusCause_To_v1_StatusCause(in *api.StatusCause, out *StatusCause, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.StatusCause))(in) } out.Type = CauseType(in.Type) out.Message = in.Message out.Field = in.Field return nil } /* generated conversion: api.StatusDetails -> v1 StatusDetails */ func
convert_api_StatusDetails_To_v1_StatusDetails(in *api.StatusDetails, out *StatusDetails, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.StatusDetails))(in) } out.Name = in.Name out.Kind = in.Kind if in.Causes != nil { out.Causes = make([]StatusCause, len(in.Causes)) for i := range in.Causes { if err := convert_api_StatusCause_To_v1_StatusCause(&in.Causes[i], &out.Causes[i], s); err != nil { return err } } } else { out.Causes = nil } out.RetryAfterSeconds = in.RetryAfterSeconds return nil } /* generated conversion: api.TCPSocketAction -> v1 (Port via Scope.Convert) */ func convert_api_TCPSocketAction_To_v1_TCPSocketAction(in *api.TCPSocketAction, out *TCPSocketAction, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.TCPSocketAction))(in) } if err := s.Convert(&in.Port, &out.Port, 0); err != nil { return err } return nil } /* generated conversion: api.ThirdPartyResourceData -> v1 (TypeMeta, ObjectMeta, Data via Scope.Convert) */ func convert_api_ThirdPartyResourceData_To_v1_ThirdPartyResourceData(in *api.ThirdPartyResourceData, out *ThirdPartyResourceData, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.ThirdPartyResourceData))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if err := s.Convert(&in.Data, &out.Data, 0); err != nil { return err } return nil } /* generated conversion: api.TypeMeta -> v1 (Kind, APIVersion) */ func convert_api_TypeMeta_To_v1_TypeMeta(in *api.TypeMeta, out *TypeMeta, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.TypeMeta))(in) } out.Kind = in.Kind out.APIVersion = in.APIVersion return nil } /* generated conversion: api.Volume -> v1 (Name, embedded VolumeSource) */ func convert_api_Volume_To_v1_Volume(in *api.Volume, out *Volume, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.Volume))(in) } out.Name =
in.Name if err := convert_api_VolumeSource_To_v1_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil { return err } return nil } /* generated conversion: api.VolumeMount -> v1 (Name, ReadOnly, MountPath) */ func convert_api_VolumeMount_To_v1_VolumeMount(in *api.VolumeMount, out *VolumeMount, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.VolumeMount))(in) } out.Name = in.Name out.ReadOnly = in.ReadOnly out.MountPath = in.MountPath return nil } /* generated conversion: api.VolumeSource -> v1; converts each non-nil volume-source pointer field, nils the rest */ func convert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *VolumeSource, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.VolumeSource))(in) } if in.HostPath != nil { out.HostPath = new(HostPathVolumeSource) if err := convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in.HostPath, out.HostPath, s); err != nil { return err } } else { out.HostPath = nil } if in.EmptyDir != nil { out.EmptyDir = new(EmptyDirVolumeSource) if err := convert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in.EmptyDir, out.EmptyDir, s); err != nil { return err } } else { out.EmptyDir = nil } if in.GCEPersistentDisk != nil { out.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) if err := convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in.GCEPersistentDisk, out.GCEPersistentDisk, s); err != nil { return err } } else { out.GCEPersistentDisk = nil } if in.AWSElasticBlockStore != nil { out.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) if err := convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in.AWSElasticBlockStore, out.AWSElasticBlockStore, s); err != nil { return err } } else { out.AWSElasticBlockStore = nil } if in.GitRepo != nil { out.GitRepo = new(GitRepoVolumeSource) if err := convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in.GitRepo, out.GitRepo, s); err != nil { return err } } else { out.GitRepo = nil } if in.Secret != nil {
out.Secret = new(SecretVolumeSource) if err := convert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in.Secret, out.Secret, s); err != nil { return err } } else { out.Secret = nil } if in.NFS != nil { out.NFS = new(NFSVolumeSource) if err := convert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in.NFS, out.NFS, s); err != nil { return err } } else { out.NFS = nil } if in.ISCSI != nil { out.ISCSI = new(ISCSIVolumeSource) if err := convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in.ISCSI, out.ISCSI, s); err != nil { return err } } else { out.ISCSI = nil } if in.Glusterfs != nil { out.Glusterfs = new(GlusterfsVolumeSource) if err := convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in.Glusterfs, out.Glusterfs, s); err != nil { return err } } else { out.Glusterfs = nil } if in.PersistentVolumeClaim != nil { out.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) if err := convert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in.PersistentVolumeClaim, out.PersistentVolumeClaim, s); err != nil { return err } } else { out.PersistentVolumeClaim = nil } if in.RBD != nil { out.RBD = new(RBDVolumeSource) if err := convert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in.RBD, out.RBD, s); err != nil { return err } } else { out.RBD = nil } return nil } /* generated conversion (reverse direction): v1 AWSElasticBlockStoreVolumeSource -> api (VolumeID, FSType, Partition, ReadOnly) */ func convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in *AWSElasticBlockStoreVolumeSource, out *api.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*AWSElasticBlockStoreVolumeSource))(in) } out.VolumeID = in.VolumeID out.FSType = in.FSType out.Partition = in.Partition out.ReadOnly = in.ReadOnly return nil } /* generated conversion: v1 Binding -> api (TypeMeta, ObjectMeta, Target) */ func convert_v1_Binding_To_api_Binding(in *Binding, out *api.Binding, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*Binding))(in) } if err :=
convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if err := convert_v1_ObjectReference_To_api_ObjectReference(&in.Target, &out.Target, s); err != nil { return err } return nil } /* generated conversion: v1 Capabilities -> api (Add and Drop capability slices) */ func convert_v1_Capabilities_To_api_Capabilities(in *Capabilities, out *api.Capabilities, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*Capabilities))(in) } if in.Add != nil { out.Add = make([]api.Capability, len(in.Add)) for i := range in.Add { out.Add[i] = api.Capability(in.Add[i]) } } else { out.Add = nil } if in.Drop != nil { out.Drop = make([]api.Capability, len(in.Drop)) for i := range in.Drop { out.Drop[i] = api.Capability(in.Drop[i]) } } else { out.Drop = nil } return nil } /* generated conversion: v1 ComponentCondition -> api (Type, Status, Message, Error) */ func convert_v1_ComponentCondition_To_api_ComponentCondition(in *ComponentCondition, out *api.ComponentCondition, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ComponentCondition))(in) } out.Type = api.ComponentConditionType(in.Type) out.Status = api.ConditionStatus(in.Status) out.Message = in.Message out.Error = in.Error return nil } /* generated conversion: v1 ComponentStatus -> api (TypeMeta, ObjectMeta, Conditions slice) */ func convert_v1_ComponentStatus_To_api_ComponentStatus(in *ComponentStatus, out *api.ComponentStatus, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ComponentStatus))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if in.Conditions != nil { out.Conditions = make([]api.ComponentCondition, len(in.Conditions)) for i := range in.Conditions { if err := convert_v1_ComponentCondition_To_api_ComponentCondition(&in.Conditions[i],
&out.Conditions[i], s); err != nil { return err } } } else { out.Conditions = nil } return nil } func convert_v1_ComponentStatusList_To_api_ComponentStatusList(in *ComponentStatusList, out *api.ComponentStatusList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ComponentStatusList))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ListMeta_To_api_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { out.Items = make([]api.ComponentStatus, len(in.Items)) for i := range in.Items { if err := convert_v1_ComponentStatus_To_api_ComponentStatus(&in.Items[i], &out.Items[i], s); err != nil { return err } } } else { out.Items = nil } return nil } func convert_v1_Container_To_api_Container(in *Container, out *api.Container, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*Container))(in) } out.Name = in.Name out.Image = in.Image if in.Command != nil { out.Command = make([]string, len(in.Command)) for i := range in.Command { out.Command[i] = in.Command[i] } } else { out.Command = nil } if in.Args != nil { out.Args = make([]string, len(in.Args)) for i := range in.Args { out.Args[i] = in.Args[i] } } else { out.Args = nil } out.WorkingDir = in.WorkingDir if in.Ports != nil { out.Ports = make([]api.ContainerPort, len(in.Ports)) for i := range in.Ports { if err := convert_v1_ContainerPort_To_api_ContainerPort(&in.Ports[i], &out.Ports[i], s); err != nil { return err } } } else { out.Ports = nil } if in.Env != nil { out.Env = make([]api.EnvVar, len(in.Env)) for i := range in.Env { if err := convert_v1_EnvVar_To_api_EnvVar(&in.Env[i], &out.Env[i], s); err != nil { return err } } } else { out.Env = nil } if err := convert_v1_ResourceRequirements_To_api_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { 
// Generated converters continued: Container tail (probes/lifecycle/security context
// pointers, nil-preserved), ContainerPort, ContainerState (Waiting/Running/Terminated
// union-of-pointers), the three ContainerState* leaf converters (timestamps go through
// the generic s.Convert), ContainerStatus, Daemon, and DaemonList.
return err } if in.VolumeMounts != nil { out.VolumeMounts = make([]api.VolumeMount, len(in.VolumeMounts)) for i := range in.VolumeMounts { if err := convert_v1_VolumeMount_To_api_VolumeMount(&in.VolumeMounts[i], &out.VolumeMounts[i], s); err != nil { return err } } } else { out.VolumeMounts = nil } if in.LivenessProbe != nil { out.LivenessProbe = new(api.Probe) if err := convert_v1_Probe_To_api_Probe(in.LivenessProbe, out.LivenessProbe, s); err != nil { return err } } else { out.LivenessProbe = nil } if in.ReadinessProbe != nil { out.ReadinessProbe = new(api.Probe) if err := convert_v1_Probe_To_api_Probe(in.ReadinessProbe, out.ReadinessProbe, s); err != nil { return err } } else { out.ReadinessProbe = nil } if in.Lifecycle != nil { out.Lifecycle = new(api.Lifecycle) if err := convert_v1_Lifecycle_To_api_Lifecycle(in.Lifecycle, out.Lifecycle, s); err != nil { return err } } else { out.Lifecycle = nil } out.TerminationMessagePath = in.TerminationMessagePath out.ImagePullPolicy = api.PullPolicy(in.ImagePullPolicy) if in.SecurityContext != nil { out.SecurityContext = new(api.SecurityContext) if err := convert_v1_SecurityContext_To_api_SecurityContext(in.SecurityContext, out.SecurityContext, s); err != nil { return err } } else { out.SecurityContext = nil } out.Stdin = in.Stdin out.TTY = in.TTY return nil } func convert_v1_ContainerPort_To_api_ContainerPort(in *ContainerPort, out *api.ContainerPort, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ContainerPort))(in) } out.Name = in.Name out.HostPort = in.HostPort out.ContainerPort = in.ContainerPort out.Protocol = api.Protocol(in.Protocol) out.HostIP = in.HostIP return nil } func convert_v1_ContainerState_To_api_ContainerState(in *ContainerState, out *api.ContainerState, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ContainerState))(in) } if in.Waiting != nil { out.Waiting 
= new(api.ContainerStateWaiting) if err := convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in.Waiting, out.Waiting, s); err != nil { return err } } else { out.Waiting = nil } if in.Running != nil { out.Running = new(api.ContainerStateRunning) if err := convert_v1_ContainerStateRunning_To_api_ContainerStateRunning(in.Running, out.Running, s); err != nil { return err } } else { out.Running = nil } if in.Terminated != nil { out.Terminated = new(api.ContainerStateTerminated) if err := convert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in.Terminated, out.Terminated, s); err != nil { return err } } else { out.Terminated = nil } return nil } func convert_v1_ContainerStateRunning_To_api_ContainerStateRunning(in *ContainerStateRunning, out *api.ContainerStateRunning, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ContainerStateRunning))(in) } if err := s.Convert(&in.StartedAt, &out.StartedAt, 0); err != nil { return err } return nil } func convert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in *ContainerStateTerminated, out *api.ContainerStateTerminated, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ContainerStateTerminated))(in) } out.ExitCode = in.ExitCode out.Signal = in.Signal out.Reason = in.Reason out.Message = in.Message if err := s.Convert(&in.StartedAt, &out.StartedAt, 0); err != nil { return err } if err := s.Convert(&in.FinishedAt, &out.FinishedAt, 0); err != nil { return err } out.ContainerID = in.ContainerID return nil } func convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in *ContainerStateWaiting, out *api.ContainerStateWaiting, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ContainerStateWaiting))(in) } out.Reason = in.Reason return nil } func 
convert_v1_ContainerStatus_To_api_ContainerStatus(in *ContainerStatus, out *api.ContainerStatus, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ContainerStatus))(in) } out.Name = in.Name if err := convert_v1_ContainerState_To_api_ContainerState(&in.State, &out.State, s); err != nil { return err } if err := convert_v1_ContainerState_To_api_ContainerState(&in.LastTerminationState, &out.LastTerminationState, s); err != nil { return err } out.Ready = in.Ready out.RestartCount = in.RestartCount out.Image = in.Image out.ImageID = in.ImageID out.ContainerID = in.ContainerID return nil } func convert_v1_Daemon_To_api_Daemon(in *Daemon, out *api.Daemon, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*Daemon))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if err := convert_v1_DaemonSpec_To_api_DaemonSpec(&in.Spec, &out.Spec, s); err != nil { return err } if err := convert_v1_DaemonStatus_To_api_DaemonStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } func convert_v1_DaemonList_To_api_DaemonList(in *DaemonList, out *api.DaemonList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*DaemonList))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ListMeta_To_api_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { out.Items = make([]api.Daemon, len(in.Items)) for i := range in.Items { if err := convert_v1_Daemon_To_api_Daemon(&in.Items[i], &out.Items[i], s); err != nil { return err } } } else { out.Items = nil } return nil } func 
// Generated converters continued: DaemonSpec (selector map copied key-by-key,
// template pointer nil-preserved), DaemonStatus, DeleteOptions (GracePeriodSeconds
// *int64 deep-copied), EmptyDir, EndpointAddress/Port/Subset, Endpoints(+List),
// EnvVar(+Source), and the head of the Event converter.
convert_v1_DaemonSpec_To_api_DaemonSpec(in *DaemonSpec, out *api.DaemonSpec, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*DaemonSpec))(in) } if in.Selector != nil { out.Selector = make(map[string]string) for key, val := range in.Selector { out.Selector[key] = val } } else { out.Selector = nil } if in.Template != nil { out.Template = new(api.PodTemplateSpec) if err := convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in.Template, out.Template, s); err != nil { return err } } else { out.Template = nil } return nil } func convert_v1_DaemonStatus_To_api_DaemonStatus(in *DaemonStatus, out *api.DaemonStatus, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*DaemonStatus))(in) } out.CurrentNumberScheduled = in.CurrentNumberScheduled out.NumberMisscheduled = in.NumberMisscheduled out.DesiredNumberScheduled = in.DesiredNumberScheduled return nil } func convert_v1_DeleteOptions_To_api_DeleteOptions(in *DeleteOptions, out *api.DeleteOptions, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*DeleteOptions))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if in.GracePeriodSeconds != nil { out.GracePeriodSeconds = new(int64) *out.GracePeriodSeconds = *in.GracePeriodSeconds } else { out.GracePeriodSeconds = nil } return nil } func convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in *EmptyDirVolumeSource, out *api.EmptyDirVolumeSource, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*EmptyDirVolumeSource))(in) } out.Medium = api.StorageMedium(in.Medium) return nil } func convert_v1_EndpointAddress_To_api_EndpointAddress(in *EndpointAddress, out *api.EndpointAddress, s conversion.Scope) error { if defaulting, found := 
s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*EndpointAddress))(in) } out.IP = in.IP if in.TargetRef != nil { out.TargetRef = new(api.ObjectReference) if err := convert_v1_ObjectReference_To_api_ObjectReference(in.TargetRef, out.TargetRef, s); err != nil { return err } } else { out.TargetRef = nil } return nil } func convert_v1_EndpointPort_To_api_EndpointPort(in *EndpointPort, out *api.EndpointPort, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*EndpointPort))(in) } out.Name = in.Name out.Port = in.Port out.Protocol = api.Protocol(in.Protocol) return nil } func convert_v1_EndpointSubset_To_api_EndpointSubset(in *EndpointSubset, out *api.EndpointSubset, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*EndpointSubset))(in) } if in.Addresses != nil { out.Addresses = make([]api.EndpointAddress, len(in.Addresses)) for i := range in.Addresses { if err := convert_v1_EndpointAddress_To_api_EndpointAddress(&in.Addresses[i], &out.Addresses[i], s); err != nil { return err } } } else { out.Addresses = nil } if in.Ports != nil { out.Ports = make([]api.EndpointPort, len(in.Ports)) for i := range in.Ports { if err := convert_v1_EndpointPort_To_api_EndpointPort(&in.Ports[i], &out.Ports[i], s); err != nil { return err } } } else { out.Ports = nil } return nil } func convert_v1_Endpoints_To_api_Endpoints(in *Endpoints, out *api.Endpoints, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*Endpoints))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if in.Subsets != nil { out.Subsets = make([]api.EndpointSubset, len(in.Subsets)) for i := range in.Subsets { if err := 
convert_v1_EndpointSubset_To_api_EndpointSubset(&in.Subsets[i], &out.Subsets[i], s); err != nil { return err } } } else { out.Subsets = nil } return nil } func convert_v1_EndpointsList_To_api_EndpointsList(in *EndpointsList, out *api.EndpointsList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*EndpointsList))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ListMeta_To_api_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { out.Items = make([]api.Endpoints, len(in.Items)) for i := range in.Items { if err := convert_v1_Endpoints_To_api_Endpoints(&in.Items[i], &out.Items[i], s); err != nil { return err } } } else { out.Items = nil } return nil } func convert_v1_EnvVar_To_api_EnvVar(in *EnvVar, out *api.EnvVar, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*EnvVar))(in) } out.Name = in.Name out.Value = in.Value if in.ValueFrom != nil { out.ValueFrom = new(api.EnvVarSource) if err := convert_v1_EnvVarSource_To_api_EnvVarSource(in.ValueFrom, out.ValueFrom, s); err != nil { return err } } else { out.ValueFrom = nil } return nil } func convert_v1_EnvVarSource_To_api_EnvVarSource(in *EnvVarSource, out *api.EnvVarSource, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*EnvVarSource))(in) } if in.FieldRef != nil { out.FieldRef = new(api.ObjectFieldSelector) if err := convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in.FieldRef, out.FieldRef, s); err != nil { return err } } else { out.FieldRef = nil } return nil } func convert_v1_Event_To_api_Event(in *Event, out *api.Event, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*Event))(in) } if err := 
// Generated converters continued: Event (timestamps via s.Convert), EventList,
// EventSource, ExecAction, GCEPersistentDisk/GitRepo/Glusterfs volume sources,
// HTTPGetAction (Port is an IntOrString-style field, hence s.Convert), Handler
// (Exec/HTTPGet/TCPSocket union-of-pointers), HostPath, ISCSI, Lifecycle, and
// the head of the LimitRange converter.
convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if err := convert_v1_ObjectReference_To_api_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil { return err } out.Reason = in.Reason out.Message = in.Message if err := convert_v1_EventSource_To_api_EventSource(&in.Source, &out.Source, s); err != nil { return err } if err := s.Convert(&in.FirstTimestamp, &out.FirstTimestamp, 0); err != nil { return err } if err := s.Convert(&in.LastTimestamp, &out.LastTimestamp, 0); err != nil { return err } out.Count = in.Count return nil } func convert_v1_EventList_To_api_EventList(in *EventList, out *api.EventList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*EventList))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ListMeta_To_api_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { out.Items = make([]api.Event, len(in.Items)) for i := range in.Items { if err := convert_v1_Event_To_api_Event(&in.Items[i], &out.Items[i], s); err != nil { return err } } } else { out.Items = nil } return nil } func convert_v1_EventSource_To_api_EventSource(in *EventSource, out *api.EventSource, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*EventSource))(in) } out.Component = in.Component out.Host = in.Host return nil } func convert_v1_ExecAction_To_api_ExecAction(in *ExecAction, out *api.ExecAction, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ExecAction))(in) } if in.Command != nil { out.Command = make([]string, len(in.Command)) for i := range in.Command { out.Command[i] = 
in.Command[i] } } else { out.Command = nil } return nil } func convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in *GCEPersistentDiskVolumeSource, out *api.GCEPersistentDiskVolumeSource, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*GCEPersistentDiskVolumeSource))(in) } out.PDName = in.PDName out.FSType = in.FSType out.Partition = in.Partition out.ReadOnly = in.ReadOnly return nil } func convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in *GitRepoVolumeSource, out *api.GitRepoVolumeSource, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*GitRepoVolumeSource))(in) } out.Repository = in.Repository out.Revision = in.Revision return nil } func convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in *GlusterfsVolumeSource, out *api.GlusterfsVolumeSource, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*GlusterfsVolumeSource))(in) } out.EndpointsName = in.EndpointsName out.Path = in.Path out.ReadOnly = in.ReadOnly return nil } func convert_v1_HTTPGetAction_To_api_HTTPGetAction(in *HTTPGetAction, out *api.HTTPGetAction, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*HTTPGetAction))(in) } out.Path = in.Path if err := s.Convert(&in.Port, &out.Port, 0); err != nil { return err } out.Host = in.Host out.Scheme = api.URIScheme(in.Scheme) return nil } func convert_v1_Handler_To_api_Handler(in *Handler, out *api.Handler, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*Handler))(in) } if in.Exec != nil { out.Exec = new(api.ExecAction) if err := convert_v1_ExecAction_To_api_ExecAction(in.Exec, out.Exec, s); err != nil { return err } } else { out.Exec = nil } if in.HTTPGet 
!= nil { out.HTTPGet = new(api.HTTPGetAction) if err := convert_v1_HTTPGetAction_To_api_HTTPGetAction(in.HTTPGet, out.HTTPGet, s); err != nil { return err } } else { out.HTTPGet = nil } if in.TCPSocket != nil { out.TCPSocket = new(api.TCPSocketAction) if err := convert_v1_TCPSocketAction_To_api_TCPSocketAction(in.TCPSocket, out.TCPSocket, s); err != nil { return err } } else { out.TCPSocket = nil } return nil } func convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in *HostPathVolumeSource, out *api.HostPathVolumeSource, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*HostPathVolumeSource))(in) } out.Path = in.Path return nil } func convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in *ISCSIVolumeSource, out *api.ISCSIVolumeSource, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ISCSIVolumeSource))(in) } out.TargetPortal = in.TargetPortal out.IQN = in.IQN out.Lun = in.Lun out.FSType = in.FSType out.ReadOnly = in.ReadOnly return nil } func convert_v1_Lifecycle_To_api_Lifecycle(in *Lifecycle, out *api.Lifecycle, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*Lifecycle))(in) } if in.PostStart != nil { out.PostStart = new(api.Handler) if err := convert_v1_Handler_To_api_Handler(in.PostStart, out.PostStart, s); err != nil { return err } } else { out.PostStart = nil } if in.PreStop != nil { out.PreStop = new(api.Handler) if err := convert_v1_Handler_To_api_Handler(in.PreStop, out.PreStop, s); err != nil { return err } } else { out.PreStop = nil } return nil } func convert_v1_LimitRange_To_api_LimitRange(in *LimitRange, out *api.LimitRange, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*LimitRange))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, 
// Generated converters continued: LimitRange tail, LimitRangeItem (Max/Min/Default
// ResourceList maps: each resource.Quantity value is converted via s.Convert on a
// copy of the loop variable, then stored under the re-typed api.ResourceName key),
// LimitRangeList/Spec, List (Items via generic s.Convert — runtime.Object slice),
// ListMeta, ListOptions (label/field selectors via s.Convert), LoadBalancerIngress head.
&out.TypeMeta, s); err != nil { return err } if err := convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if err := convert_v1_LimitRangeSpec_To_api_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil { return err } return nil } func convert_v1_LimitRangeItem_To_api_LimitRangeItem(in *LimitRangeItem, out *api.LimitRangeItem, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*LimitRangeItem))(in) } out.Type = api.LimitType(in.Type) if in.Max != nil { out.Max = make(api.ResourceList) for key, val := range in.Max { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { return err } out.Max[api.ResourceName(key)] = newVal } } else { out.Max = nil } if in.Min != nil { out.Min = make(api.ResourceList) for key, val := range in.Min { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { return err } out.Min[api.ResourceName(key)] = newVal } } else { out.Min = nil } if in.Default != nil { out.Default = make(api.ResourceList) for key, val := range in.Default { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { return err } out.Default[api.ResourceName(key)] = newVal } } else { out.Default = nil } return nil } func convert_v1_LimitRangeList_To_api_LimitRangeList(in *LimitRangeList, out *api.LimitRangeList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*LimitRangeList))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ListMeta_To_api_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { out.Items = make([]api.LimitRange, len(in.Items)) for i := range in.Items { if err := convert_v1_LimitRange_To_api_LimitRange(&in.Items[i], &out.Items[i], s); err != nil { return err } } } else { 
out.Items = nil } return nil } func convert_v1_LimitRangeSpec_To_api_LimitRangeSpec(in *LimitRangeSpec, out *api.LimitRangeSpec, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*LimitRangeSpec))(in) } if in.Limits != nil { out.Limits = make([]api.LimitRangeItem, len(in.Limits)) for i := range in.Limits { if err := convert_v1_LimitRangeItem_To_api_LimitRangeItem(&in.Limits[i], &out.Limits[i], s); err != nil { return err } } } else { out.Limits = nil } return nil } func convert_v1_List_To_api_List(in *List, out *api.List, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*List))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ListMeta_To_api_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if err := s.Convert(&in.Items, &out.Items, 0); err != nil { return err } return nil } func convert_v1_ListMeta_To_api_ListMeta(in *ListMeta, out *api.ListMeta, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ListMeta))(in) } out.SelfLink = in.SelfLink out.ResourceVersion = in.ResourceVersion return nil } func convert_v1_ListOptions_To_api_ListOptions(in *ListOptions, out *api.ListOptions, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ListOptions))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := s.Convert(&in.LabelSelector, &out.LabelSelector, 0); err != nil { return err } if err := s.Convert(&in.FieldSelector, &out.FieldSelector, 0); err != nil { return err } out.Watch = in.Watch out.ResourceVersion = in.ResourceVersion return nil } func convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in *LoadBalancerIngress, 
// Generated converters continued: LoadBalancerIngress/Status, LocalObjectReference,
// NFS volume source, Namespace(+List/Spec/Status — Finalizers re-typed element-wise
// to api.FinalizerName), Node, NodeAddress, NodeCondition (heartbeat/transition
// times via s.Convert), NodeList, and the head of NodeSpec.
out *api.LoadBalancerIngress, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*LoadBalancerIngress))(in) } out.IP = in.IP out.Hostname = in.Hostname return nil } func convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in *LoadBalancerStatus, out *api.LoadBalancerStatus, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*LoadBalancerStatus))(in) } if in.Ingress != nil { out.Ingress = make([]api.LoadBalancerIngress, len(in.Ingress)) for i := range in.Ingress { if err := convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(&in.Ingress[i], &out.Ingress[i], s); err != nil { return err } } } else { out.Ingress = nil } return nil } func convert_v1_LocalObjectReference_To_api_LocalObjectReference(in *LocalObjectReference, out *api.LocalObjectReference, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*LocalObjectReference))(in) } out.Name = in.Name return nil } func convert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in *NFSVolumeSource, out *api.NFSVolumeSource, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*NFSVolumeSource))(in) } out.Server = in.Server out.Path = in.Path out.ReadOnly = in.ReadOnly return nil } func convert_v1_Namespace_To_api_Namespace(in *Namespace, out *api.Namespace, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*Namespace))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if err := convert_v1_NamespaceSpec_To_api_NamespaceSpec(&in.Spec, &out.Spec, s); err != nil { return err } if err := 
convert_v1_NamespaceStatus_To_api_NamespaceStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } func convert_v1_NamespaceList_To_api_NamespaceList(in *NamespaceList, out *api.NamespaceList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*NamespaceList))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ListMeta_To_api_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { out.Items = make([]api.Namespace, len(in.Items)) for i := range in.Items { if err := convert_v1_Namespace_To_api_Namespace(&in.Items[i], &out.Items[i], s); err != nil { return err } } } else { out.Items = nil } return nil } func convert_v1_NamespaceSpec_To_api_NamespaceSpec(in *NamespaceSpec, out *api.NamespaceSpec, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*NamespaceSpec))(in) } if in.Finalizers != nil { out.Finalizers = make([]api.FinalizerName, len(in.Finalizers)) for i := range in.Finalizers { out.Finalizers[i] = api.FinalizerName(in.Finalizers[i]) } } else { out.Finalizers = nil } return nil } func convert_v1_NamespaceStatus_To_api_NamespaceStatus(in *NamespaceStatus, out *api.NamespaceStatus, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*NamespaceStatus))(in) } out.Phase = api.NamespacePhase(in.Phase) return nil } func convert_v1_Node_To_api_Node(in *Node, out *api.Node, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*Node))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if err := 
convert_v1_NodeSpec_To_api_NodeSpec(&in.Spec, &out.Spec, s); err != nil { return err } if err := convert_v1_NodeStatus_To_api_NodeStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } func convert_v1_NodeAddress_To_api_NodeAddress(in *NodeAddress, out *api.NodeAddress, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*NodeAddress))(in) } out.Type = api.NodeAddressType(in.Type) out.Address = in.Address return nil } func convert_v1_NodeCondition_To_api_NodeCondition(in *NodeCondition, out *api.NodeCondition, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*NodeCondition))(in) } out.Type = api.NodeConditionType(in.Type) out.Status = api.ConditionStatus(in.Status) if err := s.Convert(&in.LastHeartbeatTime, &out.LastHeartbeatTime, 0); err != nil { return err } if err := s.Convert(&in.LastTransitionTime, &out.LastTransitionTime, 0); err != nil { return err } out.Reason = in.Reason out.Message = in.Message return nil } func convert_v1_NodeList_To_api_NodeList(in *NodeList, out *api.NodeList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*NodeList))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ListMeta_To_api_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { out.Items = make([]api.Node, len(in.Items)) for i := range in.Items { if err := convert_v1_Node_To_api_Node(&in.Items[i], &out.Items[i], s); err != nil { return err } } } else { out.Items = nil } return nil } func convert_v1_NodeSpec_To_api_NodeSpec(in *NodeSpec, out *api.NodeSpec, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*NodeSpec))(in) } out.PodCIDR = in.PodCIDR out.ExternalID = 
// Generated converters continued: NodeSpec tail, NodeStatus (Capacity ResourceList
// via s.Convert on copied values; Conditions/Addresses slices; NodeInfo), the flat
// NodeSystemInfo field copy, ObjectFieldSelector, ObjectMeta (timestamps via
// s.Convert, *int64 grace period deep-copied, Labels/Annotations maps copied
// key-by-key), ObjectReference, and the head of the PersistentVolume converter.
in.ExternalID out.ProviderID = in.ProviderID out.Unschedulable = in.Unschedulable return nil } func convert_v1_NodeStatus_To_api_NodeStatus(in *NodeStatus, out *api.NodeStatus, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*NodeStatus))(in) } if in.Capacity != nil { out.Capacity = make(api.ResourceList) for key, val := range in.Capacity { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { return err } out.Capacity[api.ResourceName(key)] = newVal } } else { out.Capacity = nil } out.Phase = api.NodePhase(in.Phase) if in.Conditions != nil { out.Conditions = make([]api.NodeCondition, len(in.Conditions)) for i := range in.Conditions { if err := convert_v1_NodeCondition_To_api_NodeCondition(&in.Conditions[i], &out.Conditions[i], s); err != nil { return err } } } else { out.Conditions = nil } if in.Addresses != nil { out.Addresses = make([]api.NodeAddress, len(in.Addresses)) for i := range in.Addresses { if err := convert_v1_NodeAddress_To_api_NodeAddress(&in.Addresses[i], &out.Addresses[i], s); err != nil { return err } } } else { out.Addresses = nil } if err := convert_v1_NodeSystemInfo_To_api_NodeSystemInfo(&in.NodeInfo, &out.NodeInfo, s); err != nil { return err } return nil } func convert_v1_NodeSystemInfo_To_api_NodeSystemInfo(in *NodeSystemInfo, out *api.NodeSystemInfo, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*NodeSystemInfo))(in) } out.MachineID = in.MachineID out.SystemUUID = in.SystemUUID out.BootID = in.BootID out.KernelVersion = in.KernelVersion out.OsImage = in.OsImage out.ContainerRuntimeVersion = in.ContainerRuntimeVersion out.KubeletVersion = in.KubeletVersion out.KubeProxyVersion = in.KubeProxyVersion return nil } func convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in *ObjectFieldSelector, out *api.ObjectFieldSelector, s conversion.Scope) error { if defaulting, 
found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ObjectFieldSelector))(in) } out.APIVersion = in.APIVersion out.FieldPath = in.FieldPath return nil } func convert_v1_ObjectMeta_To_api_ObjectMeta(in *ObjectMeta, out *api.ObjectMeta, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ObjectMeta))(in) } out.Name = in.Name out.GenerateName = in.GenerateName out.Namespace = in.Namespace out.SelfLink = in.SelfLink out.UID = in.UID out.ResourceVersion = in.ResourceVersion out.Generation = in.Generation if err := s.Convert(&in.CreationTimestamp, &out.CreationTimestamp, 0); err != nil { return err } if in.DeletionTimestamp != nil { if err := s.Convert(&in.DeletionTimestamp, &out.DeletionTimestamp, 0); err != nil { return err } } else { out.DeletionTimestamp = nil } if in.DeletionGracePeriodSeconds != nil { out.DeletionGracePeriodSeconds = new(int64) *out.DeletionGracePeriodSeconds = *in.DeletionGracePeriodSeconds } else { out.DeletionGracePeriodSeconds = nil } if in.Labels != nil { out.Labels = make(map[string]string) for key, val := range in.Labels { out.Labels[key] = val } } else { out.Labels = nil } if in.Annotations != nil { out.Annotations = make(map[string]string) for key, val := range in.Annotations { out.Annotations[key] = val } } else { out.Annotations = nil } return nil } func convert_v1_ObjectReference_To_api_ObjectReference(in *ObjectReference, out *api.ObjectReference, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ObjectReference))(in) } out.Kind = in.Kind out.Namespace = in.Namespace out.Name = in.Name out.UID = in.UID out.APIVersion = in.APIVersion out.ResourceVersion = in.ResourceVersion out.FieldPath = in.FieldPath return nil } func convert_v1_PersistentVolume_To_api_PersistentVolume(in *PersistentVolume, out *api.PersistentVolume, s conversion.Scope) error { if defaulting, found := 
s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*PersistentVolume))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if err := convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(&in.Spec, &out.Spec, s); err != nil { return err } if err := convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } func convert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(in *PersistentVolumeClaim, out *api.PersistentVolumeClaim, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*PersistentVolumeClaim))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if err := convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil { return err } if err := convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } func convert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList(in *PersistentVolumeClaimList, out *api.PersistentVolumeClaimList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*PersistentVolumeClaimList))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ListMeta_To_api_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { out.Items = make([]api.PersistentVolumeClaim, len(in.Items)) for i := range in.Items { if err := 
convert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(&in.Items[i], &out.Items[i], s); err != nil { return err } } } else { out.Items = nil } return nil } func convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(in *PersistentVolumeClaimSpec, out *api.PersistentVolumeClaimSpec, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*PersistentVolumeClaimSpec))(in) } if in.AccessModes != nil { out.AccessModes = make([]api.PersistentVolumeAccessMode, len(in.AccessModes)) for i := range in.AccessModes { out.AccessModes[i] = api.PersistentVolumeAccessMode(in.AccessModes[i]) } } else { out.AccessModes = nil } if err := convert_v1_ResourceRequirements_To_api_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { return err } out.VolumeName = in.VolumeName return nil } func convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(in *PersistentVolumeClaimStatus, out *api.PersistentVolumeClaimStatus, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*PersistentVolumeClaimStatus))(in) } out.Phase = api.PersistentVolumeClaimPhase(in.Phase) if in.AccessModes != nil { out.AccessModes = make([]api.PersistentVolumeAccessMode, len(in.AccessModes)) for i := range in.AccessModes { out.AccessModes[i] = api.PersistentVolumeAccessMode(in.AccessModes[i]) } } else { out.AccessModes = nil } if in.Capacity != nil { out.Capacity = make(api.ResourceList) for key, val := range in.Capacity { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { return err } out.Capacity[api.ResourceName(key)] = newVal } } else { out.Capacity = nil } return nil } func convert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in *PersistentVolumeClaimVolumeSource, out *api.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { if defaulting, found := 
s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*PersistentVolumeClaimVolumeSource))(in) } out.ClaimName = in.ClaimName out.ReadOnly = in.ReadOnly return nil } func convert_v1_PersistentVolumeList_To_api_PersistentVolumeList(in *PersistentVolumeList, out *api.PersistentVolumeList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*PersistentVolumeList))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ListMeta_To_api_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { out.Items = make([]api.PersistentVolume, len(in.Items)) for i := range in.Items { if err := convert_v1_PersistentVolume_To_api_PersistentVolume(&in.Items[i], &out.Items[i], s); err != nil { return err } } } else { out.Items = nil } return nil } func convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in *PersistentVolumeSource, out *api.PersistentVolumeSource, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*PersistentVolumeSource))(in) } if in.GCEPersistentDisk != nil { out.GCEPersistentDisk = new(api.GCEPersistentDiskVolumeSource) if err := convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in.GCEPersistentDisk, out.GCEPersistentDisk, s); err != nil { return err } } else { out.GCEPersistentDisk = nil } if in.AWSElasticBlockStore != nil { out.AWSElasticBlockStore = new(api.AWSElasticBlockStoreVolumeSource) if err := convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in.AWSElasticBlockStore, out.AWSElasticBlockStore, s); err != nil { return err } } else { out.AWSElasticBlockStore = nil } if in.HostPath != nil { out.HostPath = new(api.HostPathVolumeSource) if err := convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in.HostPath, out.HostPath, 
s); err != nil { return err } } else { out.HostPath = nil } if in.Glusterfs != nil { out.Glusterfs = new(api.GlusterfsVolumeSource) if err := convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in.Glusterfs, out.Glusterfs, s); err != nil { return err } } else { out.Glusterfs = nil } if in.NFS != nil { out.NFS = new(api.NFSVolumeSource) if err := convert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in.NFS, out.NFS, s); err != nil { return err } } else { out.NFS = nil } if in.RBD != nil { out.RBD = new(api.RBDVolumeSource) if err := convert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in.RBD, out.RBD, s); err != nil { return err } } else { out.RBD = nil } if in.ISCSI != nil { out.ISCSI = new(api.ISCSIVolumeSource) if err := convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in.ISCSI, out.ISCSI, s); err != nil { return err } } else { out.ISCSI = nil } return nil } func convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in *PersistentVolumeSpec, out *api.PersistentVolumeSpec, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*PersistentVolumeSpec))(in) } if in.Capacity != nil { out.Capacity = make(api.ResourceList) for key, val := range in.Capacity { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { return err } out.Capacity[api.ResourceName(key)] = newVal } } else { out.Capacity = nil } if err := convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, s); err != nil { return err } if in.AccessModes != nil { out.AccessModes = make([]api.PersistentVolumeAccessMode, len(in.AccessModes)) for i := range in.AccessModes { out.AccessModes[i] = api.PersistentVolumeAccessMode(in.AccessModes[i]) } } else { out.AccessModes = nil } if in.ClaimRef != nil { out.ClaimRef = new(api.ObjectReference) if err := convert_v1_ObjectReference_To_api_ObjectReference(in.ClaimRef, out.ClaimRef, s); err != nil { 
return err } } else { out.ClaimRef = nil } out.PersistentVolumeReclaimPolicy = api.PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy) return nil }

// NOTE(review): generated conversion code — each converter applies the source
// type's registered defaulting func, copies/casts scalars, and recurses into
// nested types while preserving nil pointers/slices/maps. Prefer regenerating.

// Scalar copy with a Phase cast.
func convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(in *PersistentVolumeStatus, out *api.PersistentVolumeStatus, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*PersistentVolumeStatus))(in) } out.Phase = api.PersistentVolumePhase(in.Phase) out.Message = in.Message out.Reason = in.Reason return nil }

// Top-level object converter: TypeMeta, ObjectMeta, Spec, Status in order.
func convert_v1_Pod_To_api_Pod(in *Pod, out *api.Pod, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*Pod))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if err := convert_v1_PodSpec_To_api_PodSpec(&in.Spec, &out.Spec, s); err != nil { return err } if err := convert_v1_PodStatus_To_api_PodStatus(&in.Status, &out.Status, s); err != nil { return err } return nil }

// TypeMeta plus stream-flag scalars.
func convert_v1_PodAttachOptions_To_api_PodAttachOptions(in *PodAttachOptions, out *api.PodAttachOptions, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*PodAttachOptions))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } out.Stdin = in.Stdin out.Stdout = in.Stdout out.Stderr = in.Stderr out.TTY = in.TTY out.Container = in.Container return nil }

// Casts Type/Status only — this version of the type carries no other fields here.
func convert_v1_PodCondition_To_api_PodCondition(in *PodCondition, out *api.PodCondition, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*PodCondition))(in) } out.Type = api.PodConditionType(in.Type) out.Status = api.ConditionStatus(in.Status) return nil }

// Stream flags plus a deep-copied Command slice.
func
convert_v1_PodExecOptions_To_api_PodExecOptions(in *PodExecOptions, out *api.PodExecOptions, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*PodExecOptions))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } out.Stdin = in.Stdin out.Stdout = in.Stdout out.Stderr = in.Stderr out.TTY = in.TTY out.Container = in.Container if in.Command != nil { out.Command = make([]string, len(in.Command)) for i := range in.Command { out.Command[i] = in.Command[i] } } else { out.Command = nil } return nil }

// List converter for Pod items.
func convert_v1_PodList_To_api_PodList(in *PodList, out *api.PodList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*PodList))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ListMeta_To_api_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { out.Items = make([]api.Pod, len(in.Items)) for i := range in.Items { if err := convert_v1_Pod_To_api_Pod(&in.Items[i], &out.Items[i], s); err != nil { return err } } } else { out.Items = nil } return nil }

// TypeMeta plus Container/Follow/Previous scalars.
func convert_v1_PodLogOptions_To_api_PodLogOptions(in *PodLogOptions, out *api.PodLogOptions, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*PodLogOptions))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } out.Container = in.Container out.Follow = in.Follow out.Previous = in.Previous return nil }

// TypeMeta plus the proxy Path.
func convert_v1_PodProxyOptions_To_api_PodProxyOptions(in *PodProxyOptions, out *api.PodProxyOptions, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*PodProxyOptions))(in) } if err :=
convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } out.Path = in.Path return nil }

// Phase cast, Conditions/ContainerStatuses element loops, optional StartTime
// pointer via the generic scope converter.
func convert_v1_PodStatus_To_api_PodStatus(in *PodStatus, out *api.PodStatus, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*PodStatus))(in) } out.Phase = api.PodPhase(in.Phase) if in.Conditions != nil { out.Conditions = make([]api.PodCondition, len(in.Conditions)) for i := range in.Conditions { if err := convert_v1_PodCondition_To_api_PodCondition(&in.Conditions[i], &out.Conditions[i], s); err != nil { return err } } } else { out.Conditions = nil } out.Message = in.Message out.Reason = in.Reason out.HostIP = in.HostIP out.PodIP = in.PodIP if in.StartTime != nil { if err := s.Convert(&in.StartTime, &out.StartTime, 0); err != nil { return err } } else { out.StartTime = nil } if in.ContainerStatuses != nil { out.ContainerStatuses = make([]api.ContainerStatus, len(in.ContainerStatuses)) for i := range in.ContainerStatuses { if err := convert_v1_ContainerStatus_To_api_ContainerStatus(&in.ContainerStatuses[i], &out.ContainerStatuses[i], s); err != nil { return err } } } else { out.ContainerStatuses = nil } return nil }

// TypeMeta, ObjectMeta, then the embedded PodStatus.
func convert_v1_PodStatusResult_To_api_PodStatusResult(in *PodStatusResult, out *api.PodStatusResult, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*PodStatusResult))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if err := convert_v1_PodStatus_To_api_PodStatus(&in.Status, &out.Status, s); err != nil { return err } return nil }

// TypeMeta, ObjectMeta, then the pod Template.
func convert_v1_PodTemplate_To_api_PodTemplate(in *PodTemplate, out *api.PodTemplate, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in));
found { defaulting.(func(*PodTemplate))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if err := convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } return nil }

// List converter for PodTemplate items.
func convert_v1_PodTemplateList_To_api_PodTemplateList(in *PodTemplateList, out *api.PodTemplateList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*PodTemplateList))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ListMeta_To_api_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { out.Items = make([]api.PodTemplate, len(in.Items)) for i := range in.Items { if err := convert_v1_PodTemplate_To_api_PodTemplate(&in.Items[i], &out.Items[i], s); err != nil { return err } } } else { out.Items = nil } return nil }

// ObjectMeta plus PodSpec (no TypeMeta on a template spec).
func convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *PodTemplateSpec, out *api.PodTemplateSpec, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*PodTemplateSpec))(in) } if err := convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if err := convert_v1_PodSpec_To_api_PodSpec(&in.Spec, &out.Spec, s); err != nil { return err } return nil }

// Embedded Handler plus the two timing scalars.
func convert_v1_Probe_To_api_Probe(in *Probe, out *api.Probe, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*Probe))(in) } if err := convert_v1_Handler_To_api_Handler(&in.Handler, &out.Handler, s); err != nil { return err } out.InitialDelaySeconds = in.InitialDelaySeconds out.TimeoutSeconds = in.TimeoutSeconds return nil }

// CephMonitors deep copy, scalar RBD settings, optional SecretRef pointer.
func
convert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in *RBDVolumeSource, out *api.RBDVolumeSource, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*RBDVolumeSource))(in) } if in.CephMonitors != nil { out.CephMonitors = make([]string, len(in.CephMonitors)) for i := range in.CephMonitors { out.CephMonitors[i] = in.CephMonitors[i] } } else { out.CephMonitors = nil } out.RBDImage = in.RBDImage out.FSType = in.FSType out.RBDPool = in.RBDPool out.RadosUser = in.RadosUser out.Keyring = in.Keyring if in.SecretRef != nil { out.SecretRef = new(api.LocalObjectReference) if err := convert_v1_LocalObjectReference_To_api_LocalObjectReference(in.SecretRef, out.SecretRef, s); err != nil { return err } } else { out.SecretRef = nil } out.ReadOnly = in.ReadOnly return nil }

// Range scalar plus Data via the generic scope converter.
func convert_v1_RangeAllocation_To_api_RangeAllocation(in *RangeAllocation, out *api.RangeAllocation, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*RangeAllocation))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } out.Range = in.Range if err := s.Convert(&in.Data, &out.Data, 0); err != nil { return err } return nil }

// Top-level object converter. (Continues past this span.)
func convert_v1_ReplicationController_To_api_ReplicationController(in *ReplicationController, out *api.ReplicationController, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ReplicationController))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if err := convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(&in.Spec,
&out.Spec, s); err != nil { return err } if err := convert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { return err } return nil }

// NOTE(review): generated conversion code — defaulting hook, scalar copies/casts,
// nil-preserving recursion into nested types. Prefer regenerating over editing.

// List converter for ReplicationController items.
func convert_v1_ReplicationControllerList_To_api_ReplicationControllerList(in *ReplicationControllerList, out *api.ReplicationControllerList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ReplicationControllerList))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ListMeta_To_api_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { out.Items = make([]api.ReplicationController, len(in.Items)) for i := range in.Items { if err := convert_v1_ReplicationController_To_api_ReplicationController(&in.Items[i], &out.Items[i], s); err != nil { return err } } } else { out.Items = nil } return nil }

// Two scalar fields only.
func convert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(in *ReplicationControllerStatus, out *api.ReplicationControllerStatus, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ReplicationControllerStatus))(in) } out.Replicas = in.Replicas out.ObservedGeneration = in.ObservedGeneration return nil }

// Top-level object converter: TypeMeta, ObjectMeta, Spec, Status in order.
func convert_v1_ResourceQuota_To_api_ResourceQuota(in *ResourceQuota, out *api.ResourceQuota, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ResourceQuota))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if err := convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil { return err } if err :=
convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(&in.Status, &out.Status, s); err != nil { return err } return nil }

// List converter for ResourceQuota items.
func convert_v1_ResourceQuotaList_To_api_ResourceQuotaList(in *ResourceQuotaList, out *api.ResourceQuotaList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ResourceQuotaList))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ListMeta_To_api_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { out.Items = make([]api.ResourceQuota, len(in.Items)) for i := range in.Items { if err := convert_v1_ResourceQuota_To_api_ResourceQuota(&in.Items[i], &out.Items[i], s); err != nil { return err } } } else { out.Items = nil } return nil }

// Hard limits map: keys cast to api.ResourceName, Quantity values scope-converted.
func convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in *ResourceQuotaSpec, out *api.ResourceQuotaSpec, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ResourceQuotaSpec))(in) } if in.Hard != nil { out.Hard = make(api.ResourceList) for key, val := range in.Hard { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { return err } out.Hard[api.ResourceName(key)] = newVal } } else { out.Hard = nil } return nil }

// Hard and Used maps handled identically to the spec's Hard map.
func convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in *ResourceQuotaStatus, out *api.ResourceQuotaStatus, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ResourceQuotaStatus))(in) } if in.Hard != nil { out.Hard = make(api.ResourceList) for key, val := range in.Hard { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { return err } out.Hard[api.ResourceName(key)] = newVal } } else { out.Hard = nil } if in.Used != nil { out.Used = make(api.ResourceList) for key, val := range in.Used { newVal :=
resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { return err } out.Used[api.ResourceName(key)] = newVal } } else { out.Used = nil } return nil }

// Limits and Requests maps, both via the generic Quantity scope conversion.
func convert_v1_ResourceRequirements_To_api_ResourceRequirements(in *ResourceRequirements, out *api.ResourceRequirements, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ResourceRequirements))(in) } if in.Limits != nil { out.Limits = make(api.ResourceList) for key, val := range in.Limits { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { return err } out.Limits[api.ResourceName(key)] = newVal } } else { out.Limits = nil } if in.Requests != nil { out.Requests = make(api.ResourceList) for key, val := range in.Requests { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { return err } out.Requests[api.ResourceName(key)] = newVal } } else { out.Requests = nil } return nil }

// Four-scalar copy.
func convert_v1_SELinuxOptions_To_api_SELinuxOptions(in *SELinuxOptions, out *api.SELinuxOptions, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*SELinuxOptions))(in) } out.User = in.User out.Role = in.Role out.Type = in.Type out.Level = in.Level return nil }

// Data map values ([]uint8) are scope-converted per entry; Type is cast.
func convert_v1_Secret_To_api_Secret(in *Secret, out *api.Secret, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*Secret))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if in.Data != nil { out.Data = make(map[string][]uint8) for key, val := range in.Data { newVal := []uint8{} if err := s.Convert(&val, &newVal, 0); err != nil { return err } out.Data[key] = newVal } } else { out.Data = nil } out.Type = api.SecretType(in.Type)
return nil }

// List converter for Secret items.
func convert_v1_SecretList_To_api_SecretList(in *SecretList, out *api.SecretList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*SecretList))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ListMeta_To_api_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { out.Items = make([]api.Secret, len(in.Items)) for i := range in.Items { if err := convert_v1_Secret_To_api_Secret(&in.Items[i], &out.Items[i], s); err != nil { return err } } } else { out.Items = nil } return nil }

// Single scalar copy.
func convert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in *SecretVolumeSource, out *api.SecretVolumeSource, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*SecretVolumeSource))(in) } out.SecretName = in.SecretName return nil }

// Optional Capabilities/SELinuxOptions pointers recursed; Privileged and
// RunAsUser pointers deep-copied; RunAsNonRoot copied by value.
func convert_v1_SecurityContext_To_api_SecurityContext(in *SecurityContext, out *api.SecurityContext, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*SecurityContext))(in) } if in.Capabilities != nil { out.Capabilities = new(api.Capabilities) if err := convert_v1_Capabilities_To_api_Capabilities(in.Capabilities, out.Capabilities, s); err != nil { return err } } else { out.Capabilities = nil } if in.Privileged != nil { out.Privileged = new(bool) *out.Privileged = *in.Privileged } else { out.Privileged = nil } if in.SELinuxOptions != nil { out.SELinuxOptions = new(api.SELinuxOptions) if err := convert_v1_SELinuxOptions_To_api_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil { return err } } else { out.SELinuxOptions = nil } if in.RunAsUser != nil { out.RunAsUser = new(int64) *out.RunAsUser = *in.RunAsUser } else { out.RunAsUser = nil } out.RunAsNonRoot = in.RunAsNonRoot return nil } func
convert_v1_SerializedReference_To_api_SerializedReference(in *SerializedReference, out *api.SerializedReference, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*SerializedReference))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ObjectReference_To_api_ObjectReference(&in.Reference, &out.Reference, s); err != nil { return err } return nil }

// NOTE(review): generated conversion code — defaulting hook, scalar copies/casts,
// nil-preserving recursion into nested types. Prefer regenerating over editing.

// Top-level object converter: TypeMeta, ObjectMeta, Spec, Status in order.
func convert_v1_Service_To_api_Service(in *Service, out *api.Service, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*Service))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if err := convert_v1_ServiceSpec_To_api_ServiceSpec(&in.Spec, &out.Spec, s); err != nil { return err } if err := convert_v1_ServiceStatus_To_api_ServiceStatus(&in.Status, &out.Status, s); err != nil { return err } return nil }

// Secrets and ImagePullSecrets slices converted element-wise (nil stays nil).
func convert_v1_ServiceAccount_To_api_ServiceAccount(in *ServiceAccount, out *api.ServiceAccount, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ServiceAccount))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if in.Secrets != nil { out.Secrets = make([]api.ObjectReference, len(in.Secrets)) for i := range in.Secrets { if err := convert_v1_ObjectReference_To_api_ObjectReference(&in.Secrets[i], &out.Secrets[i], s); err != nil { return err } } } else { out.Secrets = nil } if in.ImagePullSecrets != nil { out.ImagePullSecrets = make([]api.LocalObjectReference, len(in.ImagePullSecrets)) for i := range in.ImagePullSecrets { if err := convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.ImagePullSecrets[i], &out.ImagePullSecrets[i], s); err != nil { return err } } } else { out.ImagePullSecrets = nil } return nil }

// List converter for ServiceAccount items.
func convert_v1_ServiceAccountList_To_api_ServiceAccountList(in *ServiceAccountList, out *api.ServiceAccountList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ServiceAccountList))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ListMeta_To_api_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { out.Items = make([]api.ServiceAccount, len(in.Items)) for i := range in.Items { if err := convert_v1_ServiceAccount_To_api_ServiceAccount(&in.Items[i], &out.Items[i], s); err != nil { return err } } } else { out.Items = nil } return nil }

// List converter for Service items.
func convert_v1_ServiceList_To_api_ServiceList(in *ServiceList, out *api.ServiceList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ServiceList))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ListMeta_To_api_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { out.Items = make([]api.Service, len(in.Items)) for i := range in.Items { if err := convert_v1_Service_To_api_Service(&in.Items[i], &out.Items[i], s); err != nil { return err } } } else { out.Items = nil } return nil }

// Scalars with a Protocol cast; TargetPort goes through the scope converter.
func convert_v1_ServicePort_To_api_ServicePort(in *ServicePort, out *api.ServicePort, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ServicePort))(in) } out.Name = in.Name out.Protocol = api.Protocol(in.Protocol) out.Port = in.Port if err := s.Convert(&in.TargetPort,
&out.TargetPort, 0); err != nil { return err } out.NodePort = in.NodePort return nil }

// Ports element loop, Selector map deep copy, ExternalIPs slice deep copy,
// and casts for Type/SessionAffinity.
func convert_v1_ServiceSpec_To_api_ServiceSpec(in *ServiceSpec, out *api.ServiceSpec, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ServiceSpec))(in) } if in.Ports != nil { out.Ports = make([]api.ServicePort, len(in.Ports)) for i := range in.Ports { if err := convert_v1_ServicePort_To_api_ServicePort(&in.Ports[i], &out.Ports[i], s); err != nil { return err } } } else { out.Ports = nil } if in.Selector != nil { out.Selector = make(map[string]string) for key, val := range in.Selector { out.Selector[key] = val } } else { out.Selector = nil } out.ClusterIP = in.ClusterIP out.Type = api.ServiceType(in.Type) if in.ExternalIPs != nil { out.ExternalIPs = make([]string, len(in.ExternalIPs)) for i := range in.ExternalIPs { out.ExternalIPs[i] = in.ExternalIPs[i] } } else { out.ExternalIPs = nil } out.SessionAffinity = api.ServiceAffinity(in.SessionAffinity) return nil }

// Delegates to the LoadBalancerStatus converter.
func convert_v1_ServiceStatus_To_api_ServiceStatus(in *ServiceStatus, out *api.ServiceStatus, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ServiceStatus))(in) } if err := convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil { return err } return nil }

// Scalars with a Reason cast; optional Details pointer recursed.
func convert_v1_Status_To_api_Status(in *Status, out *api.Status, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*Status))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ListMeta_To_api_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } out.Status = in.Status out.Message = in.Message out.Reason = api.StatusReason(in.Reason) if in.Details != nil { out.Details = new(api.StatusDetails) if err :=
convert_v1_StatusDetails_To_api_StatusDetails(in.Details, out.Details, s); err != nil { return err } } else { out.Details = nil } out.Code = in.Code return nil }

// Scalar copy with a CauseType cast.
func convert_v1_StatusCause_To_api_StatusCause(in *StatusCause, out *api.StatusCause, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*StatusCause))(in) } out.Type = api.CauseType(in.Type) out.Message = in.Message out.Field = in.Field return nil }

// Scalars plus a Causes element loop.
func convert_v1_StatusDetails_To_api_StatusDetails(in *StatusDetails, out *api.StatusDetails, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*StatusDetails))(in) } out.Name = in.Name out.Kind = in.Kind if in.Causes != nil { out.Causes = make([]api.StatusCause, len(in.Causes)) for i := range in.Causes { if err := convert_v1_StatusCause_To_api_StatusCause(&in.Causes[i], &out.Causes[i], s); err != nil { return err } } } else { out.Causes = nil } out.RetryAfterSeconds = in.RetryAfterSeconds return nil }

// Port goes through the generic scope converter (IntOrString-style field).
func convert_v1_TCPSocketAction_To_api_TCPSocketAction(in *TCPSocketAction, out *api.TCPSocketAction, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*TCPSocketAction))(in) } if err := s.Convert(&in.Port, &out.Port, 0); err != nil { return err } return nil }

// Metadata plus raw Data via the generic scope converter.
func convert_v1_ThirdPartyResourceData_To_api_ThirdPartyResourceData(in *ThirdPartyResourceData, out *api.ThirdPartyResourceData, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ThirdPartyResourceData))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } if err := convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if err := s.Convert(&in.Data, &out.Data, 0); err != nil { return err } return nil }

// Kind and APIVersion scalar copy.
func
convert_v1_TypeMeta_To_api_TypeMeta(in *TypeMeta, out *api.TypeMeta, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*TypeMeta))(in) } out.Kind = in.Kind out.APIVersion = in.APIVersion return nil }

// Name plus the embedded VolumeSource.
func convert_v1_Volume_To_api_Volume(in *Volume, out *api.Volume, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*Volume))(in) } out.Name = in.Name if err := convert_v1_VolumeSource_To_api_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil { return err } return nil }

// Three-scalar copy.
func convert_v1_VolumeMount_To_api_VolumeMount(in *VolumeMount, out *api.VolumeMount, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*VolumeMount))(in) } out.Name = in.Name out.ReadOnly = in.ReadOnly out.MountPath = in.MountPath return nil }

// Union-style source: each non-nil pointer member is allocated and recursed.
func convert_v1_VolumeSource_To_api_VolumeSource(in *VolumeSource, out *api.VolumeSource, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*VolumeSource))(in) } if in.HostPath != nil { out.HostPath = new(api.HostPathVolumeSource) if err := convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in.HostPath, out.HostPath, s); err != nil { return err } } else { out.HostPath = nil } if in.EmptyDir != nil { out.EmptyDir = new(api.EmptyDirVolumeSource) if err := convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in.EmptyDir, out.EmptyDir, s); err != nil { return err } } else { out.EmptyDir = nil } if in.GCEPersistentDisk != nil { out.GCEPersistentDisk = new(api.GCEPersistentDiskVolumeSource) if err := convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in.GCEPersistentDisk, out.GCEPersistentDisk, s); err != nil { return err } } else { out.GCEPersistentDisk = nil } if in.AWSElasticBlockStore != nil { out.AWSElasticBlockStore =
new(api.AWSElasticBlockStoreVolumeSource) if err := convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in.AWSElasticBlockStore, out.AWSElasticBlockStore, s); err != nil { return err } } else { out.AWSElasticBlockStore = nil } if in.GitRepo != nil { out.GitRepo = new(api.GitRepoVolumeSource) if err := convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in.GitRepo, out.GitRepo, s); err != nil { return err } } else { out.GitRepo = nil } if in.Secret != nil { out.Secret = new(api.SecretVolumeSource) if err := convert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in.Secret, out.Secret, s); err != nil { return err } } else { out.Secret = nil } if in.NFS != nil { out.NFS = new(api.NFSVolumeSource) if err := convert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in.NFS, out.NFS, s); err != nil { return err } } else { out.NFS = nil } if in.ISCSI != nil { out.ISCSI = new(api.ISCSIVolumeSource) if err := convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in.ISCSI, out.ISCSI, s); err != nil { return err } } else { out.ISCSI = nil } if in.Glusterfs != nil { out.Glusterfs = new(api.GlusterfsVolumeSource) if err := convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in.Glusterfs, out.Glusterfs, s); err != nil { return err } } else { out.Glusterfs = nil } if in.PersistentVolumeClaim != nil { out.PersistentVolumeClaim = new(api.PersistentVolumeClaimVolumeSource) if err := convert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in.PersistentVolumeClaim, out.PersistentVolumeClaim, s); err != nil { return err } } else { out.PersistentVolumeClaim = nil } if in.RBD != nil { out.RBD = new(api.RBDVolumeSource) if err := convert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in.RBD, out.RBD, s); err != nil { return err } } else { out.RBD = nil } return nil }

// Registers the generated converters with the scheme. (Continues past this span.)
func init() { err := api.Scheme.AddGeneratedConversionFuncs( convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource,
convert_api_Binding_To_v1_Binding, convert_api_Capabilities_To_v1_Capabilities, convert_api_ComponentCondition_To_v1_ComponentCondition, convert_api_ComponentStatusList_To_v1_ComponentStatusList, convert_api_ComponentStatus_To_v1_ComponentStatus, convert_api_ContainerPort_To_v1_ContainerPort, convert_api_ContainerStateRunning_To_v1_ContainerStateRunning, convert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated, convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting, convert_api_ContainerState_To_v1_ContainerState, convert_api_ContainerStatus_To_v1_ContainerStatus, convert_api_Container_To_v1_Container, convert_api_DaemonList_To_v1_DaemonList, convert_api_DaemonSpec_To_v1_DaemonSpec, convert_api_DaemonStatus_To_v1_DaemonStatus, convert_api_Daemon_To_v1_Daemon, convert_api_DeleteOptions_To_v1_DeleteOptions, convert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource, convert_api_EndpointAddress_To_v1_EndpointAddress, convert_api_EndpointPort_To_v1_EndpointPort, convert_api_EndpointSubset_To_v1_EndpointSubset, convert_api_EndpointsList_To_v1_EndpointsList, convert_api_Endpoints_To_v1_Endpoints, convert_api_EnvVarSource_To_v1_EnvVarSource, convert_api_EnvVar_To_v1_EnvVar, convert_api_EventList_To_v1_EventList, convert_api_EventSource_To_v1_EventSource, convert_api_Event_To_v1_Event, convert_api_ExecAction_To_v1_ExecAction, convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource, convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource, convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource, convert_api_HTTPGetAction_To_v1_HTTPGetAction, convert_api_Handler_To_v1_Handler, convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource, convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource, convert_api_Lifecycle_To_v1_Lifecycle, convert_api_LimitRangeItem_To_v1_LimitRangeItem, convert_api_LimitRangeList_To_v1_LimitRangeList, convert_api_LimitRangeSpec_To_v1_LimitRangeSpec, convert_api_LimitRange_To_v1_LimitRange, 
convert_api_ListMeta_To_v1_ListMeta, convert_api_ListOptions_To_v1_ListOptions, convert_api_List_To_v1_List, convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress, convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus, convert_api_LocalObjectReference_To_v1_LocalObjectReference, convert_api_NFSVolumeSource_To_v1_NFSVolumeSource, convert_api_NamespaceList_To_v1_NamespaceList, convert_api_NamespaceSpec_To_v1_NamespaceSpec, convert_api_NamespaceStatus_To_v1_NamespaceStatus, convert_api_Namespace_To_v1_Namespace, convert_api_NodeAddress_To_v1_NodeAddress, convert_api_NodeCondition_To_v1_NodeCondition, convert_api_NodeList_To_v1_NodeList, convert_api_NodeSpec_To_v1_NodeSpec, convert_api_NodeStatus_To_v1_NodeStatus, convert_api_NodeSystemInfo_To_v1_NodeSystemInfo, convert_api_Node_To_v1_Node, convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector, convert_api_ObjectMeta_To_v1_ObjectMeta, convert_api_ObjectReference_To_v1_ObjectReference, convert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList, convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec, convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus, convert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource, convert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim, convert_api_PersistentVolumeList_To_v1_PersistentVolumeList, convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource, convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec, convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus, convert_api_PersistentVolume_To_v1_PersistentVolume, convert_api_PodAttachOptions_To_v1_PodAttachOptions, convert_api_PodCondition_To_v1_PodCondition, convert_api_PodExecOptions_To_v1_PodExecOptions, convert_api_PodList_To_v1_PodList, convert_api_PodLogOptions_To_v1_PodLogOptions, convert_api_PodProxyOptions_To_v1_PodProxyOptions, convert_api_PodStatusResult_To_v1_PodStatusResult, convert_api_PodStatus_To_v1_PodStatus, 
convert_api_PodTemplateList_To_v1_PodTemplateList, convert_api_PodTemplateSpec_To_v1_PodTemplateSpec, convert_api_PodTemplate_To_v1_PodTemplate, convert_api_Pod_To_v1_Pod, convert_api_Probe_To_v1_Probe, convert_api_RBDVolumeSource_To_v1_RBDVolumeSource, convert_api_RangeAllocation_To_v1_RangeAllocation, convert_api_ReplicationControllerList_To_v1_ReplicationControllerList, convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus, convert_api_ReplicationController_To_v1_ReplicationController, convert_api_ResourceQuotaList_To_v1_ResourceQuotaList, convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec, convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus, convert_api_ResourceQuota_To_v1_ResourceQuota, convert_api_ResourceRequirements_To_v1_ResourceRequirements, convert_api_SELinuxOptions_To_v1_SELinuxOptions, convert_api_SecretList_To_v1_SecretList, convert_api_SecretVolumeSource_To_v1_SecretVolumeSource, convert_api_Secret_To_v1_Secret, convert_api_SecurityContext_To_v1_SecurityContext, convert_api_SerializedReference_To_v1_SerializedReference, convert_api_ServiceAccountList_To_v1_ServiceAccountList, convert_api_ServiceAccount_To_v1_ServiceAccount, convert_api_ServiceList_To_v1_ServiceList, convert_api_ServicePort_To_v1_ServicePort, convert_api_ServiceSpec_To_v1_ServiceSpec, convert_api_ServiceStatus_To_v1_ServiceStatus, convert_api_Service_To_v1_Service, convert_api_StatusCause_To_v1_StatusCause, convert_api_StatusDetails_To_v1_StatusDetails, convert_api_Status_To_v1_Status, convert_api_TCPSocketAction_To_v1_TCPSocketAction, convert_api_ThirdPartyResourceData_To_v1_ThirdPartyResourceData, convert_api_TypeMeta_To_v1_TypeMeta, convert_api_VolumeMount_To_v1_VolumeMount, convert_api_VolumeSource_To_v1_VolumeSource, convert_api_Volume_To_v1_Volume, convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource, convert_v1_Binding_To_api_Binding, convert_v1_Capabilities_To_api_Capabilities, 
convert_v1_ComponentCondition_To_api_ComponentCondition, convert_v1_ComponentStatusList_To_api_ComponentStatusList, convert_v1_ComponentStatus_To_api_ComponentStatus, convert_v1_ContainerPort_To_api_ContainerPort, convert_v1_ContainerStateRunning_To_api_ContainerStateRunning, convert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated, convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting, convert_v1_ContainerState_To_api_ContainerState, convert_v1_ContainerStatus_To_api_ContainerStatus, convert_v1_Container_To_api_Container, convert_v1_DaemonList_To_api_DaemonList, convert_v1_DaemonSpec_To_api_DaemonSpec, convert_v1_DaemonStatus_To_api_DaemonStatus, convert_v1_Daemon_To_api_Daemon, convert_v1_DeleteOptions_To_api_DeleteOptions, convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource, convert_v1_EndpointAddress_To_api_EndpointAddress, convert_v1_EndpointPort_To_api_EndpointPort, convert_v1_EndpointSubset_To_api_EndpointSubset, convert_v1_EndpointsList_To_api_EndpointsList, convert_v1_Endpoints_To_api_Endpoints, convert_v1_EnvVarSource_To_api_EnvVarSource, convert_v1_EnvVar_To_api_EnvVar, convert_v1_EventList_To_api_EventList, convert_v1_EventSource_To_api_EventSource, convert_v1_Event_To_api_Event, convert_v1_ExecAction_To_api_ExecAction, convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource, convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource, convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource, convert_v1_HTTPGetAction_To_api_HTTPGetAction, convert_v1_Handler_To_api_Handler, convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource, convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource, convert_v1_Lifecycle_To_api_Lifecycle, convert_v1_LimitRangeItem_To_api_LimitRangeItem, convert_v1_LimitRangeList_To_api_LimitRangeList, convert_v1_LimitRangeSpec_To_api_LimitRangeSpec, convert_v1_LimitRange_To_api_LimitRange, convert_v1_ListMeta_To_api_ListMeta, convert_v1_ListOptions_To_api_ListOptions, 
convert_v1_List_To_api_List, convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress, convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus, convert_v1_LocalObjectReference_To_api_LocalObjectReference, convert_v1_NFSVolumeSource_To_api_NFSVolumeSource, convert_v1_NamespaceList_To_api_NamespaceList, convert_v1_NamespaceSpec_To_api_NamespaceSpec, convert_v1_NamespaceStatus_To_api_NamespaceStatus, convert_v1_Namespace_To_api_Namespace, convert_v1_NodeAddress_To_api_NodeAddress, convert_v1_NodeCondition_To_api_NodeCondition, convert_v1_NodeList_To_api_NodeList, convert_v1_NodeSpec_To_api_NodeSpec, convert_v1_NodeStatus_To_api_NodeStatus, convert_v1_NodeSystemInfo_To_api_NodeSystemInfo, convert_v1_Node_To_api_Node, convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector, convert_v1_ObjectMeta_To_api_ObjectMeta, convert_v1_ObjectReference_To_api_ObjectReference, convert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList, convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec, convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus, convert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource, convert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim, convert_v1_PersistentVolumeList_To_api_PersistentVolumeList, convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource, convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec, convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus, convert_v1_PersistentVolume_To_api_PersistentVolume, convert_v1_PodAttachOptions_To_api_PodAttachOptions, convert_v1_PodCondition_To_api_PodCondition, convert_v1_PodExecOptions_To_api_PodExecOptions, convert_v1_PodList_To_api_PodList, convert_v1_PodLogOptions_To_api_PodLogOptions, convert_v1_PodProxyOptions_To_api_PodProxyOptions, convert_v1_PodStatusResult_To_api_PodStatusResult, convert_v1_PodStatus_To_api_PodStatus, convert_v1_PodTemplateList_To_api_PodTemplateList, 
convert_v1_PodTemplateSpec_To_api_PodTemplateSpec, convert_v1_PodTemplate_To_api_PodTemplate, convert_v1_Pod_To_api_Pod, convert_v1_Probe_To_api_Probe, convert_v1_RBDVolumeSource_To_api_RBDVolumeSource, convert_v1_RangeAllocation_To_api_RangeAllocation, convert_v1_ReplicationControllerList_To_api_ReplicationControllerList, convert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus, convert_v1_ReplicationController_To_api_ReplicationController, convert_v1_ResourceQuotaList_To_api_ResourceQuotaList, convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec, convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus, convert_v1_ResourceQuota_To_api_ResourceQuota, convert_v1_ResourceRequirements_To_api_ResourceRequirements, convert_v1_SELinuxOptions_To_api_SELinuxOptions, convert_v1_SecretList_To_api_SecretList, convert_v1_SecretVolumeSource_To_api_SecretVolumeSource, convert_v1_Secret_To_api_Secret, convert_v1_SecurityContext_To_api_SecurityContext, convert_v1_SerializedReference_To_api_SerializedReference, convert_v1_ServiceAccountList_To_api_ServiceAccountList, convert_v1_ServiceAccount_To_api_ServiceAccount, convert_v1_ServiceList_To_api_ServiceList, convert_v1_ServicePort_To_api_ServicePort, convert_v1_ServiceSpec_To_api_ServiceSpec, convert_v1_ServiceStatus_To_api_ServiceStatus, convert_v1_Service_To_api_Service, convert_v1_StatusCause_To_api_StatusCause, convert_v1_StatusDetails_To_api_StatusDetails, convert_v1_Status_To_api_Status, convert_v1_TCPSocketAction_To_api_TCPSocketAction, convert_v1_ThirdPartyResourceData_To_api_ThirdPartyResourceData, convert_v1_TypeMeta_To_api_TypeMeta, convert_v1_VolumeMount_To_api_VolumeMount, convert_v1_VolumeSource_To_api_VolumeSource, convert_v1_Volume_To_api_Volume, ) if err != nil { // If one of the conversion functions is malformed, detect it immediately. panic(err) } }
/**
 * Copyright 2012 Twitter, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package parquet.hadoop.codec;

import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.hadoop.io.compress.Decompressor;
import org.xerial.snappy.Snappy;

import parquet.Preconditions;

/**
 * Hadoop {@link Decompressor} backed by snappy-java. Input is accumulated via
 * {@link #setInput(byte[], int, int)} into a direct buffer; the first call to
 * {@link #decompress(byte[], int, int)} after input is complete decompresses
 * the whole accumulated block in one shot, and subsequent calls drain the
 * result. Stateful mutators are {@code synchronized} against each other.
 */
public class SnappyDecompressor implements Decompressor {
  // Buffer for uncompressed output. This buffer grows as necessary.
  private ByteBuffer outputBuffer = ByteBuffer.allocateDirect(0);

  // Buffer for compressed input. This buffer grows as necessary.
  private ByteBuffer inputBuffer = ByteBuffer.allocateDirect(0);

  // Set once the accumulated input has been decompressed; cleared by reset().
  private boolean finished;

  /**
   * Fills specified buffer with uncompressed data. Returns actual number
   * of bytes of uncompressed data. A return value of 0 indicates that
   * {@link #needsInput()} should be called in order to determine if more
   * input data is required.
   *
   * @param buffer Destination buffer for the uncompressed data
   * @param off Start offset of the data
   * @param len Size of the buffer
   * @return The actual number of bytes of uncompressed data.
   * @throws IOException
   */
  @Override
  public synchronized int decompress(byte[] buffer, int off, int len) throws IOException {
    SnappyUtil.validateBuffer(buffer, off, len);
    // No pending input and no pending output: caller must supply more input first.
    if (inputBuffer.position() == 0 && !outputBuffer.hasRemaining()) {
      return 0;
    }

    if (!outputBuffer.hasRemaining()) {
      // rewind() flips inputBuffer from write mode (position == data end) to
      // read mode (position == 0, limit still at data end).
      inputBuffer.rewind();
      // NOTE(review): the message text "Invalid position of 0." is misleading —
      // the checks actually REQUIRE position == 0 and fail otherwise.
      Preconditions.checkArgument(inputBuffer.position() == 0, "Invalid position of 0.");
      Preconditions.checkArgument(outputBuffer.position() == 0, "Invalid position of 0.");
      // There is compressed input, decompress it now.
      int decompressedSize = Snappy.uncompressedLength(inputBuffer);
      if (decompressedSize > outputBuffer.capacity()) {
        outputBuffer = ByteBuffer.allocateDirect(decompressedSize);
      }

      // Reset the previous outputBuffer (i.e. set position to 0)
      outputBuffer.clear();
      int size = Snappy.uncompress(inputBuffer, outputBuffer);
      outputBuffer.limit(size);
      // We've decompressed the entire input, reset the input now
      inputBuffer.clear();
      inputBuffer.limit(0);
      finished = true;
    }

    // Return compressed output up to 'len'
    int numBytes = Math.min(len, outputBuffer.remaining());
    outputBuffer.get(buffer, off, numBytes);
    return numBytes;
  }

  /**
   * Sets input data for decompression.
   * This should be called if and only if {@link #needsInput()} returns
   * <code>true</code> indicating that more input data is required.
   * (Both native and non-native versions of various Decompressors require
   * that the data passed in via <code>b[]</code> remain unmodified until
   * the caller is explicitly notified--via {@link #needsInput()}--that the
   * buffer may be safely modified.  With this requirement, an extra
   * buffer-copy can be avoided.)
   *
   * @param buffer Input data
   * @param off Start offset
   * @param len Length
   */
  @Override
  public synchronized void setInput(byte[] buffer, int off, int len) {
    SnappyUtil.validateBuffer(buffer, off, len);
    if (inputBuffer.capacity() - inputBuffer.position() < len) {
      // Grow to exactly the accumulated size and copy the bytes written so far.
      ByteBuffer newBuffer = ByteBuffer.allocateDirect(inputBuffer.position() + len);
      inputBuffer.rewind();
      newBuffer.put(inputBuffer);
      inputBuffer = newBuffer;
    } else {
      // Capacity suffices; just extend the limit to make room for the new bytes.
      inputBuffer.limit(inputBuffer.position() + len);
    }
    inputBuffer.put(buffer, off, len);
  }

  @Override
  public void end() {
    // No-op
  }

  // Done only when the block was decompressed AND fully drained by decompress().
  @Override
  public synchronized boolean finished() {
    return finished && !outputBuffer.hasRemaining();
  }

  @Override
  public int getRemaining() {
    return 0;
  }

  // More input is needed only when neither buffer holds pending data.
  @Override
  public synchronized boolean needsInput() {
    return !inputBuffer.hasRemaining() && !outputBuffer.hasRemaining();
  }

  // Return both buffers to the empty state (position 0, limit 0) for reuse.
  @Override
  public synchronized void reset() {
    finished = false;
    inputBuffer.rewind();
    outputBuffer.rewind();
    inputBuffer.limit(0);
    outputBuffer.limit(0);
  }

  @Override
  public boolean needsDictionary() {
    return false;
  }

  @Override
  public void setDictionary(byte[] b, int off, int len) {
    // No-op
  }
}
/** * Copyright 2014 NAVER Corp. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.navercorp.pinpoint.profiler.interceptor.bci; import java.util.concurrent.Callable; /** * @author jaehong.kim * */ public class TestObjectNestedClass { public void annonymousInnerClass() { new Callable<Object>() { @Override public Object call() throws Exception { return null; } }; new Runnable() { public void run() { } }; } public void annonymousInnerClass2() { new Callable<Object>() { @Override public Object call() throws Exception { return null; } }; new Runnable() { public void run() { } }; } public void instanceInnerClass() { new InstanceInner(); } class InstanceInner {} public void localInnerClass() { class LocalInner {} new LocalInner(); } public void localInnerClass2() { class LocalInner {} new LocalInner(); } public void staticNestedClass() { new StaticNested(); } static class StaticNested{} public void enclosingMethod(String s, int i) { class LocalInner {} new LocalInner(); } }
/*
 * Copyright (c) 2017, WSO2 Inc. (http://wso2.com) All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package server.obj;

import javax.xml.bind.annotation.XmlRootElement;

/**
 * JAXB-serializable salary record for an employee, marshalled as a
 * {@code <Salary>} element.
 */
@XmlRootElement(name = "Salary")
public class Salary {

    private long id;         // record identifier
    private long fixed;      // fixed (base) component of the salary
    private long allowance;  // allowance component of the salary
    private String empId;    // identifier of the employee this salary belongs to

    public long getId() {
        return id;
    }

    public void setId(long id) {
        this.id = id;
    }

    public long getFixed() {
        return fixed;
    }

    /**
     * Sets the fixed salary component. Named to pair with {@link #getFixed()}
     * so bean introspection (and JAXB unmarshalling) sees "fixed" as a
     * writable property — the original misspelled setter broke that pairing.
     */
    public void setFixed(long fixed) {
        this.fixed = fixed;
    }

    /**
     * Misspelled alias of {@link #setFixed(long)} kept for backward
     * compatibility with existing callers.
     *
     * @deprecated use {@link #setFixed(long)} instead.
     */
    @Deprecated
    public void setFexed(long fixed) {
        setFixed(fixed);
    }

    public long getAllowance() {
        return allowance;
    }

    public void setAllowance(long allowance) {
        this.allowance = allowance;
    }

    public String getEmpId() {
        return empId;
    }

    public void setEmpId(String empId) {
        this.empId = empId;
    }
}
package com.myapp;

// Android-style resource-identifier constants: one nested final class per
// resource type, each holding 0x7fTTNNNN int ids (TT = type bucket,
// NNNN = entry index). NOTE(review): this follows the layout of an
// aapt-generated R class — if it is generated, edit the resources, not
// this file; confirm with the build setup.
public final class R {
    public static final class drawable {
        public static final int test_icon=0x7f02005d;
    }
    public static final class mipmap {
        public static final int test_icon=0x7f04005d;
    }
    public static final class color {
        public static final int colorAccent=0x7f0a0013;
        public static final int colorPrimary=0x7f0a0014;
        public static final int colorPrimaryDark=0x7f0a0015;
    }
    public static final class layout {
        public static final int activity_kotlin=0x7f08005d;
    }
}
/**
 * slider - jQuery EasyUI
 *
 * Copyright (c) 2009-2013 www.jeasyui.com. All rights reserved.
 *
 * Licensed under the GPL or commercial licenses
 * To use it on other terms please contact us: info@jeasyui.com
 * http://www.gnu.org/licenses/gpl.txt
 * http://www.jeasyui.com/license_commercial.php
 *
 * Dependencies:
 *   draggable
 *
 */
(function($){
	/**
	 * Build the slider DOM right after the original element, hide the
	 * original, and move its form "name" onto the hidden value input so
	 * the slider submits in its place. Returns the new slider jQuery node.
	 */
	function init(target){
		var slider = $('<div class="slider">' +
				'<div class="slider-inner">' +
				'<a href="javascript:void(0)" class="slider-handle"></a>' +
				'<span class="slider-tip"></span>' +
				'</div>' +
				'<div class="slider-rule"></div>' +
				'<div class="slider-rulelabel"></div>' +
				'<div style="clear:both"></div>' +
				'<input type="hidden" class="slider-value">' +
				'</div>').insertAfter(target);
		var t = $(target);
		t.addClass('slider-f').hide();
		var name = t.attr('name');
		if (name){
			slider.find('input.slider-value').attr('name', name);
			// keep the original name reachable (e.g. for later inspection/teardown)
			t.removeAttr('name').attr('sliderName', name);
		}
		return slider;
	}

	/**
	 * set the slider size, for vertical slider, the height property is required
	 */
	function setSize(target, param){
		var state = $.data(target, 'slider');
		var opts = state.options;
		var slider = state.slider;
		if (param){
			if (param.width) opts.width = param.width;
			if (param.height) opts.height = param.height;
		}
		if (opts.mode == 'h'){
			// horizontal: clear any fixed heights, apply numeric width only
			slider.css('height', '');
			slider.children('div').css('height', '');
			if (!isNaN(opts.width)){
				slider.width(opts.width);
			}
		} else {
			// vertical: clear widths, propagate the height to rule/label/inner
			slider.css('width', '');
			slider.children('div').css('width', '');
			if (!isNaN(opts.height)){
				slider.height(opts.height);
				slider.find('div.slider-rule').height(opts.height);
				slider.find('div.slider-rulelabel').height(opts.height);
				slider.find('div.slider-inner')._outerHeight(opts.height);
			}
		}
		// reposition the handle for the new geometry without firing onChange
		initValue(target);
	}

	/**
	 * show slider rule if needed
	 */
	function showRule(target){
		var state = $.data(target, 'slider');
		var opts = state.options;
		var slider = state.slider;
		// vertical sliders read the rule bottom-up; 'reversed' flips it again
		var aa = opts.mode == 'h' ? opts.rule : opts.rule.slice(0).reverse();
		if (opts.reversed){
			aa = aa.slice(0).reverse();
		}
		_build(aa);

		// Lay the rule ticks and labels out evenly (i/(n-1) of the track each)
		function _build(aa){
			var rule = slider.find('div.slider-rule');
			var label = slider.find('div.slider-rulelabel');
			rule.empty();
			label.empty();
			for(var i=0; i<aa.length; i++){
				var distance = i*100/(aa.length-1)+'%';
				var span = $('<span></span>').appendTo(rule);
				span.css((opts.mode=='h'?'left':'top'), distance);
				// show the labels
				if (aa[i] != '|'){
					span = $('<span></span>').appendTo(label);
					span.html(aa[i]);
					if (opts.mode == 'h'){
						// center the label on its tick
						span.css({
							left: distance,
							marginLeft: -Math.round(span.outerWidth()/2)
						});
					} else {
						span.css({
							top: distance,
							marginTop: -Math.round(span.outerHeight()/2)
						});
					}
				}
			}
		}
	}

	/**
	 * build the slider and set some properties
	 */
	function buildSlider(target){
		var state = $.data(target, 'slider');
		var opts = state.options;
		var slider = state.slider;
		slider.removeClass('slider-h slider-v slider-disabled');
		slider.addClass(opts.mode == 'h' ? 'slider-h' : 'slider-v');
		slider.addClass(opts.disabled ? 'slider-disabled' : '');

		// handle dragging: translate pixel position -> value, snapped to step
		slider.find('a.slider-handle').draggable({
			axis:opts.mode,
			cursor:'pointer',
			disabled: opts.disabled,
			onDrag:function(e){
				var left = e.data.left;
				var width = slider.width();
				if (opts.mode!='h'){
					left = e.data.top;
					width = slider.height();
				}
				if (left < 0 || left > width) {
					// outside the track: ignore the move
					return false;
				} else {
					var value = pos2value(target, left);
					adjustValue(value);
					// always cancel the default drag; setValue moved the handle already
					return false;
				}
			},
			onBeforeDrag:function(){
				state.isDragging = true;
			},
			onStartDrag:function(){
				opts.onSlideStart.call(target, opts.value);
			},
			onStopDrag:function(e){
				var value = pos2value(target, (opts.mode=='h'?e.data.left:e.data.top));
				adjustValue(value);
				opts.onSlideEnd.call(target, opts.value);
				opts.onComplete.call(target, opts.value);
				state.isDragging = false;
			}
		});

		// click-to-jump on the track (suppressed while a drag is in progress)
		slider.find('div.slider-inner').unbind('.slider').bind('mousedown.slider', function(e){
			if (state.isDragging){return}
			var pos = $(this).offset();
			var value = pos2value(target, (opts.mode=='h'?(e.pageX-pos.left):(e.pageY-pos.top)));
			adjustValue(value);
			opts.onComplete.call(target, opts.value);
		});

		// snap a raw value to the nearest multiple of opts.step, then apply it
		function adjustValue(value){
			var s = Math.abs(value % opts.step);
			if (s < opts.step/2){
				value -= s;
			} else {
				value = value - s + opts.step;
			}
			setValue(target, value);
		}
	}

	/**
	 * set a specified value to slider
	 */
	function setValue(target, value){
		var state = $.data(target, 'slider');
		var opts = state.options;
		var slider = state.slider;
		var oldValue = opts.value;
		// clamp into [min, max]
		if (value < opts.min) value = opts.min;
		if (value > opts.max) value = opts.max;

		opts.value = value;
		// mirror into the original element and the hidden submit input
		$(target).val(value);
		slider.find('input.slider-value').val(value);

		var pos = value2pos(target, value);
		var tip = slider.find('.slider-tip');
		if (opts.showTip){
			tip.show();
			tip.html(opts.tipFormatter.call(target, opts.value));
		} else {
			tip.hide();
		}

		// position handle and tip; the tip is re-centered on the handle
		if (opts.mode == 'h'){
			var style = 'left:'+pos+'px;';
			slider.find('.slider-handle').attr('style', style);
			tip.attr('style', style + 'margin-left:' + (-Math.round(tip.outerWidth()/2)) + 'px');
		} else {
			var style = 'top:' + pos + 'px;';
			slider.find('.slider-handle').attr('style', style);
			tip.attr('style', style + 'margin-left:' + (-Math.round(tip.outerWidth())) + 'px');
		}

		// NOTE: value2pos returns a string, so this is a loose != on purpose
		if (oldValue != value){
			opts.onChange.call(target, value, oldValue);
		}
	}

	// Apply opts.value with onChange temporarily swapped out, so initial
	// positioning/resizing never fires the user's change handler.
	function initValue(target){
		var opts = $.data(target, 'slider').options;
		var fn = opts.onChange;
		opts.onChange = function(){};
		setValue(target, opts.value);
		opts.onChange = fn;
	}

	/**
	 * translate value to slider position
	 */
	function value2pos(target, value){
		var state = $.data(target, 'slider');
		var opts = state.options;
		var slider = state.slider;
		if (opts.mode == 'h'){
			var pos = (value-opts.min)/(opts.max-opts.min)*slider.width();
			if (opts.reversed){
				pos = slider.width() - pos;
			}
		} else {
			// vertical tracks grow downward, so invert against the height
			var pos = slider.height() - (value-opts.min)/(opts.max-opts.min)*slider.height();
			if (opts.reversed){
				pos = slider.height() - pos;
			}
		}
		// NOTE(review): toFixed(0) returns a STRING; callers rely on JS coercion
		return pos.toFixed(0);
	}

	/**
	 * translate slider position to value
	 */
	function pos2value(target, pos){
		var state = $.data(target, 'slider');
		var opts = state.options;
		var slider = state.slider;
		if (opts.mode == 'h'){
			var value = opts.min + (opts.max-opts.min)*(pos/slider.width());
		} else {
			var value = opts.min + (opts.max-opts.min)*((slider.height()-pos)/slider.height());
		}
		// NOTE(review): toFixed(0) yields a string; the reversed branch coerces
		// it back to a number via subtraction, the other branch returns a string
		return opts.reversed ? opts.max - value.toFixed(0) : value.toFixed(0);
	}

	// Plugin entry point: string -> method dispatch, object -> (re)configure.
	$.fn.slider = function(options, param){
		if (typeof options == 'string'){
			return $.fn.slider.methods[options](this, param);
		}

		options = options || {};
		return this.each(function(){
			var state = $.data(this, 'slider');
			if (state){
				$.extend(state.options, options);
			} else {
				// first call: merge defaults <- markup options <- call options
				state = $.data(this, 'slider', {
					options: $.extend({}, $.fn.slider.defaults, $.fn.slider.parseOptions(this), options),
					slider: init(this)
				});
				$(this).removeAttr('disabled');
			}

			var opts = state.options;
			// numeric options may arrive as attribute strings; normalize them
			opts.min = parseFloat(opts.min);
			opts.max = parseFloat(opts.max);
			opts.value = parseFloat(opts.value);
			opts.step = parseFloat(opts.step);
			opts.originalValue = opts.value;

			buildSlider(this);
			showRule(this);
			setSize(this);
		});
	};

	$.fn.slider.methods = {
		options: function(jq){
			return $.data(jq[0], 'slider').options;
		},
		destroy: function(jq){
			return jq.each(function(){
				$.data(this, 'slider').slider.remove();
				$(this).remove();
			});
		},
		resize: function(jq, param){
			return jq.each(function(){
				setSize(this, param);
			});
		},
		getValue: function(jq){
			return jq.slider('options').value;
		},
		setValue: function(jq, value){
			return jq.each(function(){
				setValue(this, value);
			});
		},
		// reset to the configured minimum
		clear: function(jq){
			return jq.each(function(){
				var opts = $(this).slider('options');
				setValue(this, opts.min);
			});
		},
		// reset to the value the widget was created with
		reset: function(jq){
			return jq.each(function(){
				var opts = $(this).slider('options');
				setValue(this, opts.originalValue);
			});
		},
		enable: function(jq){
			return jq.each(function(){
				$.data(this, 'slider').options.disabled = false;
				buildSlider(this);
			});
		},
		disable: function(jq){
			return jq.each(function(){
				$.data(this, 'slider').options.disabled = true;
				buildSlider(this);
			});
		}
	};

	// Read declarative options off the element's attributes/value.
	$.fn.slider.parseOptions = function(target){
		var t = $(target);
		return $.extend({}, $.parser.parseOptions(target, [
			'width','height','mode',{reversed:'boolean',showTip:'boolean',min:'number',max:'number',step:'number'}
		]), {
			value: (t.val() || undefined),
			disabled: (t.attr('disabled') ? true : undefined),
			// NOTE(review): eval of the 'rule' attribute — markup is trusted here
			rule: (t.attr('rule') ? eval(t.attr('rule')) : undefined)
		});
	};

	$.fn.slider.defaults = {
		width: 'auto',
		height: 'auto',
		mode: 'h',	// 'h'(horizontal) or 'v'(vertical)
		reversed: false,
		showTip: false,
		disabled: false,
		value: 0,
		min: 0,
		max: 100,
		step: 1,
		rule: [],	// [0,'|',100]
		tipFormatter: function(value){return value},
		onChange: function(value, oldValue){},
		onSlideStart: function(value){},
		onSlideEnd: function(value){},
		onComplete: function(value){}
	};
})(jQuery);
/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. */ package com.blogspot.na5cent.connectdb; import com.blogspot.na5cent.connectdb.model.Department; import com.blogspot.na5cent.connectdb.printer.GenericReflectPrinter; import com.blogspot.na5cent.connectdb.query.Page; import com.blogspot.na5cent.connectdb.query.Pagination; import com.blogspot.na5cent.connectdb.service.DepartmentService; /** * * @author anonymous */ public class S5QueryPagination { public static void main(String[] args) throws Exception { Pagination pagination = new Pagination(1, 5); Page<Department> page = DepartmentService.findAll(pagination); System.out.println("total elements = " + page.getTotalElements()); System.out.println("total pages = " + page.getTotalPages()); System.out.println("page size = " + page.getPageRequestSize()); System.out.println("current page = " + page.getCurrentPageNumber()); System.out.println("current page size = " + page.getCurrentPageSize()); GenericReflectPrinter.prints(page.getContents()); } }
/******************************************************************************* * * Pentaho Big Data * * Copyright (C) 2002-2017 by Hitachi Vantara : http://www.pentaho.com * ******************************************************************************* * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ******************************************************************************/ package org.pentaho.di.bigdata; import org.pentaho.di.core.KettleClientEnvironment; import org.pentaho.di.core.plugins.PluginRegistry; import org.pentaho.di.core.plugins.PluginRegistryExtension; import org.pentaho.di.core.plugins.PluginTypeInterface; import org.pentaho.di.core.plugins.RegistryPlugin; @RegistryPlugin( id = "ShimDependentPluginRegistryPlugin", name = "ShimDependentPluginRegistryPlugin", description = "Registers sub plugins of the big data plugin that depend on the shim jars in their classpath" ) public class ShimDependentPluginRegistryPlugin implements PluginRegistryExtension { @Override public String getPluginId( Class<? extends PluginTypeInterface> arg0, Object arg1 ) { return null; } @Override public void init( PluginRegistry pluginRegistry ) { if ( KettleClientEnvironment.isInitialized() ) { PluginRegistry.addPluginType( ShimDependentJobEntryPluginType.getInstance() ); } } @Override public void searchForType( PluginTypeInterface pluginTypeInterface ) { } }
/*
 * Copyright 2000-2014 JetBrains s.r.o.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jetbrains.plugins.groovy.intentions.base;

import com.intellij.codeInsight.CodeInsightUtilCore;
import com.intellij.codeInsight.daemon.impl.quickfix.CreateFromUsageUtils;
import com.intellij.codeInsight.daemon.impl.quickfix.CreateMethodFromUsageFix;
import com.intellij.codeInsight.template.*;
import com.intellij.ide.fileTemplates.FileTemplate;
import com.intellij.ide.fileTemplates.FileTemplateManager;
import com.intellij.openapi.application.ApplicationManager;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.editor.Editor;
import com.intellij.openapi.fileEditor.FileEditorManager;
import com.intellij.openapi.fileEditor.OpenFileDescriptor;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.util.TextRange;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.psi.*;
import com.intellij.psi.util.PsiTreeUtil;
import com.intellij.util.IncorrectOperationException;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.plugins.groovy.GroovyLanguage;
import org.jetbrains.plugins.groovy.actions.GroovyTemplates;
import org.jetbrains.plugins.groovy.lang.psi.api.statements.typedef.members.GrMethod;
import org.jetbrains.plugins.groovy.lang.psi.expectedTypes.TypeConstraint;
import org.jetbrains.plugins.groovy.lang.psi.util.GrTraitUtil;
import org.jetbrains.plugins.groovy.template.expressions.ChooseTypeExpression;
import org.jetbrains.plugins.groovy.template.expressions.ParameterNameExpression;

/**
 * Helpers shared by Groovy "create from usage" style intentions: builds and
 * starts a live template over a freshly generated method so the user can fill
 * in its return type, parameter types and parameter names interactively.
 */
public class IntentionUtils {
  private static final Logger LOG = Logger.getInstance(IntentionUtils.class);

  /**
   * Replaces {@code method}'s text with an interactive live template whose
   * editable slots are the return type (unless {@code isConstructor}), every
   * parameter type and every parameter name. When the user finishes the
   * template, the generated method is post-processed: type parameters needed
   * from the calling context are copied over and a default body is generated
   * from the Groovy "from usage" file template.
   *
   * @param argTypes              argument types at the call site; must match
   *                              {@code method}'s parameter count (asserted)
   * @param paramTypesExpressions one type-chooser expression per parameter
   * @param method                the freshly created (not yet edited) method
   * @param owner                 class that will contain the method; its file
   *                              is opened in an editor for the template
   * @param constraints           expected-type constraints for the return type
   * @param isConstructor         when true, no return-type slot is created
   * @param context               element the method is created from; if it is
   *                              a PsiMethod, its type parameters may be
   *                              copied onto the generated method
   */
  public static void createTemplateForMethod(PsiType[] argTypes,
                                             ChooseTypeExpression[] paramTypesExpressions,
                                             PsiMethod method,
                                             PsiClass owner,
                                             TypeConstraint[] constraints,
                                             boolean isConstructor,
                                             @NotNull final PsiElement context) {
    final Project project = owner.getProject();
    PsiTypeElement typeElement = method.getReturnTypeElement();
    // Return-type chooser; the last flag selects Groovy-flavored suggestions.
    ChooseTypeExpression expr =
      new ChooseTypeExpression(constraints, PsiManager.getInstance(project), context.getResolveScope(),
                               method.getLanguage() == GroovyLanguage.INSTANCE);
    TemplateBuilderImpl builder = new TemplateBuilderImpl(method);
    if (!isConstructor) {
      // Constructors have no return type element; anything else must have one.
      assert typeElement != null;
      builder.replaceElement(typeElement, expr);
    }
    PsiParameter[] parameters = method.getParameterList().getParameters();
    assert parameters.length == argTypes.length;
    for (int i = 0; i < parameters.length; i++) {
      PsiParameter parameter = parameters[i];
      PsiTypeElement parameterTypeElement = parameter.getTypeElement();
      // One editable slot for the parameter's type and one for its name.
      builder.replaceElement(parameterTypeElement, paramTypesExpressions[i]);
      builder.replaceElement(parameter.getNameIdentifier(), new ParameterNameExpression(null));
    }
    PsiCodeBlock body = method.getBody();
    if (body != null) {
      PsiElement lbrace = body.getLBrace();
      assert lbrace != null;
      // Leave the caret just inside the body when the template completes.
      builder.setEndVariableAfter(lbrace);
    }
    else {
      // Abstract/interface method: caret lands after the parameter list.
      builder.setEndVariableAfter(method.getParameterList());
    }

    // Reformat/shorten references and re-obtain a valid PSI element before
    // building the template — the builder captured ranges relative to it.
    method = CodeInsightUtilCore.forcePsiPostprocessAndRestoreElement(method);
    Template template = builder.buildTemplate();

    final PsiFile targetFile = owner.getContainingFile();
    final Editor newEditor = positionCursor(project, targetFile, method);
    // NOTE(review): positionCursor can return null (no virtual file); the
    // next line would then NPE — presumably callers guarantee a physical file.
    TextRange range = method.getTextRange();
    // Remove the method's static text; the live template re-inserts it with
    // editable slots at the same offset.
    newEditor.getDocument().deleteString(range.getStartOffset(), range.getEndOffset());

    TemplateManager manager = TemplateManager.getInstance(project);
    TemplateEditingListener templateListener = new TemplateEditingAdapter() {
      @Override
      public void templateFinished(Template template, boolean brokenOff) {
        ApplicationManager.getApplication().runWriteAction(() -> {
          // Commit so PSI reflects the text the user just produced.
          PsiDocumentManager.getInstance(project).commitDocument(newEditor.getDocument());
          final int offset = newEditor.getCaretModel().getOffset();
          // offset - 1: the caret sits just past the method's closing brace.
          PsiMethod method1 = PsiTreeUtil.findElementOfClassAtOffset(targetFile, offset - 1, PsiMethod.class, false);
          if (context instanceof PsiMethod) {
            // Copy any type parameters the generated method's signature needs
            // from the context method (e.g. created from a generic call site).
            final PsiTypeParameter[] typeParameters = ((PsiMethod)context).getTypeParameters();
            if (typeParameters.length > 0) {
              for (PsiTypeParameter typeParameter : typeParameters) {
                // NOTE(review): method1 may be null here and is dereferenced
                // below before the null check further down — TODO confirm
                // checkTypeParam tolerates a null method.
                if (CreateMethodFromUsageFix.checkTypeParam(method1, typeParameter)) {
                  final JVMElementFactory factory = JVMElementFactories.getFactory(method1.getLanguage(), method1.getProject());
                  PsiTypeParameterList list = method1.getTypeParameterList();
                  if (list == null) {
                    // Method had no <...> section; insert one after modifiers.
                    PsiTypeParameterList newList = factory.createTypeParameterList();
                    list = (PsiTypeParameterList)method1.addAfter(newList, method1.getModifierList());
                  }
                  list.add(factory.createTypeParameter(typeParameter.getName(), typeParameter.getExtendsList().getReferencedTypes()));
                }
              }
            }
          }
          if (method1 != null) {
            try {
              // Groovy methods may omit a return type; temporarily set one so
              // body generation (which may reference it) works, then restore.
              final boolean hasNoReturnType = method1.getReturnTypeElement() == null && method1 instanceof GrMethod;
              if (hasNoReturnType) {
                ((GrMethod)method1).setReturnType(PsiType.VOID);
              }
              if (method1.getBody() != null) {
                // Fill the empty body from the Groovy "from usage" template.
                FileTemplateManager templateManager = FileTemplateManager.getInstance(project);
                FileTemplate fileTemplate = templateManager.getCodeTemplate(GroovyTemplates.GROOVY_FROM_USAGE_METHOD_BODY);
                PsiClass containingClass = method1.getContainingClass();
                LOG.assertTrue(!containingClass.isInterface() || GrTraitUtil.isTrait(containingClass), "Interface bodies should be already set up");
                CreateFromUsageUtils.setupMethodBody(method1, containingClass, fileTemplate);
              }
              if (hasNoReturnType) {
                ((GrMethod)method1).setReturnType(null);
              }
            }
            catch (IncorrectOperationException e) {
              LOG.error(e);
            }
            // Place the caret sensibly inside the finished method.
            CreateFromUsageUtils.setupEditor(method1, newEditor);
          }
        });
      }
    };
    manager.startTemplate(newEditor, template, templateListener);
  }

  /**
   * Opens {@code targetFile} in a text editor with the caret at
   * {@code element}'s text offset.
   *
   * @return the opened editor, or {@code null} when the file has no
   *         {@link VirtualFile} (e.g. a non-physical PSI file)
   */
  public static Editor positionCursor(@NotNull Project project, @NotNull PsiFile targetFile, @NotNull PsiElement element) {
    int textOffset = element.getTextOffset();
    VirtualFile virtualFile = targetFile.getVirtualFile();
    if (virtualFile != null) {
      OpenFileDescriptor descriptor = new OpenFileDescriptor(project, virtualFile, textOffset);
      return FileEditorManager.getInstance(project).openTextEditor(descriptor, true);
    }
    else {
      return null;
    }
  }
}