text
stringlengths 26
99.8k
| meta
dict |
|---|---|
// Copyright 2020 Anapaya Systems
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package service
import (
"bytes"
"fmt"
"html/template"
"net/http"
"os"
toml "github.com/pelletier/go-toml"
"github.com/scionproto/scion/go/lib/env"
"github.com/scionproto/scion/go/lib/serrors"
"github.com/scionproto/scion/go/lib/util"
)
// mainTmpl is the HTML template for the main status page: a title taken
// from the element ID and one link per registered status page.
const mainTmpl = `
<!DOCTYPE html>
<html>
<head>
<title>{{ .ElemId }}</title>
</head>
<body style="font-family:sans-serif">
<h1>{{ .ElemId }}</h1>
{{ range .Pages }}
<p><a href="{{ . }}">[{{ . }}]</a></p>
{{ end }}
</body>
</html>
`

// mainData holds the values rendered into mainTmpl.
type mainData struct {
	ElemId string   // element identifier, shown as page title and heading
	Pages  []string // endpoints linked from the main page
}

// StatusPages maps from a page name to the HTTP handler serving that page.
type StatusPages map[string]http.HandlerFunc
// Register registers the pages with the supplied HTTP server.
// Additionally it registers the main page that links to all the other pages.
func (s StatusPages) Register(serveMux *http.ServeMux, elemId string) error {
	tmpl, err := template.New("main").Parse(mainTmpl)
	if err != nil {
		return err
	}
	// Collect the endpoint names for the index page while wiring up each
	// page's handler.
	endpoints := make([]string, 0, len(s))
	for endpoint, handler := range s {
		endpoints = append(endpoints, endpoint)
		serveMux.HandleFunc(fmt.Sprintf("/%s", endpoint), handler)
	}
	// Render the index page once up front; it never changes afterwards.
	var rendered bytes.Buffer
	if err := tmpl.Execute(&rendered, mainData{ElemId: elemId, Pages: endpoints}); err != nil {
		return serrors.WrapStr("executing template", err)
	}
	index := rendered.String()
	serveMux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, index)
	})
	return nil
}
// NewConfigHandler returns an HTTP handler that serves a page with the
// specified TOML config.
func NewConfigHandler(config interface{}) func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, _ *http.Request) {
		var buf bytes.Buffer
		// Previously the encode error was silently dropped, which could
		// serve an empty or truncated config page; report it instead.
		if err := toml.NewEncoder(&buf).Order(toml.OrderPreserve).Encode(config); err != nil {
			http.Error(w, "Unable to encode config: "+err.Error(),
				http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "text/plain")
		fmt.Fprint(w, buf.String())
	}
}
// NewInfoHandler returns an HTTP handler that serves a page with basic info
// about the process.
func NewInfoHandler() func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, _ *http.Request) {
		info := env.VersionInfo()
		inDocker, err := util.RunsInDocker()
		if err == nil {
			// Docker detection is best effort; omit the line on error.
			info += fmt.Sprintf(" In docker: %v\n", inDocker)
		}
		info += fmt.Sprintf(" pid: %d\n", os.Getpid())
		info += fmt.Sprintf(" euid/egid: %d %d\n", os.Geteuid(), os.Getegid())
		info += fmt.Sprintf(" cmd line: %q\n", os.Args)
		w.Header().Set("Content-Type", "text/plain")
		// Fprint, not Fprintf: info is data, not a format string. The
		// %q-quoted command line above can contain '%' characters which
		// Fprintf would misinterpret as verbs (go vet printf check).
		fmt.Fprint(w, info)
	}
}
|
{
"pile_set_name": "Github"
}
|
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"unsafe"
)
// getcpucap returns the minimum value seen for the zone CPU cap, or 0 if no
// cap is detected.
func getcpucap() uint64 {
	// The resource control block is an opaque object whose size is only
	// known to libc. In practice, given the contents, it is unlikely to
	// grow beyond 8KB so we'll use a static buffer of that size here.
	const rblkmaxsize = 8 * 1024
	// If libc reports a larger block our static buffers cannot hold one;
	// give up rather than overflow them.
	if rctlblk_size() > rblkmaxsize {
		return 0
	}
	// The "zone.cpu-cap" resource control, as described in
	// resource_controls(5), "sets a limit on the amount of CPU time that
	// can be used by a zone. The unit used is the percentage of a single
	// CPU that can be used by all user threads in a zone, expressed as an
	// integer." A C string of the name must be passed to getrctl(2).
	name := []byte("zone.cpu-cap\x00")
	// To iterate over the list of values for a particular resource
	// control, we need two blocks: one for the previously read value and
	// one for the next value.
	var rblk0 [rblkmaxsize]byte
	var rblk1 [rblkmaxsize]byte
	rblk := &rblk0[0]
	rblkprev := &rblk1[0]
	// _RCTL_FIRST starts the iteration; subsequent calls use _RCTL_NEXT.
	var flag uint32 = _RCTL_FIRST
	var capval uint64 = 0
	for {
		if getrctl(unsafe.Pointer(&name[0]), unsafe.Pointer(rblkprev), unsafe.Pointer(rblk), flag) != 0 {
			// The end of the sequence is reported as an ENOENT
			// failure, but determining the CPU cap is not critical
			// here. We'll treat any failure as if it were the end
			// of sequence.
			break
		}
		lflags := rctlblk_get_local_flags(unsafe.Pointer(rblk))
		action := rctlblk_get_local_action(unsafe.Pointer(rblk), 0)
		if (lflags&_RCTL_LOCAL_MAXIMAL) == 0 && action == _RCTL_LOCAL_DENY {
			// This is a finite (not maximal) value representing a
			// cap (deny) action; keep the smallest such cap seen.
			v := rctlblk_get_value(unsafe.Pointer(rblk))
			if capval == 0 || capval > v {
				capval = v
			}
		}
		// Swap the blocks around so that we can fetch the next value
		t := rblk
		rblk = rblkprev
		rblkprev = t
		flag = _RCTL_NEXT
	}
	return capval
}
// getncpu returns the number of CPUs available to the runtime, reducing the
// online-processor count when a smaller zone CPU cap is configured.
func getncpu() int32 {
	online := int32(sysconf(__SC_NPROCESSORS_ONLN))
	if online < 1 {
		return 1
	}
	cents := int32(getcpucap())
	if cents > 0 {
		// Convert from a percentage of CPUs to a number of CPUs,
		// rounding up to make use of a fractional CPU
		// e.g., 336% becomes 4 CPUs
		if ncap := (cents + 99) / 100; ncap < online {
			return ncap
		}
	}
	return online
}
// Declarations for the libc resource-control functions used above. The
// //extern directives bind each Go name to the C symbol of the same name.

//extern getrctl
func getrctl(controlname, oldbuf, newbuf unsafe.Pointer, flags uint32) int32

//extern rctlblk_get_local_action
func rctlblk_get_local_action(buf, signalp unsafe.Pointer) uint32

//extern rctlblk_get_local_flags
func rctlblk_get_local_flags(buf unsafe.Pointer) uint32

//extern rctlblk_get_value
func rctlblk_get_value(buf unsafe.Pointer) uint64

// Fixed: was declared as "rclblk_size", which neither matches the C symbol
// named in the //extern directive nor the rctlblk_size() call in getcpucap.
//extern rctlblk_size
func rctlblk_size() uintptr
|
{
"pile_set_name": "Github"
}
|
#region Copyright (C) 2007-2018 Team MediaPortal
/*
Copyright (C) 2007-2018 Team MediaPortal
http://www.team-mediaportal.com
This file is part of MediaPortal 2
MediaPortal 2 is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MediaPortal 2 is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with MediaPortal 2. If not, see <http://www.gnu.org/licenses/>.
*/
#endregion
using System.Collections.Generic;
using System.Runtime.Serialization;
namespace MediaPortal.Extensions.OnlineLibraries.Libraries.MusicBrainzV2.Data
{
//{
// "created": "2016-04-27T11:11:27.118Z",
// "count": 1,
// "offset": 0,
// "artists": [
// {
// "id": "8538e728-ca0b-4321-b7e5-cff6565dd4c0",
// "type": "Group",
// "score": "100",
// "name": "Depeche Mode",
// "sort-name": "Depeche Mode",
// "country": "GB",
// "area": {
// "id": "8a754a16-0027-3a29-b6d7-2b40ea0481ed",
// "name": "United Kingdom",
// "sort-name": "United Kingdom"
// },
// "begin-area": {
// "id": "9b4cb463-9777-46c3-8190-e1cb3da2749f",
// "name": "Basildon",
// "sort-name": "Basildon"
// },
// "life-span": {
// "begin": "1980",
// "ended": null
// },
// "aliases": [
// {
// "sort-name": "Depech Mode",
// "name": "Depech Mode",
// "locale": null,
// "type": null,
// "primary": null,
// "begin-date": null,
// "end-date": null
// },
// {
// "sort-name": "DM",
// "name": "DM",
// "locale": null,
// "type": "Search hint",
// "primary": null,
// "begin-date": null,
// "end-date": null
// }
// ],
// "tags": [
// {
// "count": 1,
// "name": "electronica"
// },
// {
// "count": 1,
// "name": "post punk"
// },
// {
// "count": 1,
// "name": "alternative dance"
// },
// {
// "count": 6,
// "name": "electronic"
// },
// {
// "count": 1,
// "name": "dark wave"
// },
// {
// "count": 0,
// "name": "britannique"
// },
// {
// "count": 4,
// "name": "british"
// },
// {
// "count": 1,
// "name": "english"
// },
// {
// "count": 2,
// "name": "uk"
// },
// {
// "count": 0,
// "name": "rock and indie"
// },
// {
// "count": 1,
// "name": "electronic rock"
// },
// {
// "count": 1,
// "name": "remix"
// },
// {
// "count": 0,
// "name": "synth pop"
// },
// {
// "count": 2,
// "name": "alternative rock"
// },
// {
// "count": 0,
// "name": "barrel"
// },
// {
// "count": 6,
// "name": "synthpop"
// },
// {
// "count": 4,
// "name": "new wave"
// },
// {
// "count": 1,
// "name": "new romantic"
// },
// {
// "count": 1,
// "name": "downtempo"
// },
// {
// "count": 0,
// "name": "producteur"
// },
// {
// "count": 0,
// "name": "producer"
// },
// {
// "count": 1,
// "name": "synth-pop"
// }
// ]
// }
// ]
//}
/// <summary>
/// Top-level result object for a MusicBrainz artist search response; only
/// the "artists" array of the response (see the sample JSON above) is
/// deserialized.
/// </summary>
[DataContract]
public class TrackArtistResult
{
	// Artists matching the search query ("artists" array in the response).
	[DataMember(Name = "artists")]
	public List<TrackArtist> Results { get; set; }
}
}
|
{
"pile_set_name": "Github"
}
|
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one
~ or more contributor license agreements. See the NOTICE file
~ distributed with this work for additional information
~ regarding copyright ownership. The ASF licenses this file
~ to you under the Apache License, Version 2.0 (the
~ "License"); you may not use this file except in compliance
~ with the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing,
~ software distributed under the License is distributed on an
~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
~ KIND, either express or implied. See the License for the
~ specific language governing permissions and limitations
~ under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<kryo.version>2.24.0</kryo.version>
</properties>
<name>samoa-flink</name>
<description>Flink engine for SAMOA</description>
<artifactId>samoa-flink</artifactId>
<parent>
<groupId>org.apache.samoa</groupId>
<artifactId>samoa</artifactId>
<version>0.5.0-incubating-SNAPSHOT</version>
</parent>
<repositories>
<repository>
<id>apache.snapshots</id>
<name>Apache Development Snapshot Repository</name>
<url>https://repository.apache.org/content/repositories/snapshots/</url>
<releases>
<enabled>false</enabled>
</releases>
<snapshots>
<enabled>true</enabled>
</snapshots>
</repository>
</repositories>
<dependencies>
<dependency>
<groupId>org.apache.samoa</groupId>
<artifactId>samoa-api</artifactId>
<version>${project.version}</version>
<exclusions>
<exclusion>
<groupId>com.esotericsoftware.kryo</groupId>
<artifactId>kryo</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<version>${slf4j-log4j12.version}</version>
</dependency>
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-streaming-java</artifactId>
<version>${flink.version}</version>
<!--<scope>provided</scope>-->
</dependency>
<dependency>
<groupId>com.esotericsoftware.kryo</groupId>
<artifactId>kryo</artifactId>
<version>${kryo.version}</version>
</dependency>
</dependencies>
<build>
<plugins>
<!-- Flink assembly -->
<plugin>
<artifactId>maven-assembly-plugin</artifactId>
<version>${maven-assembly-plugin.version}</version>
<configuration>
<finalName>SAMOA-Flink-${project.version}</finalName>
<appendAssemblyId>false</appendAssemblyId>
<attach>false</attach>
<outputDirectory>../target</outputDirectory>
<descriptorRefs>
<descriptorRef>jar-with-dependencies</descriptorRef>
</descriptorRefs>
<archive>
<manifestEntries>
<!--<Bundle-Version>${parsedVersion.osgiVersion}</Bundle-Version>-->
<Bundle-Description>${project.description}</Bundle-Description>
<Implementation-Version>${project.version}</Implementation-Version>
<Implementation-Vendor>Yahoo Labs</Implementation-Vendor>
<Implementation-Vendor-Id>SAMOA</Implementation-Vendor-Id>
</manifestEntries>
<manifest>
<addClasspath>true</addClasspath>
<mainClass>org.apache.samoa.flink.FlinkDoTask</mainClass>
</manifest>
</archive>
</configuration>
<executions>
<execution>
<id>make-assembly</id> <!-- this is used for inheritance merges -->
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>${maven-surefire-plugin.version}</version>
<configuration>
<argLine>-Xmx1G</argLine>
<redirectTestOutputToFile>false</redirectTestOutputToFile>
</configuration>
</plugin>
</plugins>
</build>
</project>
|
{
"pile_set_name": "Github"
}
|
<html><body><div class="content"><div class="item"><div class="clj"><div class="c-head">(body-x! entity x)</div><div class="c-doc"><p>Changes the <code>x</code> of the body in <code>entity</code>.</p></div></div><div class="c-head">Source</div><div class="c-src"><pre>(defn body-x!
[entity x]
(body-position! entity x (body-y entity) (body-z entity)))</pre></div></div></div></body></html>
|
{
"pile_set_name": "Github"
}
|
// Copyright 2011 The Kyua Authors.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/// \file store/metadata.hpp
/// Representation of the database metadata.
#if !defined(STORE_METADATA_HPP)
#define STORE_METADATA_HPP
#include "store/metadata_fwd.hpp"
extern "C" {
#include <stdint.h>
}
#include <cstddef>
#include "utils/sqlite/database_fwd.hpp"
namespace store {
/// Representation of the database metadata.
class metadata {
    /// Current version of the database schema.
    int _schema_version;

    /// Timestamp of the last metadata entry in the database.
    int64_t _timestamp;

    /// Private constructor; instances are obtained via fetch_latest().
    metadata(const int, const int64_t);

public:
    /// Returns the timestamp of this metadata entry.
    int64_t timestamp(void) const;

    /// Returns the schema version of this metadata entry.
    int schema_version(void) const;

    /// Loads the most recent metadata entry from the given database.
    static metadata fetch_latest(utils::sqlite::database&);
};
} // namespace store
#endif // !defined(STORE_METADATA_HPP)
|
{
"pile_set_name": "Github"
}
|
# Download, unpack and patch the Boost release used by the build.
BOOST_URL = https://dl.bintray.com/boostorg/release/1.74.0/source/boost_1_74_0.tar.bz2
BOOST_ALTERNATIVE_URL = https://sourceforge.net/projects/boost/files/boost/1.74.0/boost_1_74_0.tar.bz2/download
# NOTE(review): despite the variable name, this is a 64-hex-digit
# (SHA-256-length) value, not a 32-digit MD5 sum -- presumably
# build/download.py accepts it as-is; confirm before renaming.
BOOST_MD5 = 83bfc1507731a0906e387fc28b7ef5417d591429e51e788417fe9ff025e116b1
BOOST_TARBALL_NAME = $(notdir $(BOOST_URL))
BOOST_TARBALL = $(DOWNLOAD_DIR)/$(BOOST_TARBALL_NAME)
# Directory name inside the tarball (archive name minus the .tar.bz2 suffix).
BOOST_BASE_NAME = $(patsubst %.tar.bz2,%,$(BOOST_TARBALL_NAME))
BOOST_SRC = $(OUT)/src/$(BOOST_BASE_NAME)
BOOST_PATCHES_DIR = $(topdir)/lib/boost/patches
# Every patch listed in the quilt "series" file, as absolute-prefixed paths,
# so the untar stamp is rebuilt whenever a patch changes.
BOOST_PATCHES = $(addprefix $(BOOST_PATCHES_DIR)/,$(shell cat $(BOOST_PATCHES_DIR)/series))
$(BOOST_TARBALL): | $(DOWNLOAD_DIR)/dirstamp
	@$(NQ)echo " GET $@"
	$(Q)./build/download.py $(BOOST_URL) $(BOOST_ALTERNATIVE_URL) $(BOOST_MD5) $(DOWNLOAD_DIR)
# Stamp file marking a successfully unpacked and patched source tree.
BOOST_UNTAR_STAMP = $(OUT)/src/stamp-$(BOOST_BASE_NAME)
$(BOOST_UNTAR_STAMP): $(BOOST_TARBALL) $(BOOST_PATCHES_DIR)/series $(BOOST_PATCHES) | $(OUT)/src/dirstamp
	@$(NQ)echo " UNTAR $(BOOST_TARBALL_NAME)"
	$(Q)rm -rf $(BOOST_SRC)
	$(Q)tar xjfC $< $(OUT)/src
	$(Q)cd $(BOOST_SRC) && QUILT_PATCHES=$(abspath $(BOOST_PATCHES_DIR)) quilt push -a -q
	@touch $@
.PHONY: boost
boost: $(BOOST_UNTAR_STAMP)
# We use only the header-only Boost libraries, so no linker flags
# required.
BOOST_LDLIBS =
# reduce Boost header bloat a bit
BOOST_CPPFLAGS = -isystem $(OUT)/src/$(BOOST_BASE_NAME)
BOOST_CPPFLAGS += -DBOOST_NO_IOSTREAM -DBOOST_MATH_NO_LEXICAL_CAST
BOOST_CPPFLAGS += -DBOOST_UBLAS_NO_STD_CERR
BOOST_CPPFLAGS += -DBOOST_ERROR_CODE_HEADER_ONLY
BOOST_CPPFLAGS += -DBOOST_SYSTEM_NO_DEPRECATED
BOOST_CPPFLAGS += -DBOOST_NO_STD_LOCALE -DBOOST_LEXICAL_CAST_ASSUME_C_LOCALE
|
{
"pile_set_name": "Github"
}
|
文部科学大臣の所管に属する公益信託の引受けの許可及び監督に関する規則
(平成十九年九月二十八日文部科学省令第二十八号)
信託法
(平成十八年法律第百八号)及び信託法
の施行に伴う関係法律の整備等に関する法律(平成十八年法律第百九号)の施行に伴い、並びに公益信託ニ関スル法律
(大正十一年法律第六十二号)の第一条
から第九条
までの規定を実施するため、文部科学大臣の所管に属する公益信託の引受けの許可及び監督に関する規則(平成十二年総理府・文部省令第七号)の全部を改正する省令を次のように定める。
(趣旨)
第一条
文部科学大臣の所管に属する公益信託ニ関スル法律
(大正十一年法律第六十二号)(以下「法」という。)第一条
に規定する公益信託(公益信託に係る主務官庁の権限に属する事務の処理等に関する政令
(平成四年政令第百六十二号)第一条
の規定により文部科学大臣の同条第一項
に規定する権限に属する事務を都道府県の知事又は都道府県の教育委員会が行うこととされたものを除く。以下「公益信託」という。)に係る引受けの許可及び監督に関する手続については、この省令の定めるところによる。
(引受けの許可の申請)
第二条
法第二条第一項の規定により公益信託の引受けの許可を受けようとする者は、許可申請書に次に掲げる書類を添付して、文部科学大臣に申請しなければならない。
一
設定趣意書
二
信託行為の内容を示す書類
三
委託者となるべき者の履歴書
四
受託者となるべき者の履歴書
五
信託管理人を置く場合にあっては、信託管理人となるべき者の就任承諾書及び履歴書
六
運営委員会その他当該公益信託を適正に運営するために必要な機関(以下「運営委員会等」という。)を設置する場合にあっては、その名称及び構成員の数並びにその構成員となるべき者の就任承諾書及び履歴書
七
財産目録
八
預金、有価証券等の財産の権利及び価格を証する書類
九
引受け当初の信託事務年度及び翌信託事務年度(信託事務年度の定めがない信託にあっては、引受け後二年間)の事業計画書及び収支予算書
十
その他文部科学大臣が特に必要と認める書類
2
前項第三号から第五号までの規定において委託者、受託者又は信託管理人となるべき者が法人である場合にあっては、法人の名称、代表者の氏名、主たる事務所の所在地及び主たる業務を記載した書類を添付するものとする。
(財産の移転の報告)
第三条
引受けを許可された受託者は、遅滞なく、前条第一項第七号の財産目録記載の財産の移転を受け、その移転を終わった後一月以内に、これを証する登記所、銀行等の証明書類及び信託行為の謄本を添付して、その旨を文部科学大臣に報告しなければならない。
(事業計画書等の届出)
第四条
受託者は、毎信託事務年度(信託行為に別段の定めがないときは、毎年四月一日に始まり翌年三月三十一日に終わるものとする。以下同じ。)開始前に、翌年度の事業計画書及びこれに伴う収支予算書を文部科学大臣に届け出なければならない。
(事業計画書等の変更の届出)
第五条
受託者は、第二条第一項第九号の事業計画書及び収支予算書又は前条の事業計画書及び収支予算書を変更したときは、遅滞なく、これらを文部科学大臣に届け出なければならない。
(事業報告)
第六条
受託者は、毎信託事務年度終了後三月以内に、その年度末現在の財産目録を添付して、その年度における次に掲げる事項を文部科学大臣に報告しなければならない。
一
事業の状況
二
収支決算
(公告)
第七条
受託者は、前条の報告をした後、遅滞なく、前信託事務年度の事業及び財産の状況を公告しなければならない。
(信託の変更に係る書類の提出)
第八条
受託者は、法第五条第一項の特別の事情が生じたと認めるときは、次に掲げる書類を文部科学大臣に提出しなければならない。
一
信託の変更を必要とする事由を記載した書類
二
信託の変更案を記載した書類及び新旧対照表
2
前項の信託の変更が当該公益信託の事業内容に係るものである場合にあっては、同項各号の書類のほか、変更後の事業計画書及び収支予算書を添付しなければならない。
(信託の変更の許可の申請)
第九条
受託者は、法第六条の規定により信託の変更の許可を受けようとするときは、許可申請書に次に掲げる書類を添付して、文部科学大臣に申請しなければならない。
一
信託の変更を必要とする事由を記載した書類
二
信託の変更をする根拠となる信託法
(平成十八年法律第百八号)の規定(同法第百四十九条第四項
の別段の定めがある場合には、当該定めの内容を含む。)を記載した書類
三
信託の変更案を記載した書類及び新旧対照表
2
前項の信託の変更が当該公益信託の事業内容に係るものである場合にあっては、同項各号の書類のほか、変更後の事業計画書及び収支予算書を添付しなければならない。
(信託の併合の許可の申請)
第十条
受託者は、法第六条の規定により信託の併合の許可を受けようとするときは、許可申請書に次に掲げる書類を添付して、文部科学大臣に申請しなければならない。
一
信託の併合を必要とする事由を記載した書類
二
信託の併合をする根拠となる信託法
の規定(同法第百五十一条第三項
の別段の定めがある場合には、当該定めの内容を含む。)を記載した書類
三
信託の併合後の信託行為の内容を記載した書類及び新旧対照表
四
信託法第百五十二条第二項
の公告及び催告又は同条第三項
の公告をしたことその他信託法
の定める信託の併合の手続を経たことを証する書類
2
第二条第一項第五号から第十号までの規定は、前項の許可を受けようとする受託者について準用する。この場合において、同条第九号中「引受け」とあるのは「信託の併合」と読み替えるものとする。
(吸収信託分割の許可の申請)
第十一条
受託者は、法第六条の規定により吸収信託分割の許可を受けようとするときは、許可申請書に次に掲げる書類を添付して、文部科学大臣に申請しなければならない。
一
吸収信託分割を必要とする事由を記載した書類
二
吸収信託分割をする根拠となる信託法
の規定(同法第百五十五条第三項
の別段の定めがある場合には、当該定めの内容を含む。)を記載した書類
三
吸収信託分割後の信託行為の内容を記載した書類及び新旧対照表
四
信託法第百五十六条第二項
の公告及び催告又は同条第三項
の公告をしたことその他信託法
の定める吸収信託分割の手続を経たことを証する書類
(新規信託分割の許可の申請)
第十二条
受託者は、法第六条の規定により新規信託分割の許可を受けようとするときは、許可申請書に次に掲げる書類を添付して、文部科学大臣に申請しなければならない。
一
新規信託分割を必要とする事由を記載した書類
二
新規信託分割をする根拠となる信託法
の規定(同法第百五十九条第三項
の別段の定めがある場合には、当該定めの内容を含む。)を記載した書類
三
新規信託分割後の信託行為の内容を記載した書類及び新旧対照表
四
信託法第百六十条第二項
の公告及び催告又は同条第三項
の公告をしたことその他信託法
の定める新規信託分割の手続を経たことを証する書類
2
第二条第一項第五号から第十号までの規定は、前項の許可を受けようとする受託者について準用する。この場合において、同条第九号中「引受け」とあるのは「新規信託分割」と読み替えるものとする。
(受託者の辞任の許可の申請)
第十三条
受託者は、法第七条の規定により辞任の許可を受けようとするときは、許可申請書に次に掲げる書類を添付して、文部科学大臣に申請しなければならない。
一
辞任しようとする事由を記載した書類
二
信託事務の処理の状況並びに信託財産に属する財産及び信託財産責任負担債務の状況を記載した書類
三
新たな受託者の選任に関する意見を記載した書類
(検査役の選任の申請)
第十四条
委託者又は信託管理人は、信託法第四十六条第一項
及び法第八条
の規定により検査役の選任を請求しようとするときは、申請書に次に掲げる書類を添付して、文部科学大臣に申請しなければならない。
一
選任を請求する事由を記載した書類
二
検査役の選任に関する意見を記載した書類
(受託者の解任の申請)
第十五条
委託者又は信託管理人は、信託法第五十八条第四項
及び法第八条
の規定により受託者の解任を請求しようとするときは、申請書に次に掲げる書類を添付して、文部科学大臣に申請しなければならない。
一
解任を請求する事由を記載した書類
二
新たな受託者の選任に関する意見を記載した書類
(新たな受託者の選任の申請)
第十六条
委託者、信託管理人又は運営委員会等の構成員(以下「利害関係人」という。)は、信託法第六十二条第四項
及び法第八条
の規定により新たな受託者の選任を請求しようとするときは、申請書に次に掲げる書類を添付して、文部科学大臣に申請しなければならない。
一
任務終了の事由を記載した書類
二
新たな受託者の選任に関する意見を記載した書類
三
新たな受託者となるべき者に係る第二条第一項第四号に掲げる書類及び就任承諾書
四
信託事務の処理の状況並びに信託財産に属する財産及び信託財産責任負担債務の状況を記載した書類
(信託財産管理命令の申請)
第十七条
利害関係人は、信託法第六十三条第一項
及び法第八条
の規定により信託財産管理者による管理を命ずる処分(以下この条において「信託財産管理命令」という。)を請求しようとするときは、申請書に次に掲げる書類を添付して、文部科学大臣に申請しなければならない。
一
受託者の任務終了の事由を記載した書類
二
信託財産管理命令を請求する事由を記載した書類
三
信託財産管理者の選任に関する意見を記載した書類
(保存行為等の範囲を超える行為の許可の申請)
第十八条
信託財産管理者は、信託法第六十六条第四項
及び法第八条
の規定による許可を受けようとするときは、許可申請書に次に掲げる書類を添付して、文部科学大臣に申請しなければならない。
一
許可を受けようとする行為の概要を記載した書類
二
許可を受けようとする事由を記載した書類
2
前項の規定は、信託法第七十四条第六項
において準用する同法第六十六条第四項
及び法第八条
の規定により保存行為等の範囲を超える行為の許可を受けようとする信託財産法人管理人について準用する。
(信託財産管理者等の辞任の許可の申請)
第十九条
信託財産管理者は、信託法第七十条
において読み替えて準用する同法第五十七条第二項
及び法第八条
の規定により辞任の許可を受けようとするときは、許可申請書に次に掲げる書類を添付して、文部科学大臣に申請しなければならない。
一
辞任しようとする事由を記載した書類
二
信託事務の処理の状況並びに信託財産に属する財産及び信託財産責任負担債務の状況を記載した書類
三
新たな信託財産管理者の選任に関する意見を記載した書類
2
前項の規定は、信託法第七十四条第六項
において準用する同法第七十条
の規定により辞任の許可を受けようとする信託財産法人管理人について準用する。この場合において、前項第三号中「新たな信託財産管理者」とあるのは、「新たな信託財産法人管理人」と読み替えるものとする。
(信託財産管理者等の解任の申請)
第二十条
委託者又は信託管理人は、信託法第七十条
において準用する同法第五十八条第四項
及び法第八条
の規定により信託財産管理者の解任を請求しようとするときは、申請書に次に掲げる書類を添付して、文部科学大臣に申請しなければならない。
一
解任を請求する事由を記載した書類
二
新たな信託財産管理者の選任に関する意見を記載した書類
2
前項の規定は、信託法第七十四条第六項
において準用する同法第七十条
の規定により信託財産法人管理人の解任を請求しようとする委託者又は信託管理人について準用する。この場合において、前項第二号中「新たな信託財産管理者」とあるのは、「新たな信託財産法人管理人」と読み替えるものとする。
(信託財産法人管理命令の申請)
第二十一条
利害関係人は、信託法第七十四条第二項
及び法第八条
の規定により信託財産法人管理人による管理を命ずる処分(以下この条において「信託財産法人管理命令」という。)を請求しようとするときは、申請書に次に掲げる書類を添付して、文部科学大臣に申請しなければならない。
一
受託者の死亡の事実を記載した書類
二
信託財産法人管理命令を請求する事由を記載した書類
三
信託財産法人管理人の選任に関する意見を記載した書類
(信託管理人の選任の申請)
第二十二条
利害関係人は、信託法第百二十三条第四項
又は同法第二百五十八条第六項
及び法第八条
の規定により信託管理人の選任を請求しようとするときは、申請書に次に掲げる書類を添付して、文部科学大臣に申請しなければならない。
一
選任を請求する事由を記載した書類
二
信託管理人となるべき者に係る第二条第一項第五号に掲げる書類
(信託管理人の辞任の許可の申請)
第二十三条
信託管理人は、信託法第百二十八条第二項
において準用する同法第五十七条第二項
及び法第八条
の規定により辞任の許可を受けようとするときは、許可申請書に次に掲げる書類を添付して、文部科学大臣に申請しなければならない。
一
辞任しようとする事由を記載した書類
二
信託事務の処理の状況並びに信託財産に属する財産及び信託財産責任負担債務の状況を記載した書類
三
新たな信託管理人の選任に関する意見を記載した書類
(信託管理人の解任の申請)
第二十四条
委託者又は他の信託管理人は、信託法第百二十八条第二項
において準用する同法第五十八条第四項
及び法第八条
の規定により信託管理人の解任を請求しようとするときは、申請書に次に掲げる書類を添付して、文部科学大臣に申請しなければならない。
一
解任を請求する事由を記載した書類
二
新たな信託管理人の選任に関する意見を記載した書類
(新たな信託管理人の選任の申請)
第二十五条
利害関係人は、信託法第百二十九条第一項
において準用する同法第六十二条第四項
及び法第八条
の規定により新たな信託管理人の選任を請求しようとするときは、申請書に次に掲げる書類を添付して、文部科学大臣に申請しなければならない。
一
信託管理人の任務終了の事由を記載した書類
二
新たな信託管理人となるべき者に係る第二条第一項第五号に掲げる書類
(信託の終了の申請)
第二十六条
委託者、受託者又は信託管理人は、信託法第百六十五条第一項
及び法第八条
の規定により信託の終了を請求しようとするときは、申請書に次に掲げる書類を添付して、文部科学大臣に申請しなければならない。
一
信託の終了を請求する事由を記載した書類
二
信託事務の処理の状況並びに信託財産に属する財産及び信託財産責任負担債務の状況を記載した書類
三
残余財産の処分の見込みに関する書類
(諸届出)
第二十七条
受託者は、第三条から第六条までに定めるもののほか、次の各号のいずれかに該当するときは、遅滞なく、文部科学大臣に届け出なければならない。
一
委託者が死亡したとき(委託者が法人である場合にあっては、解散したとき。)。
二
委託者又は受託者の氏名、職業又は住所に変更があったとき(委託者又は受託者が法人である場合にあっては、その名称、代表者の氏名、主たる事務所の所在地又は主たる業務に変更があったとき。)。
三
信託管理人の氏名、職業又は住所に変更があったとき(信託管理人が法人である場合にあっては、その名称、代表者の氏名、主たる事務所の所在地又は主たる業務に変更があったとき。)。
四
信託管理人又は運営委員会等の構成員に変更があったとき。
2
前項第四号による届出の場合(運営委員会等の構成員が再任である場合を除く。)は、第二条第一項第五号又は第六号の書類を添付しなければならない。
(書類及び帳簿の備付け)
第二十八条
受託者は、その事務所に、次に掲げる書類及び帳簿を備えなければならない。ただし、他の法令の規定により、これらに代わる書類及び帳簿を備えたときは、この限りでない。
一
信託行為及びこれに附属する書類
二
利害関係人の名簿及び履歴書
三
運営委員会等の議事に関する書類
四
収入支出に関する帳簿及び証拠書類
五
資産台帳及び負債台帳
六
官公署往復書類
七
その他必要な書類及び帳簿
(業務の監督)
第二十九条
文部科学大臣は、法第三条及び同法第四条第一項の規定により、受託者に対し、報告を求め、又は資料を提出させることができ、また、その職員をして公益信託の業務の処理について実地に検査させることができる。
2
文部科学大臣は、前項の検査の結果、是正する必要があると認めるときは、法第四条第一項の規定により、受託者に対し、財産の供託その他必要な処分を命ずることができる。
3
文部科学大臣は、公益信託の監督上必要があると認めるときは、法第四条第一項の規定により、事業計画及びこれに伴う収支予算について変更を命じ、又は運営委員会等の設置を命ずることができる。
4
第一項の規定により、職員が実地検査をする場合においては、その身分を示す証明書を携帯し、関係人にこれを提示しなければならない。
(公益信託の終了の報告等)
第三十条
受託者は、信託が終了したときには、終了後一月以内に、信託の終了事由を記載した書類を文部科学大臣に提出しなければならない。
2
清算受託者は、信託の清算が結了したときは、清算結了後一月以内に、次に掲げる書類を添えた報告書を文部科学大臣に提出しなければならない。
一
信託の清算が結了した日の属する信託事務年度の事業状況報告書及び収支決算書
二
信託の清算結了時における財産目録
三
残余財産の処分に関する書類
附 則
この省令は、平成十九年九月三十日から施行する。
|
{
"pile_set_name": "Github"
}
|
@rem
@rem Copyright 2015 the original author or authors.
@rem
@rem Licensed under the Apache License, Version 2.0 (the "License");
@rem you may not use this file except in compliance with the License.
@rem You may obtain a copy of the License at
@rem
@rem http://www.apache.org/licenses/LICENSE-2.0
@rem
@rem Unless required by applicable law or agreed to in writing, software
@rem distributed under the License is distributed on an "AS IS" BASIS,
@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@rem See the License for the specific language governing permissions and
@rem limitations under the License.
@rem
@if "%DEBUG%" == "" @echo off
@rem ##########################################################################
@rem
@rem Gradle startup script for Windows
@rem
@rem Locates java.exe (via JAVA_HOME or PATH) and launches the Gradle
@rem wrapper main class with the wrapper jar on the classpath.
@rem
@rem ##########################################################################
@rem Set local scope for the variables with windows NT shell
if "%OS%"=="Windows_NT" setlocal
set DIRNAME=%~dp0
if "%DIRNAME%" == "" set DIRNAME=.
set APP_BASE_NAME=%~n0
set APP_HOME=%DIRNAME%
@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m"
@rem Find java.exe
if defined JAVA_HOME goto findJavaFromJavaHome
set JAVA_EXE=java.exe
%JAVA_EXE% -version >NUL 2>&1
if "%ERRORLEVEL%" == "0" goto init
echo.
echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:findJavaFromJavaHome
set JAVA_HOME=%JAVA_HOME:"=%
set JAVA_EXE=%JAVA_HOME%/bin/java.exe
if exist "%JAVA_EXE%" goto init
echo.
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:init
@rem Get command-line arguments, handling Windows variants
if not "%OS%" == "Windows_NT" goto win9xME_args
:win9xME_args
@rem Slurp the command line arguments.
@rem NOTE(review): CMD_LINE_ARGS=%* captures all arguments at once, so the
@rem slurp label never actually loops; this is the stock generated script.
set CMD_LINE_ARGS=
set _SKIP=2
:win9xME_args_slurp
if "x%~1" == "x" goto execute
set CMD_LINE_ARGS=%*
:execute
@rem Setup the command line
set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
@rem Execute Gradle
"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
:end
@rem End local scope for the variables with windows NT shell
if "%ERRORLEVEL%"=="0" goto mainEnd
:fail
rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
rem the _cmd.exe /c_ return code!
if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
exit /b 1
:mainEnd
if "%OS%"=="Windows_NT" endlocal
:omega
|
{
"pile_set_name": "Github"
}
|
@rem
@rem Copyright 2015 the original author or authors.
@rem
@rem Licensed under the Apache License, Version 2.0 (the "License");
@rem you may not use this file except in compliance with the License.
@rem You may obtain a copy of the License at
@rem
@rem http://www.apache.org/licenses/LICENSE-2.0
@rem
@rem Unless required by applicable law or agreed to in writing, software
@rem distributed under the License is distributed on an "AS IS" BASIS,
@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@rem See the License for the specific language governing permissions and
@rem limitations under the License.
@rem
@if "%DEBUG%" == "" @echo off
@rem ##########################################################################
@rem
@rem Gradle startup script for Windows
@rem
@rem ##########################################################################
@rem Set local scope for the variables with windows NT shell
if "%OS%"=="Windows_NT" setlocal
set DIRNAME=%~dp0
if "%DIRNAME%" == "" set DIRNAME=.
set APP_BASE_NAME=%~n0
set APP_HOME=%DIRNAME%
@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m"
@rem Find java.exe
if defined JAVA_HOME goto findJavaFromJavaHome
set JAVA_EXE=java.exe
%JAVA_EXE% -version >NUL 2>&1
if "%ERRORLEVEL%" == "0" goto init
echo.
echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:findJavaFromJavaHome
set JAVA_HOME=%JAVA_HOME:"=%
set JAVA_EXE=%JAVA_HOME%/bin/java.exe
if exist "%JAVA_EXE%" goto init
echo.
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:init
@rem Get command-line arguments, handling Windows variants
if not "%OS%" == "Windows_NT" goto win9xME_args
:win9xME_args
@rem Slurp the command line arguments.
set CMD_LINE_ARGS=
set _SKIP=2
:win9xME_args_slurp
if "x%~1" == "x" goto execute
set CMD_LINE_ARGS=%*
:execute
@rem Setup the command line
set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
@rem Execute Gradle
"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
:end
@rem End local scope for the variables with windows NT shell
if "%ERRORLEVEL%"=="0" goto mainEnd
:fail
rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
rem the _cmd.exe /c_ return code!
if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
exit /b 1
:mainEnd
if "%OS%"=="Windows_NT" endlocal
:omega
|
{
"pile_set_name": "Github"
}
|
"support_item_gold_ranked"
{
"challengetype" "148"
"desc" "#DOTA_ChallengeDesc_Support_Item_Gold"
"status_text" "#DOTA_ChallengeStatusText_Support_Item_Gold"
"events"
{
"matching_type" "game_state"
"query"
{
"state_values"
{
"1"
{
"key" "support_gold_spent"
"aggregator" "count"
}
}
}
"progress_stored_in" "1"
"post_tests"
{
"test_value"
{
"storage" "1"
"compare" ">="
"amount" "<support_gold_spent>"
}
}
}
"variables"
{
"<support_gold_spent>"
{
"format" "int"
"index" "0"
}
}
}
|
{
"pile_set_name": "Github"
}
|
#!/bin/bash
#@license RequireJS Copyright (c) 2010-2011, The Dojo Foundation All Rights Reserved.
#Available via the MIT or new BSD license.
#see: http://github.com/jrburke/requirejs for details

# Builds a versioned RequireJS distribution under ../../requirejs-build/:
# copies r.js and the browser plugins into <version>/comments/ and produces
# Closure-minified copies in <version>/minified/.

#version should be something like 0.9.0beta or 0.9.0
version=$1
# Quote the expansion: unquoted, a multi-word first argument would make
# the test expand to more than one word and abort the script with a
# syntax error instead of the intended usage message.
if [ -z "$version" ]; then
    echo "Please pass in a version number"
    exit 1
fi

myDir=`cd \`dirname "$0"\`; pwd`

# First update the sub-projects with the latest.
cd ..
./updatesubs.sh
cd dist

# Setup a build directory
rm -rf ../../requirejs-build
mkdir ../../requirejs-build

# Create the version output dir
cd ../../requirejs-build
mkdir $version
mkdir $version/minified
mkdir $version/comments

# Copy over the r.js file, and set up that project for a dist checkin.
cp ../r.js/r.js $version/r.js
cp ../r.js/r.js ../r.js/dist/r-$version.js

# Copy over basic script deliverables
cp $myDir/../require.js $version/comments/require.js
cp $myDir/../text.js $version/comments/text.js
cp $myDir/../domReady.js $version/comments/domReady.js
cp $myDir/../order.js $version/comments/order.js
cp $myDir/../i18n.js $version/comments/i18n.js

# Minify any of the browser-based JS files
cd $version/comments
java -jar ../../../r.js/lib/closure/compiler.jar --js require.js --js_output_file ../minified/require.js
java -jar ../../../r.js/lib/closure/compiler.jar --js text.js --js_output_file ../minified/text.js
java -jar ../../../r.js/lib/closure/compiler.jar --js domReady.js --js_output_file ../minified/domReady.js
java -jar ../../../r.js/lib/closure/compiler.jar --js order.js --js_output_file ../minified/order.js
java -jar ../../../r.js/lib/closure/compiler.jar --js i18n.js --js_output_file ../minified/i18n.js
cd ../../../
|
{
"pile_set_name": "Github"
}
|
// NeL - MMORPG Framework <https://wiki.ryzom.dev/>
// Copyright (C) 2015 Winch Gate Property Limited
//
// This source file has been modified by the following contributors:
// Copyright (C) 2016 Jan BOON (Kaetemi) <jan.boon@kaetemi.be>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
// Author: Jan BOON (Kaetemi) <jan.boon@kaetemi.be>
#include <nel/misc/types_nl.h>
#include "nel/pipeline/project_config.h"
#ifdef NL_OS_WINDOWS
# include <Windows.h>
#else
# include <stdlib.h>
#endif
#include <algorithm>
#include <nel/misc/debug.h>
#include <nel/misc/path.h>
#include <nel/misc/config_file.h>
using namespace std;
using namespace NLMISC;
namespace NLPIPELINE {
// Path of the asset-level nel.cfg most recently loaded by init().
TPathString CProjectConfig::s_AssetConfigPath;
// Path of the owning project's .cfg file; empty when no project owns the asset.
TPathString CProjectConfig::s_ProjectConfigPath;
// One loaded CConfigFile per config root; owned here, freed by cleanup().
std::vector<NLMISC::CConfigFile *> CProjectConfig::s_ConfigFiles;
// Directory for each entry of s_ConfigFiles (kept index-aligned, see nlassert in init()).
std::vector<TPathString> CProjectConfig::s_ConfigPaths;
// Static instance whose destructor frees the loaded configs at shutdown.
CProjectConfig CProjectConfig::s_Instance;
// Modification timestamps used by init() to skip redundant reloads.
uint32 CProjectConfig::s_AssetConfigModification;
uint32 CProjectConfig::s_ProjectConfigModification;
// Flags the current configuration was initialized with.
CProjectConfig::Flags CProjectConfig::s_InitFlags = (CProjectConfig::Flags)0;
// Display name from the project's "ProjectName" variable ("NeL Project" fallback).
std::string CProjectConfig::s_ProjectName;
// Search paths already registered with CPath, to avoid duplicate addSearchPath calls.
static std::set<TPathString> s_SearchPaths;
void CProjectConfig::cleanup()
{
for (std::vector<NLMISC::CConfigFile *>::iterator it(s_ConfigFiles.begin()), end(s_ConfigFiles.end()); it != end; ++it)
delete *it;
s_ConfigFiles.clear();
}
// Destructor of the static s_Instance: releases the loaded config files
// when the process shuts down.
CProjectConfig::~CProjectConfig()
{
	cleanup();
}
// Locates the nel.cfg governing `asset` (walking up the directory tree),
// finds the owning project definition (a .cfg under the user's NeL
// application "projects" directory listing the asset root), and loads the
// nel.cfg of every project directory. Returns false when no nel.cfg exists
// above `asset`. When `partial` is true, only the asset-level config is
// considered. Results are cached by file modification date and flags, so a
// repeated call with unchanged inputs returns early without reloading.
bool CProjectConfig::init(const std::string &asset, Flags flags, bool partial)
{
	// Walk up from the asset path until a directory containing nel.cfg is found.
	TPathString rootPath = NLMISC::CPath::standardizePath(asset, false);
	TPathString configPath = rootPath + "/nel.cfg";
	while (!CFile::fileExists(configPath))
	{
		std::string::size_type sep = CFile::getLastSeparator(rootPath);
		if (sep == string::npos)
			return false;
		rootPath = rootPath.substr(0, sep);
		if (rootPath.empty())
			return false;
		configPath = rootPath + "/nel.cfg";
	}
	rootPath += "/";
	uint32 configFileModification = CFile::getFileModificationDate(configPath);
	// Same asset config file, unchanged on disk, loaded with the same flags:
	// candidates for skipping the reload below.
	bool assetConfigSame = configPath == s_AssetConfigPath && s_AssetConfigModification == configFileModification && s_InitFlags == flags;
	std::vector<TPathString> configRootPaths;
	TPathString projectConfigPath;
	uint32 projectConfigModification = 0;
	std::string projectName;
	if (partial)
	{
		if (assetConfigSame && s_ProjectConfigPath.empty())
			return true; // Do not reload
	}
	else
	{
		if (assetConfigSame && !s_ProjectConfigPath.empty() && CFile::fileExists(s_ProjectConfigPath))
		{
			projectConfigModification = CFile::getFileModificationDate(s_ProjectConfigPath);
			if (s_ProjectConfigModification == projectConfigModification)
				return true; // Do not reload
		}
		// Search for project and load up all root paths
		std::vector<std::string> files;
		CPath::getPathContent(CPath::getApplicationDirectory("NeL", true) + "/projects", false, false, true, files);
		for (std::vector<std::string>::iterator it(files.begin()), end(files.end()); it != end; ++it)
		{
			const std::string& file = *it;
			// Only *.cfg files under the projects directory are project definitions.
			if (file.length() >= 4 && (file.compare(file.length() - 4, 4, ".cfg") == 0))
			{
				CConfigFile project;
				project.load(file);
				CConfigFile::CVar &directories = project.getVar("Directories");
				// A project owns this asset when one of its listed directories
				// is exactly the asset root found above.
				bool isProject = false;
				for (uint i = 0; i < directories.size(); ++i)
				{
					if (rootPath == CPath::standardizePath(directories.asString(i), true))
					{
						isProject = true;
						break;
					}
				}
				if (isProject)
				{
					projectConfigModification = CFile::getFileModificationDate(file);
					projectConfigPath = file;
					// Keep every project directory that actually has a nel.cfg.
					for (uint i = 0; i < directories.size(); ++i)
					{
						std::string dir = CPath::standardizePath(directories.asString(i), true);
						std::string cfgPath = dir + "nel.cfg";
						if (CFile::fileExists(cfgPath))
							configRootPaths.push_back(dir);
					}
					projectName = project.getVar("ProjectName").asString();
					break;
				}
			}
		}
	}
	// No owning project found: fall back to a standalone pseudo-project
	// rooted at the asset directory itself.
	if (projectConfigPath.empty())
	{
		projectName = "NeL Project";
		configRootPaths.push_back(rootPath);
		projectConfigModification = 0;
	}
	nldebug("Initializing project config '%s'", projectConfigPath.empty() ? configPath.c_str() : projectConfigPath.c_str());
	// Drop previously loaded configs and registered search paths before repopulating.
	release();
	s_InitFlags = flags;
	s_AssetConfigPath = configPath;
	s_AssetConfigModification = configFileModification;
	s_ProjectConfigPath = projectConfigPath;
	s_ProjectConfigModification = projectConfigModification;
	s_ProjectName = projectName;
	s_ConfigPaths = configRootPaths;
	// Load each root's nel.cfg; each declares a unique "Identifier".
	std::map<std::string, CConfigFile *> configFiles;
	for (std::vector<TPathString>::iterator it(configRootPaths.begin()), end(configRootPaths.end()); it != end; ++it)
	{
		const std::string &dir = *it;
		const std::string &cfgPath = *it + "nel.cfg";
		CConfigFile *cfgFile = new CConfigFile();
		cfgFile->load(cfgPath);
		std::string identifier = cfgFile->getVar("Identifier").asString();
		if (configFiles.find(identifier) != configFiles.end()) // Identifier already exists
		{
			if (dir == rootPath)
			{
				// Replace config that was already added, asset root gets priority
				std::vector<NLMISC::CConfigFile *>::iterator old = std::find(s_ConfigFiles.begin(), s_ConfigFiles.end(), configFiles[identifier]);
				uint idx = old - s_ConfigFiles.begin();
				s_ConfigFiles.erase(old);
				s_ConfigPaths.erase(s_ConfigPaths.begin() + idx);
			}
			else
			{
				// Skip, first listed config gets priority
				// NOTE(review): cfgFile is not deleted on this skip path —
				// looks like a leak; confirm before changing.
				s_ConfigPaths.erase(s_ConfigPaths.begin() + s_ConfigFiles.size());
				continue;
			}
		}
		// Expose the identifier as an environment variable resolving to its directory.
#ifdef NL_OS_WINDOWS
		SetEnvironmentVariableA(identifier.c_str(), dir.c_str());
#else
		setenv(identifier.c_str(), dir.c_str(), 1);
#endif
		configFiles[identifier] = cfgFile;
		s_ConfigFiles.push_back(cfgFile);
	}
	nlassert(s_ConfigFiles.size() == s_ConfigPaths.size());
	if (flags & DatabaseTextureSearchPaths)
	{
		searchDirectories("DatabaseTextureSearchPaths");
	}
	return true;
}
void CProjectConfig::searchDirectories(const char *var)
{
for (uint i = 0; i < s_ConfigFiles.size(); ++i)
{
CConfigFile *cfg = s_ConfigFiles[i];
const TPathString &dir = s_ConfigPaths[i];
CConfigFile::CVar *paths = cfg->getVarPtr(var);
if (paths)
{
for (uint i = 0; i < paths->size(); i++)
{
TPathString path = paths->asString(i);
if (!CPath::isAbsolutePath(path)) path = dir + path;
path = CPath::standardizePath(path);
if (s_SearchPaths.find(path) == s_SearchPaths.end())
{
CPath::addSearchPath(path);
s_SearchPaths.insert(path);
}
}
}
}
}
// Clears the duplicate-tracking set, wipes the CPath search-path map,
// and frees all loaded config files. Called by init() before reloading
// and usable by callers to tear the configuration down.
void CProjectConfig::release()
{
	s_SearchPaths.clear();
	CPath::clearMap();
	cleanup();
}
} /* namespace NLPIPELINE */
/* end of file */
|
{
"pile_set_name": "Github"
}
|
<!DOCTYPE html>
<html>
<head>
<title>Hello Endpoints!</title>
<!-- base.js defines google.appengine.samples.hello, used by init() below -->
<script async src="/js/base.js"></script>
<link rel="stylesheet" href="/bootstrap/css/bootstrap.css">
<link rel="stylesheet" href="/bootstrap/css/bootstrap-responsive.css">
<style>
body {
padding-top: 40px;
padding-bottom: 40px;
background-color: #f5f5f5;
}
blockquote {
margin-bottom: 10px;
border-left-color: #bbb;
}
form {
margin-top: 10px;
}
form label {
width: 90px;
display: inline-block;
}
.form-signin input[type="text"] {
font-size: 16px;
height: auto;
margin-bottom: 15px;
padding: 7px 9px;
}
.row {
margin-left: 0px;
margin-top: 10px;
overflow: scroll;
}
</style>
</head>
<body>
<!-- Top navbar with a sign-in button for the authenticated endpoint demo -->
<div class="navbar navbar-inverse navbar-fixed-top">
<div class="navbar-inner">
<div class="container">
<button type="button" class="btn btn-navbar" data-toggle="collapse" data-target=".nav-collapse">
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
<a class="brand" href="#">Hello Endpoints!</a>
<div class="nav-collapse collapse pull-right">
<a href="javascript:void(0);" class="btn" id="signinButton">Sign in</a>
</div>
</div>
</div>
</div>
<div class="container">
<!-- API responses are appended here by the handlers wired up in base.js -->
<output id="outputLog"></output>
<!-- Each form targets one sample endpoint; onsubmit returns false so the
     page never reloads and the JS click handlers do the work instead. -->
<form onsubmit="return false">
<h2>Get Greeting</h2>
<div><label for="id">Greeting ID:</label><input id="id" /></div>
<div><input id="getGreeting" type="submit" class="btn btn-small" value="Submit"></div>
</form>
<form onsubmit="return false">
<h2>List Greetings</h2>
<div><input id="listGreeting" type="submit" class="btn btn-small" value="Submit"></div>
</form>
<form onsubmit="return false">
<h2>Multiply Greetings</h2>
<div><label for="greeting">Greeting:</label><input id="greeting" /></div>
<div><label for="count">Count:</label><input id="count" /></div>
<div><input id="multiplyGreetings" type="submit" class="btn btn-small" value="Submit"></div>
</form>
<form onsubmit="return false">
<h2>Authenticated Greeting</h2>
<!-- Disabled until the user signs in -->
<div><input id="authedGreeting" type="submit" class="btn btn-small" disabled value="Submit"></div>
</form>
<script>
// Called by the Google API client loader once client.js is ready;
// points the sample at this host's Endpoints API root.
function init() {
google.appengine.samples.hello.init('//' + window.location.host + '/_ah/api');
}
</script>
<script src="https://apis.google.com/js/client.js?onload=init"></script>
</div>
</body>
</html>
|
{
"pile_set_name": "Github"
}
|
# Developing iOS 10 Apps with Swift 字幕简体中文翻译项目
## iOS 11 已出,请大家移步 [https://github.com/ApolloZhu/Developing-iOS-11-Apps-with-Swift](https://github.com/ApolloZhu/Developing-iOS-11-Apps-with-Swift)
<details>
<summary>
已完成前六集翻译,<a href="https://github.com/ApolloZhu/Developing-iOS-10-Apps-with-Swift/archive/master.zip">点此下载</a>(字幕在 <code>subtitles</code> 文件夹里),其他课程录像和资料等可以查看<a href="./tools/download.md">下载列表</a>。但本项目在 <a href="https://github.com/ApolloZhu/Developing-iOS-11-Apps-with-Swift">iOS 11 翻译</a> 完成前不再继续翻译。如有需要可以参考其他项目提供的机器翻译,比如 <a href="https://github.com/iam36/Translate-Developing-iOS-10-Apps-with-Swift">有道字典机翻</a> 和 <a href="https://github.com/JaminZhou/Developing-iOS-10-Apps-with-Swift">另一个机翻+粗校</a>。<strong><u>本项目不对机器翻译内容负责。</u></strong>
</summary>
[English Version (Outdated)](./en/README.md)
[](#backers) [](#sponsors)
### 版权说明
<a rel="license" href="https://creativecommons.org/licenses/by-nc-sa/3.0/us/deed.zh"><img alt="知识共享许可协议" style="border-width:0" src="https://i.creativecommons.org/l/by-nc-sa/3.0/us/88x31.png" /></a>
本项目(包括字幕,代码等),以及原斯坦福课程均采用 <a rel="license" href="https://creativecommons.org/licenses/by-nc-sa/3.0/us/deed.zh">知识共享 署名-非商业性使用-相同方式共享 3.0 美国 许可协议</a> 进行许可。
----
如果您感兴趣,有能力,我们欢迎您参与翻译/校对本项目。详情见 [任务相关说明](./CONTRIBUTING.md)([失效备份](https://github.com/ApolloZhu/Developing-iOS-10-Apps-with-Swift/blob/master/CONTRIBUTING.md))。
关于如何获得更新,请查看 [常见问题与解答](./SUPPORT.md) 中的字幕更新章节。
如果您想支持我们,请点击项目右上角的 <a class="github-button" href="https://github.com/ApolloZhu/Developing-iOS-10-Apps-with-Swift" data-icon="octicon-star" data-show-count="true" aria-label="Star ApolloZhu/Developing-iOS-10-Apps-with-Swift on GitHub">Star</a> 按钮来 Star 本项目。其他如分享和打赏等方式请查看 [常见问题与解答](./SUPPORT.md) 中的支持我们章节。
### 下载
(从 iTunes U 提取),其中点击 `中英字幕` 就可以下载已经翻译好了的字幕。另一个方案是 [下载整个项目](https://github.com/ApolloZhu/Developing-iOS-10-Apps-with-Swift/archive/master.zip)。`subtitles` 文件夹中是中英字幕(不一定都翻译了),`en/subtitles` 文件夹中是纯英文字幕。
字幕的格式是 `.srt`,所以您可能需要用带外挂字幕功能的视频播放器,比如 [VLC](http://www.videolan.org/vlc/index.zh.html) 或 [IINA](https://lhc70000.github.io/iina/zh-cn/) 等才可使用。其他字幕使用问题的解答请查看 [常见问题与解答](./SUPPORT.md) 的字幕使用章节。
如果您有任何建议或意见,或是在观看视频时发现了翻译有误的地方,请通过 [常见问题与解答](./SUPPORT.md) 中提供的联系方式反馈。
#### 课程相关资源
- iTunes U:[Developing iOS 10 Apps with Swift - Free Course by Stanford](https://itunes.apple.com/us/course/developing-ios-10-apps-with-swift/id1198467120)
- [RSS 源](https://p1-u.itunes.apple.com/WebObjects/LZStudent.woa/ra/feed/COETAIHAJLZIQXJI)
- [课程专辑封面](http://a2.mzstatic.com/us/r30/CobaltPublic122/v4/6b/66/d0/6b66d0af-d47f-37d6-9993-9c5237401a49/d3_64_2x.png)
- [首页推广图片](http://a2.mzstatic.com/us/r30/Features122/v4/79/cb/ce/79cbce27-b961-9dfb-f044-21686543edf8/flowcase_1360_520_2x.jpeg)
- CS 193P 课程地址:[CS 193P iPhone Application Development](http://web.stanford.edu/class/cs193p/cgi-bin/drupal/)
- Paul Hegarty:[piazza](https://piazza.com/professors/show/paul_hegarty)
#### 项目相关资源
- [Developing iOS 9 Apps with Swift 字幕翻译](https://github.com/SwiftGGTeam/Developing-iOS-9-Apps-with-Swift)
- [Developing iOS 8 Apps with Swift 字幕翻译](https://github.com/X140Yu/Developing_iOS_8_Apps_With_Swift)
- 提取字幕:[CCExtractor](https://www.ccextractor.org/)
- 字幕重排:[X140Yu/Developing_iOS_8_Apps_With_Swift/tools/trim.rb](https://github.com/X140Yu/Developing_iOS_8_Apps_With_Swift/blob/master/tools/trim.rb)
----
## Contributors
This project exists thanks to all the people who contribute. [[Contribute]](CONTRIBUTING.md).
<a href="graphs/contributors"><img src="https://opencollective.com/Developing-iOS-10-Apps-with-Swift/contributors.svg?width=890" /></a>
## Backers
Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/Developing-iOS-10-Apps-with-Swift#backer)]
<a href="https://opencollective.com/Developing-iOS-10-Apps-with-Swift#backers" target="_blank"><img src="https://opencollective.com/Developing-iOS-10-Apps-with-Swift/backers.svg?width=890"></a>
## Sponsors
Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [[Become a sponsor](https://opencollective.com/Developing-iOS-10-Apps-with-Swift#sponsor)]
<a href="https://opencollective.com/Developing-iOS-10-Apps-with-Swift/sponsor/0/website" target="_blank"><img src="https://opencollective.com/Developing-iOS-10-Apps-with-Swift/sponsor/0/avatar.svg"></a>
<a href="https://opencollective.com/Developing-iOS-10-Apps-with-Swift/sponsor/1/website" target="_blank"><img src="https://opencollective.com/Developing-iOS-10-Apps-with-Swift/sponsor/1/avatar.svg"></a>
<a href="https://opencollective.com/Developing-iOS-10-Apps-with-Swift/sponsor/2/website" target="_blank"><img src="https://opencollective.com/Developing-iOS-10-Apps-with-Swift/sponsor/2/avatar.svg"></a>
<a href="https://opencollective.com/Developing-iOS-10-Apps-with-Swift/sponsor/3/website" target="_blank"><img src="https://opencollective.com/Developing-iOS-10-Apps-with-Swift/sponsor/3/avatar.svg"></a>
<a href="https://opencollective.com/Developing-iOS-10-Apps-with-Swift/sponsor/4/website" target="_blank"><img src="https://opencollective.com/Developing-iOS-10-Apps-with-Swift/sponsor/4/avatar.svg"></a>
<a href="https://opencollective.com/Developing-iOS-10-Apps-with-Swift/sponsor/5/website" target="_blank"><img src="https://opencollective.com/Developing-iOS-10-Apps-with-Swift/sponsor/5/avatar.svg"></a>
<a href="https://opencollective.com/Developing-iOS-10-Apps-with-Swift/sponsor/6/website" target="_blank"><img src="https://opencollective.com/Developing-iOS-10-Apps-with-Swift/sponsor/6/avatar.svg"></a>
<a href="https://opencollective.com/Developing-iOS-10-Apps-with-Swift/sponsor/7/website" target="_blank"><img src="https://opencollective.com/Developing-iOS-10-Apps-with-Swift/sponsor/7/avatar.svg"></a>
<a href="https://opencollective.com/Developing-iOS-10-Apps-with-Swift/sponsor/8/website" target="_blank"><img src="https://opencollective.com/Developing-iOS-10-Apps-with-Swift/sponsor/8/avatar.svg"></a>
<a href="https://opencollective.com/Developing-iOS-10-Apps-with-Swift/sponsor/9/website" target="_blank"><img src="https://opencollective.com/Developing-iOS-10-Apps-with-Swift/sponsor/9/avatar.svg"></a>
<script async defer src="https://buttons.github.io/buttons.js">/*请忽略这段代码*/</script>
<script type="text/javascript">
window.onload = function () {
var realH1 = document.getElementById("developing-ios-10-apps-with-swift-字幕简体中文翻译项目");
document.getElementsByClassName("project-name")[0].innerHTML = realH1.innerHTML;
realH1.style.display="none";
}
</script>
</details>
|
{
"pile_set_name": "Github"
}
|
#!/bin/bash
#set -x # DEBUG

# Launch the unit test suite.
# Run this script like this: ./Scripts/test.sh [build_dir]
# If no build_dir is specified, ./build is assumed to be the default
# build directory.
build_dir="build"
if [ "$#" -eq 1 ]; then
  build_dir=$1
fi

RDBASE=$PWD
# Export these so they reach the test processes that ctest spawns;
# plain assignments (as before) only affect this shell and were never
# visible to the tests themselves.
export RDBASE
export PYTHONPATH=$RDBASE
export LD_LIBRARY_PATH=$RDBASE/lib:$LD_LIBRARY_PATH

# Run tests in parallel, one job per online processor.
nprocs=$(getconf _NPROCESSORS_ONLN)
cd "$build_dir" && ctest --output-on-failure -j "${nprocs}"
|
{
"pile_set_name": "Github"
}
|
use azure_sdk_storage_core::{EndpointProtocol, ConnectionStringBuilder};

/// Assembles an Azure Storage connection string from the STORAGE_ACCOUNT,
/// ACCOUNT_KEY and DEFAULT_ENDPOINTS_PROTOCOL environment variables and
/// prints it. Panics with an explanatory message when a variable is unset
/// or the protocol is neither "http" nor "https".
pub fn main() {
    // Every variable is mandatory; abort early when one is missing.
    let account_name =
        std::env::var("STORAGE_ACCOUNT").expect("Set env variable STORAGE_ACCOUNT first!");
    let account_key = std::env::var("ACCOUNT_KEY").expect("Set env variable ACCOUNT_KEY first!");
    let protocol_text = std::env::var("DEFAULT_ENDPOINTS_PROTOCOL")
        .expect("Set env variable DEFAULT_ENDPOINTS_PROTOCOL first!");

    // Translate the textual protocol into the SDK's enum.
    let protocol = if protocol_text == "https" {
        EndpointProtocol::Https
    } else if protocol_text == "http" {
        EndpointProtocol::Http
    } else {
        panic!("Invalid default endpoints protocol")
    };

    let connection_string = ConnectionStringBuilder::new()
        .with_account_name(&account_name)
        .with_account_key(&account_key)
        .with_default_endpoints_protocol(protocol)
        .build();

    println!("The connection string is: '{}'", connection_string);
}
|
{
"pile_set_name": "Github"
}
|
/* Translation-Revision-Date: 2020-05-26 04:07:15+0000 */
/* Plural-Forms: nplurals=1; plural=0; */
/* Generator: GlotPress/2.4.0-alpha */
/* Language: fa */
/* Number of Characters in a note */
"%@ Character" = "%@ نویسه";
/* Number of Characters in a note */
"%@ Characters" = "%@ نویسه";
/* Number of words in a note */
"%@ Word" = "%@ Word";
/* Number of words in a note */
"%@ Words" = "%@ Words";
/* Number of found search results */
"%d Result" = "%d Result";
/* Number of found search results */
"%d Results" = "%d Results";
/* Number of failed entries entering in passcode */
"%i Failed Passcode Attempt" = "%i Failed Passcode Attempt";
/* Number of failed entries entering in passcode */
"%i Failed Passcode Attempts" = "%i Failed Passcode Attempts";
/* Password Requirement: Length */
"- Minimum of 8 characters" = "- دستِکم ۸ نویسه";
/* Password Requirement: Special Characters */
"- Neither tabs nor newlines are allowed" = "- جهش (tab) و سرخط مجاز نیست";
/* Password Requirement: Email Match */
"- Password cannot match email" = "- گذرواژه نمیتواند مطابقِ ایمیل باشد";
/* 1 minute passcode lock timeout */
"1 Minute" = "۱ دقیقه";
/* 15 seconds passcode lock timeout */
"15 Seconds" = "۱۵ ثانیه";
/* 2 minutes passcode lock timeout */
"2 Minutes" = "۲ دقیقه";
/* 3 minutes passcode lock timeout */
"3 Minutes" = "۳ دقیقه";
/* 30 seconds passcode lock timeout */
"30 Seconds" = "۳۰ ثانیه";
/* 4 minutes passcode lock timeout */
"4 Minutes" = "۴ دقیقه";
/* 5 minutes passcode lock timeout */
"5 Minutes" = "۵ دقیقه";
/* Display app about screen */
"About" = "درباره";
/* Accept Action
Label of accept button on alert dialog */
"Accept" = "Accept";
/* No comment provided by engineer. */
"Account" = "Account";
/* Noun - collaborators are other Simplenote users who you chose to share a note with */
"Add a new collaborator..." = "Add a new collaborator...";
/* Placeholder test in textfield when adding a new tag to a note */
"Add a tag..." = "برچسب...";
/* Label on button to add a new tag to a note */
"Add tag" = "Add tag";
/* Title: No filters applied */
"All Notes" = "All Notes";
/* Sort Mode: Alphabetically */
"Alphabetically" = "الفبایی";
/* Sort Mode: Alphabetically, ascending */
"Alphabetically: A-Z" = "Alphabetically: A-Z";
/* Sort Mode: Alphabetically, descending */
"Alphabetically: Z-A" = "Alphabetically: Z-A";
/* Sign in error message */
"An error was encountered while signing in." = "An error was encountered while signing in.";
/* No comment provided by engineer. */
"Appearance" = "نما";
/* Other Simplenote apps */
"Apps" = "Apps";
/* Automattic hiring description */
"Are you a developer? Automattic is hiring." = "Are you a developer? Automattic is hiring.";
/* Empty Trash Warning */
"Are you sure you want to empty the trash? This cannot be undone." = "Are you sure you want to empty the trash? This cannot be undone.";
/* User Authenticated */
"Authenticated" = "Authenticated";
/* Title of Back button for Markdown preview */
"Back" = "بازگشت";
/* The Simplenote blog */
"Blog" = "وبنوشت";
/* Terms of Service Legend *PREFIX*: printed in dark color */
"By creating an account you agree to our" = "By creating an account you agree to our";
/* Simplenote terms of service */
"California Users Privacy Notice" = "California Users Privacy Notice";
/* Cancel Action
Dismissing an interface
Verb, cancel an alert dialog */
"Cancel" = "Cancel";
/* Verb - work with others on a note */
"Collaborate" = "Collaborate";
/* No comment provided by engineer. */
"Collaboration has moved" = "Collaboration has moved";
/* Noun - collaborators are other Simplenote users who you chose to share a note with */
"Collaborators" = "Collaborators";
/* Option to make the note list show only 1 line of text. The default is 3. */
"Condensed Note List" = "Condensed Note List";
/* Contribute to the Simplenote apps on github */
"Contribute" = "Contribute";
/* No comment provided by engineer. */
"Could be better" = "Could be better";
/* Error for bad email or password */
"Could not create an account with the provided email address and password." = "Could not create an account with the provided email address and password.";
/* Message displayed when login fails */
"Could not login with the provided email address and password." = "Could not login with the provided email address and password.";
/* No comment provided by engineer. */
"Could you tell us how we could improve?" = "Could you tell us how we could improve?";
/* Alert dialog title displayed on sign in error */
"Couldn't Sign In" = "Couldn't Sign In";
/* Siri Suggestion to create a New Note */
"Create a New Note" = "ایجادِ یک یادداشتِ تازه";
/* No comment provided by engineer. */
"Create a new note" = "Create a new note";
/* Sort Mode: Creation Date */
"Created" = "ساختشده";
/* Sort Mode: Creation Date, descending */
"Created: Newest" = "ایجادشده: جدیدترین";
/* Sort Mode: Creation Date, ascending */
"Created: Oldest" = "ایجادشده : قدیمیترین";
/* No comment provided by engineer. */
"Current Collaborators" = "Current Collaborators";
/* Theme: Dark */
"Dark" = "تاریک";
/* Debug Screen Title
Display internal debug status */
"Debug" = "Debug";
/* Verb: Delete notes and log out of the app */
"Delete Notes" = "Delete Notes";
/* No comment provided by engineer. */
"Disable Markdown formatting" = "Disable Markdown formatting";
/* No comment provided by engineer. */
"Dismiss keyboard" = "Dismiss keyboard";
/* Done toolbar button
Verb: Close current view */
"Done" = "Done";
/* Edit Tags Action: Visible in the Tags List */
"Edit" = "ویرایش";
/* Email TextField Placeholder */
"Email" = "ایمیل";
/* Email Taken Alert Title */
"Email in use" = "Email in use";
/* Verb - empty causes all notes to be removed permenently from the trash */
"Empty" = "Empty";
/* Remove all notes from the trash */
"Empty trash" = "Empty trash";
/* No comment provided by engineer. */
"Enable Markdown formatting" = "Enable Markdown formatting";
/* Number of objects enqueued for processing */
"Enqueued" = "Enqueued";
/* Message shown on passcode lock screen */
"Enter a passcode" = "Enter a passcode";
/* Touch ID fallback title */
"Enter passcode" = "Enter passcode";
/* Pin Lock */
"Enter your passcode" = "Enter your passcode";
/* Offer to enable Face ID support if available and passcode is on. */
"Face ID" = "Face ID";
/* Show a message when user enter 3 wrong passcodes */
"FailedAttempts" = "FailedAttempts";
/* Password Reset Action */
"Forgotten password?" = "گذرواژه فراموش شده؟";
/* No comment provided by engineer. */
"Great! Mind leaving a review to tell us what you like?" = "عالی! اگر مشکلی نیست لطفاً با نوشتنِ یک بررسی به ما بگویید چه چیزی دوست دارید؟";
/* Privacy Details */
"Help us improve Simplenote by sharing usage data with our analytics tool." = "Help us improve Simplenote by sharing usage data with our analytics tool.";
/* Noun - the version history of a note */
"History" = "History";
/* Action - view the version history of a note */
"History..." = "History...";
/* No comment provided by engineer. */
"I like it" = "I like it";
/* Last Message timestamp */
"LastSeen" = "آخرین بازدید";
/* Learn More Action */
"Learn more" = "بیشتر بدانید";
/* No comment provided by engineer. */
"Leave a review" = "Leave a review";
/* Theme: Light */
"Light" = "روشن";
/* Setting for when the passcode lock should enable */
"Lock Timeout" = "Lock Timeout";
/* Log In Action
Login Action
LogIn Action
LogIn Interface Title */
"Log In" = "ثبتِ ورود";
/* Log out of the active account in the app */
"Log Out" = "Log Out";
/* Allows the user to SignIn using their WPCOM Account */
"Log in with WordPress.com" = "Log in with WordPress.com";
/* Presents the regular Email signin flow */
"Log in with email" = "Log in with email";
/* Month and day date formatter */
"MMM d" = "d MMM";
/* Month, day, and time date formatter */
"MMM d, h:mm a" = "d MMM، h:mm a";
/* Month, day, and year date formatter */
"MMM d, yyyy" = "d MMM yyyy";
/* Month and year date formatter */
"MMM yyyy" = "MMM yyyy";
/* Special formatting that can be turned on for notes */
"Markdown" = "Markdown";
/* Switch which marks a note as using Markdown formatting or not */
"Markdown toggle" = "Markdown toggle";
/* Terminoligy used for sidebar UI element where tags are displayed */
"Menu" = "Menu";
/* Sort Mode: Modified Date */
"Modified" = "تغییریافته";
/* Sort Mode: Modified Date, descending */
"Modified: Newest" = "Modified: Newest";
/* Sort Mode: Modified Date, ascending */
"Modified: Oldest" = "Modified: Oldest";
/* Label to create a new note */
"New note" = "New note";
/* Empty Note Placeholder */
"New note..." = "یادداشت تازه...";
/* Alert's Cancel Action
Cancels Empty Trash Action */
"No" = "خیر";
/* Message shown in note list when no notes are in the current view */
"No Notes" = "No Notes";
/* Message shown when no notes match a search string */
"No Results" = "No Results";
/* No comment provided by engineer. */
"No thanks" = "نه ممنون";
/* No comment provided by engineer. */
"Note not published" = "Note not published";
/* Notes Header (Search Mode)
Plural form of notes */
"Notes" = "Notes";
/* Dismisses an AlertController */
"OK" = "OK";
/* Instant passcode lock timeout */
"Off" = "Off";
/* No comment provided by engineer. */
"On" = "On";
/* Siri Suggestion to open a specific Note */
"Open \"(preview)\"" = "Open \"(preview)\"";
/* Siri Suggestion to open our app */
"Open Simplenote" = "Open Simplenote";
/* Select a note to view in the note editor */
"Open note" = "Open note";
/* AlertController's Payload for the broken Sort Options Fix */
"Our update may have changed the order in which your notes appear. Would you like to review sort settings?" = "Our update may have changed the order in which your notes appear. Would you like to review sort settings?";
/* A 4-digit code to lock the app when it is closed */
"Passcode" = "Passcode";
/* Pin Lock */
"Passcodes did not match. Try again." = "Passcodes did not match. Try again.";
/* Password TextField Placeholder */
"Password" = "گذرواژه";
/* Message displayed when password is invalid (Signup) */
"Password cannot match email" = "گذرواژه نمیتواند مطابقِ ایمیل باشد";
/* Message displayed when password is too short. Please preserve the Percent D! */
"Password must contain at least %d characters" = "گذرواژه دستِکم باید حاویِ %d نویسه باشد";
/* Message displayed when a password contains a disallowed character */
"Password must not contain tabs nor newlines" = "گذرواژه نمیباید حاوی جهش (tab) و سرخط باشد";
/* Number of changes pending to be sent */
"Pendings" = "Pendings";
/* Action to mark a note as pinned */
"Pin note" = "Pin note";
/* Denotes when note is pinned to the top of the note list */
"Pin to Top" = "سنجاق به بالا";
/* Switch which marks a note as pinned or unpinned */
"Pin toggle" = "Pin toggle";
/* Pinned notes are stuck to the note of the note list */
"Pinned" = "سنجاقشده";
/* Error message displayed when user has not verified their WordPress.com account */
"Please activate your WordPress.com account via email and try again." = "لطفاً حسابِ WordPress.com ِخود را از راهِ ایمیل فعال کنید و سپس دوباره تلاش کنید.";
/* Title of Markdown preview screen */
"Preview" = "پیشنمایش";
/* Simplenote privacy policy */
"Privacy Policy" = "Privacy Policy";
/* Privacy Settings */
"Privacy Settings" = "Privacy Settings";
/* Verb - Publishing a note creates URL and for any note in a user's account, making it viewable to others */
"Publish" = "Publish";
/* Action which published a note to a web page */
"Publish note" = "Publish note";
/* Switch which marks a note as published or unpublished */
"Publish toggle" = "Publish toggle";
/* No comment provided by engineer. */
"Published" = "منتشرشده";
/* Message shown when a note is in the processes of being published */
"Publishing..." = "در حالِ انتشار...";
/* Prompt when setting up a passcode */
"Re-enter a passcode" = "Re-enter a passcode";
/* Reachs Internet */
"Reachability" = "Reachability";
/* No comment provided by engineer. */
"Remove all notes from trash" = "Remove all notes from trash";
/* Rename a tag */
"Rename" = "تغییرِ نام";
/* Reset Action */
"Reset" = "بازنشانی";
/* Password Reset Required Alert Title */
"Reset Required" = "بازنشانی لازم است";
/* Restore a note to a previous version */
"Restore Note" = "Restore Note";
/* Search Placeholder
Using Search instead of Back if user is searching */
"Search" = "جستوجو";
/* Tags Header (Search Mode) */
"Search by Tag" = "جستجو برپایهٔ برچسب";
/* SearchBar's Placeholder Text */
"Search notes or tags" = "جستوجویِ یادداشتها یا برچسبها";
/* No comment provided by engineer. */
"Security" = "امنیت";
/* Verb - send the content of the note by email, message, etc */
"Send" = "ارسال";
/* For debugging use */
"Send a Test Crash" = "Send a Test Crash";
/* No comment provided by engineer. */
"Send feedback" = "Send feedback";
/* Prompt when setting up a passcode */
"Set Passcode" = "Set Passcode";
/* Title of options screen */
"Settings" = "تنظیمات";
/* Option to disable Analytics. */
"Share Analytics" = "Share Analytics";
/* No comment provided by engineer. */
"Share note" = "Share note";
/* No comment provided by engineer. */
"Sharing notes is now accessed through the action menu from the toolbar." = "Sharing notes is now accessed through the action menu from the toolbar.";
/* UI region to the left of the note list which shows all of a users tags */
"Sidebar" = "نوارِ کناری";
/* Signup Action
SignUp Action
SignUp Interface Title */
"Sign Up" = "نامنویسی";
/* Alert message displayed when an account has unsynced notes */
"Signing out will delete any unsynced notes. You can verify your synced notes by signing in to the Web App." = "Signing out will delete any unsynced notes. You can verify your synced notes by signing in to the Web App.";
/* Our mighty brand! */
"Simplenote" = "Simplenote";
/* Simplenote's Feedback Email Title */
"Simplenote iOS Feedback" = "بازخوردِ Simplenote iOS";
/* Authentication Error Alert Title */
"Sorry!" = "پوزش!";
/* Option to sort tags alphabetically. The default is by manual ordering. */
"Sort Alphabetically" = "مرتبسازیِ الفبایی";
/* Option to sort notes in the note list alphabetically. The default is by modification date
Sort Order for the Notes List */
"Sort Order" = "Sort Order";
/* Sort By Title */
"Sort by:" = "چینش بر پایهٔ:";
/* Theme: Matches iOS Settings */
"System Default" = "پیشفرضِ سامانه";
/* No comment provided by engineer. */
"Tags" = "برچسبها";
/* Terms of Service Legend *SUFFIX*: Concatenated with a space, after the PREFIX, and printed in blue */
"Terms and Conditions" = "Terms and Conditions";
/* Simplenote terms of service */
"Terms of Service" = "شرایطِ خدمات";
/* Error when address is in use */
"The email you've entered is already associated with a Simplenote account." = "ایمیلی که فراهم کردهاید از پیش با یک حسابِ Simplenote مرتبط است.";
/* Onboarding Header Text */
"The simplest way to keep notes." = "سادهترین روش برای حفظِ یادداشتها.";
/* Option to enable the dark app theme. */
"Theme" = "پوسته";
/* Simplenote Themes */
"Themes" = "پوستهها";
/* Touch ID reason/explanation */
"To unlock the application" = "To unlock the application";
/* Displayed as a date in the case where a note was modified today, for example */
"Today" = "امروز";
/* Accessibility hint used to show or hide the sidebar */
"Toggle tag sidebar" = "Toggle tag sidebar";
/* Offer to enable Touch ID support if available and passcode is on. */
"Touch ID" = "Touch ID";
/* Title: Trash Tag is selected */
"Trash-noun" = "زبالهدان";
/* Trash (verb) - the action of deleting a note */
"Trash-verb" = "دور انداختن";
/* Prompt when disabling passcode */
"Turn off Passcode" = "Turn off Passcode";
/* Message shown when gaining entry to app via a passcode */
"Unlock %@" = "Unlock %@";
/* Action to mark a note as unpinned */
"Unpin note" = "Unpin note";
/* Action which unpublishes a note */
"Unpublish note" = "Unpublish note";
/* Message shown when a note is in the processes of being unpublished */
"Unpublishing..." = "Unpublishing...";
/* Alert title displayed in settings when an account has unsynced notes */
"Unsynced Notes Detected" = "Unsynced Notes Detected";
/* Title: Untagged Notes are onscreen */
"Untagged" = "برچسبنخورده";
/* Allows selecting notes with no tags */
"Untagged Notes" = "Untagged Notes";
/* A user's Simplenote account */
"Username" = "نامِ کاربری";
/* Represents a snapshot in time for a note */
"Version" = "Version";
/* App version number */
"Version %@" = "نسخهٔ %@";
/* Visit app.simplenote.com in the browser */
"Visit Web App" = "Visit Web App";
/* Generic error */
"We're having problems. Please try again soon." = "در حالِ حاضر با مشکلاتی مواجه هستیم. لطفاً بهزودی دوباره امتحان کنید.";
/* WebSocket Status */
"WebSocket" = "WebSocket";
/* No comment provided by engineer. */
"What do you think about Simplenote?" = "What do you think about Simplenote?";
/* Work at Automattic */
"Work With Us" = "با ما همکاری کنید";
/* Alert's Accept Action
Proceeds with the Empty Trash OP */
"Yes" = "بله";
/* Displayed as a date in the case where a note was modified yesterday, for example */
"Yesterday" = "دیروز";
/* Message displayed when email address is invalid */
"Your email address is not valid" = "نشانیِ ایمیل شما معتبر نیست";
/* Password Requirements: Title */
"Your password is insecure and must be reset. The password requirements are:" = "گذرواژهٔ شما ایمن نیست و باید بازنشانی شود. ملزوماتِ گذرواژه از این قرارند:";
/* Accessibility hint on button which shows the current collaborators on a note */
"collaborate-accessibility-hint" = "collaborate-accessibility-hint";
/* No comment provided by engineer. */
"collaborators-description" = "collaborators-description";
/* Warning message shown when current note is deleted on another device */
"deleted-note-warning" = "deleted-note-warning";
/* Accessibility hint on button which shows the history of a note */
"history-accessibility-hint" = "history-accessibility-hint";
/* VoiceOver accessibility hint used when the menu can be shown or hidden */
"menu-accessibility-hint" = "menu-accessibility-hint";
/* VoiceOver accessibility hint on the button that closes the notes editor and navigates back to the note list */
"notes-accessibility-hint" = "notes-accessibility-hint";
/* Accessibility hint on share button */
"share-accessibility-hint" = "اشتراکگذاریِ محتوایِ یادداشتِ کنونی";
/* Accessibility hint for adding a tag to a note */
"tag-add-accessibility-hint" = "tag-add-accessibility-hint";
/* No comment provided by engineer. */
"tag-delete-accessibility-hint" = "tag-delete-accessibility-hint";
/* Accessibility hint on button which moves a note to the trash */
"trash-accessibility-hint" = "انتقالِ یادداشتِ کنونی به زبالهدان";
/* Error alert message shown when trying to view history of a note without an internet connection */
"version-alert-message" = "version-alert-message";
/* Accessibility hint describing how to reset the current note to a previous version */
"version-cell-accessibility-hint" = "version-cell-accessibility-hint";
/* Accessibility hint used when previous versions of a note are being fetched */
"version-cell-fetching-accessibility-hint" = "version-cell-fetching-accessibility-hint";
/* A welcome note for new iOS users */
"welcomeNote-iOS" = "welcomeNote-iOS";
|
{
"pile_set_name": "Github"
}
|
// NOTE(review): appears to be an auto-generated barrel module that re-exports
// the default export of each TypeScript standard-library declaration module
// under a numbered alias (export0 ... export52). Do not edit by hand; the
// numbering is positional and regenerating the file may reorder it.
export { default as export0 } from "./lib";
export { default as export1 } from "./lib.dom";
export { default as export2 } from "./lib.dom.iterable";
export { default as export3 } from "./lib.es2015.collection";
export { default as export4 } from "./lib.es2015.core";
export { default as export5 } from "./lib.es2015";
export { default as export6 } from "./lib.es2015.generator";
export { default as export7 } from "./lib.es2015.iterable";
export { default as export8 } from "./lib.es2015.promise";
export { default as export9 } from "./lib.es2015.proxy";
export { default as export10 } from "./lib.es2015.reflect";
export { default as export11 } from "./lib.es2015.symbol";
export { default as export12 } from "./lib.es2015.symbol.wellknown";
export { default as export13 } from "./lib.es2016.array.include";
export { default as export14 } from "./lib.es2016";
export { default as export15 } from "./lib.es2016.full";
export { default as export16 } from "./lib.es2017";
export { default as export17 } from "./lib.es2017.full";
export { default as export18 } from "./lib.es2017.intl";
export { default as export19 } from "./lib.es2017.object";
export { default as export20 } from "./lib.es2017.sharedmemory";
export { default as export21 } from "./lib.es2017.string";
export { default as export22 } from "./lib.es2017.typedarrays";
export { default as export23 } from "./lib.es2018.asyncgenerator";
export { default as export24 } from "./lib.es2018.asynciterable";
export { default as export25 } from "./lib.es2018";
export { default as export26 } from "./lib.es2018.full";
export { default as export27 } from "./lib.es2018.intl";
export { default as export28 } from "./lib.es2018.promise";
export { default as export29 } from "./lib.es2018.regexp";
export { default as export30 } from "./lib.es2019.array";
export { default as export31 } from "./lib.es2019";
export { default as export32 } from "./lib.es2019.full";
export { default as export33 } from "./lib.es2019.object";
export { default as export34 } from "./lib.es2019.string";
export { default as export35 } from "./lib.es2019.symbol";
export { default as export36 } from "./lib.es2020.bigint";
export { default as export37 } from "./lib.es2020";
export { default as export38 } from "./lib.es2020.full";
export { default as export39 } from "./lib.es2020.intl";
export { default as export40 } from "./lib.es2020.promise";
export { default as export41 } from "./lib.es2020.string";
export { default as export42 } from "./lib.es2020.symbol.wellknown";
export { default as export43 } from "./lib.es5";
export { default as export44 } from "./lib.es6";
export { default as export45 } from "./lib.esnext";
export { default as export46 } from "./lib.esnext.full";
export { default as export47 } from "./lib.esnext.intl";
export { default as export48 } from "./lib.esnext.promise";
export { default as export49 } from "./lib.esnext.string";
export { default as export50 } from "./lib.scripthost";
export { default as export51 } from "./lib.webworker";
export { default as export52 } from "./lib.webworker.importscripts";
|
{
"pile_set_name": "Github"
}
|
<script type="text/javascript">
$(function() {
$('#select_page_id').on('change', function() {
show_field($(this));
});
show_field($('#select_page_id'));
})
function show_field($select) {
if($select.val() == 0)
$('#page_level_container').show();
else
$('#page_level_container').hide();
}
</script>
<?php /* Widget settings panel: pick the source page (or, when no page is chosen, a page level) for this widget. */ ?>
<div class="panel-body">
	<div class="form-group">
		<label class="control-label col-md-3"><?php echo __('Page'); ?></label>
		<div class="col-md-4">
			<?php /* Value 0 of this select toggles the #page_level_container below via the show_field() script in this template. */ ?>
			<?php echo Form::select('page_id', $select, $widget->page_id, array('id' => 'select_page_id')); ?>
		</div>
	</div>
	<div class="form-group" id="page_level_container">
		<label class="control-label col-md-3" for="page_level"><?php echo __('Select page level'); ?></label>
		<div class="col-md-2">
			<?php echo Form::input('page_level', $widget->page_level, array('id' => 'page_level', 'class' => 'form-control')); ?>
		</div>
	</div>
	<div class="form-group">
		<div class="col-md-offset-3 col-md-9">
			<div class="checkbox">
				<label><?php echo Form::checkbox('match_all_paths', 1, $widget->match_all_paths == 1); ?> <?php echo __('Match All Pages within Given Deepness'); ?></label>
			</div>
			<div class="checkbox">
				<label><?php echo Form::checkbox('include_hidden', 1, $widget->include_hidden == 1); ?> <?php echo __('Include hidden pages'); ?></label>
			</div>
		</div>
	</div>
</div>
<div class="panel-heading">
	<span class="panel-title"><?php echo __('Exclude pages'); ?></span>
</div>
<?php
// The list of widgets offered in the "Fetched widget" selects does not depend
// on the current row, so build it once up front instead of calling
// Widget_Manager::get_related() on every iteration of the page loop.
$widgets = Widget_Manager::get_related(array());
// A widget must not be able to fetch itself.
if (isset($widgets[$widget->id])) unset($widgets[$widget->id]);
if (!empty($widgets))
{
	// Key 0 acts as the "no widget selected" option.
	$widgets = array(__('--- Not set ---')) + $widgets;
}
?>
<table class="table table-noborder table-striped">
	<colgroup>
		<col width="50px" />
		<col width="450px" />
		<col />
	</colgroup>
	<thead>
		<tr>
			<th></th>
			<th></th>
			<th><?php echo __('Fetched widget'); ?></th>
		</tr>
	</thead>
	<tbody>
		<?php foreach($pages as $page): ?>
		<tr>
			<?php if($page['id'] > 1): ?>
			<td class="text-right">
				<?php echo Form::checkbox('exclude[]', $page['id'], in_array($page['id'], $widget->exclude), array('id' => 'page'.$page['id'])); ?>
			</td>
			<th><label for="page<?php echo $page['id']; ?>"><?php echo str_repeat(' ', $page['level'] * 10) . $page['title']; ?></label></th>
			<?php else: ?>
			<td></td>
			<th><?php echo $page['title']; ?></th>
			<?php endif; ?>
			<td>
				<?php
				if (!empty($widgets))
				{
					$selected = Arr::get($widget->fetched_widgets, $page['id']);
					echo Form::select('fetched_widgets['.$page['id'].']', $widgets, $selected, array(
						'class' => 'form-control'
					));
				}
				?>
			</td>
		</tr>
		<?php endforeach; ?>
	</tbody>
</table>
|
{
"pile_set_name": "Github"
}
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A client interface for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import re
import threading
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow as tf_session
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import device
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import session_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import nest
class SessionInterface(object):
  """Base class for implementations of TensorFlow client sessions."""

  # Concrete implementations (e.g. BaseSession below) must override every
  # member; each stub raises NotImplementedError naming the missing member.

  @property
  def graph(self):
    """The underlying TensorFlow graph, to be used in building Operations."""
    raise NotImplementedError('graph')

  @property
  def sess_str(self):
    """The TensorFlow process to which this session will connect."""
    raise NotImplementedError('sess_str')

  def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
    """Runs operations in the session. See `BaseSession.run()` for details."""
    raise NotImplementedError('run')

  def partial_run_setup(self, fetches, feeds=None):
    """Sets up the feeds and fetches for partial runs in the session."""
    raise NotImplementedError('partial_run_setup')

  def partial_run(self, handle, fetches, feed_dict=None):
    """Continues the execution with additional feeds and fetches."""
    raise NotImplementedError('partial_run')
def _get_indexed_slices_value_from_fetches(fetched_vals):
  """Packs fetched component arrays into an `ops.IndexedSlicesValue`.

  Args:
    fetched_vals: list of 2 (values, indices) or 3 (values, indices,
      dense_shape) fetched arrays.

  Returns:
    An `ops.IndexedSlicesValue` built from the components; dense_shape is
    None when only two components were fetched.
  """
  dense_shape = fetched_vals[2] if len(fetched_vals) == 3 else None
  return ops.IndexedSlicesValue(fetched_vals[0], fetched_vals[1], dense_shape)
def _get_feeds_for_indexed_slices(feed, feed_val):
return list(zip([feed.values, feed.indices] if feed.dense_shape is None else
[feed.values, feed.indices, feed.dense_shape], feed_val))
# List of extensions supported to convert run arguments into actual fetches and
# feeds.
#
# Each element in the list is a tuple of (Type, fetch_fn, feed_fn1, feed_fn2),
# where the function signatures are:
# fetch_fn : Type -> (list of Tensors,
# lambda: list of fetched np.ndarray -> TypeVal)
# feed_fn1 : Type, TypeVal -> list of (Tensor, value)
# feed_fn2 : Type -> list of Tensors
#
# `fetch_fn` describes how to expand fetch into its
# component Tensors and how to contract the fetched results back into
# a single return value.
#
# Each feed function describes how to unpack a single fed value and map it to
# feeds of one or more tensors and their corresponding values: `feed_fn1` is
# used to feed a run, `feed_fn2` to set up a partial run.
#
# TODO(touts): We could reimplement these as specialized _FeedMapper
# implementations after we refactor the feed handling code to use them.
#
# Eventually, this registration could be opened up to support custom Tensor
# expansions.
# pylint: disable=g-long-lambda
# Default registry of (Type, fetch_fn, feed_fn1, feed_fn2) tuples; the
# signatures of the three functions are described in the block comment above.
# Order matters: `_FetchMapper.for_fetch` scans this list front-to-back and
# `register_session_run_conversion_functions` inserts at index 0, so the
# catch-all `(object, ...)` entry must remain last.
_REGISTERED_EXPANSIONS = [
    # SparseTensors are fetched as SparseTensorValues. They can be fed
    # SparseTensorValues or normal tuples.
    (sparse_tensor.SparseTensor,
     lambda fetch: (
         [fetch.indices, fetch.values, fetch.dense_shape],
         lambda fetched_vals: sparse_tensor.SparseTensorValue(*fetched_vals)),
     lambda feed, feed_val: list(zip(
         [feed.indices, feed.values, feed.dense_shape], feed_val)),
     lambda feed: [feed.indices, feed.values, feed.dense_shape]),
    # IndexedSlices are fetched as IndexedSlicesValues. They can be fed
    # IndexedSlicesValues or normal tuples.
    # dense_shape is optional, so the component list has 2 or 3 tensors.
    (ops.IndexedSlices,
     lambda fetch: (
         [fetch.values, fetch.indices] if fetch.dense_shape is None
         else [fetch.values, fetch.indices, fetch.dense_shape],
         _get_indexed_slices_value_from_fetches),
     _get_feeds_for_indexed_slices,
     lambda feed: [feed.values, feed.indices] if feed.dense_shape is None
     else [feed.values, feed.indices, feed.dense_shape]),
    # The default catches all other types and performs no expansions.
    (object,
     lambda fetch: ([fetch], lambda fetched_vals: fetched_vals[0]),
     lambda feed, feed_val: [(feed, feed_val)],
     lambda feed: [feed])]
# pylint: enable=g-long-lambda
def register_session_run_conversion_functions(tensor_type, fetch_function,
    feed_function=None, feed_function_for_partial_run=None):
  """Register fetch and feed conversion functions for `tf.Session.run()`.

  This function registers a triple of conversion functions for fetching and/or
  feeding values of user-defined types in a call to tf.Session.run().

  An example

  ```python
  class SquaredTensor(object):
    def __init__(self, tensor):
      self.sq = tf.square(tensor)
  #you can define conversion functions as follows:
  fetch_function = lambda squared_tensor:([squared_tensor.sq],
                                          lambda val: val[0])
  feed_function = lambda feed, feed_val: [(feed.sq, feed_val)]
  feed_function_for_partial_run = lambda feed: [feed.sq]
  #then after invoking this register function, you can use as follows:
  session.run(squared_tensor1,
              feed_dict = {squared_tensor2 : some_numpy_array})
  ```

  Args:
    tensor_type: The type for which you want to register a conversion function.
    fetch_function: A callable that takes an object of type `tensor_type` and
      returns a tuple, where the first element is a list of `tf.Tensor` objects,
      and the second element is a callable that takes a list of ndarrays and
      returns an object of some value type that corresponds to `tensor_type`.
      fetch_function describes how to expand fetch into its component Tensors
      and how to contract the fetched results back into a single return value.
    feed_function: A callable that takes feed_key and feed_value as input, and
      returns a list of tuples (feed_tensor, feed_val), feed_key must have type
      `tensor_type`, and feed_tensor must have type `tf.Tensor`. Each feed
      function describes how to unpack a single fed value and map it to feeds
      of one or more tensors and their corresponding values.
    feed_function_for_partial_run: A callable for specifying tensor values to
      feed when setting up a partial run, which takes a `tensor_type` type
      object as input, and returns a list of Tensors.

  Raises:
    ValueError: If `tensor_type` has already been registered.
  """
  # Scan the whole registry before inserting. Note two fixes relative to the
  # previous version: the ValueError message is actually %-formatted (it used
  # to pass `tensor_type` as a second exception argument, logging-style), and
  # the stray `return` that aborted the loop after its first iteration —
  # leaving all but the first entry unchecked and skipping registration — is
  # gone.
  for conversion_function in _REGISTERED_EXPANSIONS:
    if issubclass(conversion_function[0], tensor_type):
      raise ValueError('%s has already been registered so ignore it.' %
                       tensor_type)
  # Insert at the front so the new expansion takes priority over the
  # catch-all `(object, ...)` entry at the end of the list.
  _REGISTERED_EXPANSIONS.insert(0, (tensor_type, fetch_function, feed_function,
                                    feed_function_for_partial_run))
class _FetchMapper(object):
  """Definition of the interface provided by fetch mappers.

  Fetch mappers are utility classes used by the _FetchHandler to handle
  arbitrary structures for the `fetch` argument to `Session.run()`.

  The `fetch` argument can be of various shapes: single tensor or op, list of
  fetches, tuple of fetches, namedtuple of fetches, or dict of fetches. The
  structures can be arbitrarily nested.

  The low level run() API only wants a list of tensor or op names. The various
  `_FetchMapper` subclasses below take care of handling the different shapes:
  uniquifying the fetches, and constructing results with the original shape.
  """

  def unique_fetches(self):
    """Return the list of unique tensors or ops needed by this fetch mapper.

    Returns:
      A list of tensors or ops.
    """
    raise NotImplementedError('Must be implemented by subclasses')

  def build_results(self, values):
    """Build results that match the original shape of the fetch.

    Args:
      values: List of values returned by run(). The values correspond
        exactly to the list tensors or ops returned by unique_fetches().

    Returns:
      A struct of the same shape as the original fetch object handled by
      this fetch mapper. In the returned struct, the original fetches are
      replaced by their fetched values.
    """
    raise NotImplementedError('Must be implemented by subclasses')

  @staticmethod
  def for_fetch(fetch):
    """Creates fetch mapper that handles the structure of `fetch`.

    The default graph must be the one from which we want to fetch values when
    this function is called.

    Args:
      fetch: An arbitrary fetch structure: singleton, list, tuple,
        namedtuple, or dict.

    Returns:
      An instance of a subclass of `_FetchMapper` that handles the shape.
    """
    # Guard-clause dispatch: reject None, peel off containers, then fall
    # through to the registered expansions for leaf fetches.
    if fetch is None:
      raise TypeError('Fetch argument %r has invalid type %r' %
                      (fetch, type(fetch)))
    if isinstance(fetch, (list, tuple)):
      # NOTE(touts): This is also the code path for namedtuples.
      return _ListFetchMapper(fetch)
    if isinstance(fetch, dict):
      return _DictFetchMapper(fetch)
    # Look for a handler in the registered expansions.
    for tensor_type, fetch_fn, _, _ in _REGISTERED_EXPANSIONS:
      if isinstance(fetch, tensor_type):
        fetches, contraction_fn = fetch_fn(fetch)
        return _ElementFetchMapper(fetches, contraction_fn)
    # Did not find anything.
    raise TypeError('Fetch argument %r has invalid type %r' %
                    (fetch, type(fetch)))
class _ElementFetchMapper(_FetchMapper):
  """Fetch mapper for singleton tensors and ops."""

  def __init__(self, fetches, contraction_fn):
    """Creates an _ElementFetchMapper.

    This is the fetch mapper used for leaves in the fetch struct. Because of
    the expansions mechanism, a leaf can actually fetch more than one tensor.

    Also note that the fetches here can be just strings (tensor or op names) or
    any other object that the graph knows how to convert to a tensor, such as a
    Variable. So we have to run each fetch through `as_graph_element()` to get
    the corresponding tensor or op.

    Args:
      fetches: List of objects, as returned by a fetch_fn defined
        in _REGISTERED_EXPANSIONS.
      contraction_fn: Callable as returned by a fetch_fn.

    Raises:
      TypeError: If a fetch has a type the graph cannot convert.
      ValueError: If a fetch cannot be interpreted as a Tensor or Operation.
    """
    self._unique_fetches = []
    for fetch in fetches:
      try:
        self._unique_fetches.append(ops.get_default_graph().as_graph_element(
            fetch, allow_tensor=True, allow_operation=True))
      except TypeError as e:
        raise TypeError('Fetch argument %r has invalid type %r, '
                        'must be a string or Tensor. (%s)'
                        % (fetch, type(fetch), str(e)))
      # ValueError and KeyError were previously handled in two separate
      # clauses with byte-identical bodies; fold them into one.
      except (ValueError, KeyError) as e:
        raise ValueError('Fetch argument %r cannot be interpreted as a '
                         'Tensor. (%s)' % (fetch, str(e)))
    self._contraction_fn = contraction_fn

  def unique_fetches(self):
    return self._unique_fetches

  def build_results(self, values):
    if not values:
      # 'Operation' case: ops yield no value, so the result is None.
      return None
    else:
      return self._contraction_fn(values)
def _uniquify_fetches(fetch_mappers):
"""Uniquifies fetches from a list of fetch_mappers.
This is a utility function used by _ListFetchMapper and _DictFetchMapper. It
gathers all the unique fetches from a list of mappers and builds a list
containing all of them but without duplicates (unique_fetches).
It also returns a 2-D list of integers (values_indices) indicating at which
index in unique_fetches the fetches of the mappers are located.
This list is as follows:
values_indices[mapper_index][mapper_fetch_index] = unique_fetches_index
Args:
fetch_mappers: list of fetch mappers.
Returns:
A list of fetches.
A 2-D list of integers.
"""
unique_fetches = []
value_indices = []
seen_fetches = {}
for m in fetch_mappers:
m_value_indices = []
for f in m.unique_fetches():
j = seen_fetches.get(f)
if j is None:
j = len(seen_fetches)
seen_fetches[f] = j
unique_fetches.append(f)
m_value_indices.append(j)
value_indices.append(m_value_indices)
return unique_fetches, value_indices
class _ListFetchMapper(_FetchMapper):
  """Fetch mapper for lists, tuples, and namedtuples."""

  def __init__(self, fetches):
    """Creates a _ListFetchMapper.

    Args:
      fetches: List, tuple, or namedtuple of fetches.
    """
    self._fetch_type = type(fetches)
    self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches]
    self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)

  def unique_fetches(self):
    return self._unique_fetches

  def build_results(self, values):
    # Let each child mapper rebuild its slice of the deduplicated values.
    results = [
        mapper.build_results([values[j] for j in indices])
        for mapper, indices in zip(self._mappers, self._value_indices)
    ]
    # Return a value of the original type of the fetches.
    if self._fetch_type is list:
      return results
    if self._fetch_type is tuple:
      return tuple(results)
    # This is the code path for namedtuple.
    return self._fetch_type(*results)
class _DictFetchMapper(_FetchMapper):
  """Fetch mapper for dicts."""

  def __init__(self, fetches):
    """Creates a _DictFetchMapper.

    Args:
      fetches: Dict of fetches.
    """
    self._fetch_type = type(fetches)
    self._keys = fetches.keys()
    mappers = []
    for fetch in fetches.values():
      mappers.append(_FetchMapper.for_fetch(fetch))
    self._mappers = mappers
    self._unique_fetches, self._value_indices = _uniquify_fetches(mappers)

  def unique_fetches(self):
    return self._unique_fetches

  def build_results(self, values):
    # Rebuild a dict of the same type, pairing each key with the values
    # its mapper needs out of the deduplicated value list.
    results = self._fetch_type()
    for key, mapper, indices in zip(self._keys, self._mappers,
                                    self._value_indices):
      results[key] = mapper.build_results([values[j] for j in indices])
    return results
class _FetchHandler(object):
  """Handler for structured fetches.

  Given a graph, a user-provided structure for fetches, and a feed dict, this
  class takes care of generating a list of tensor names to fetch and op names
  to run for a low level `run()` call.

  Given the results of the low level run call, this class can also rebuild a
  result structure matching the user-provided structure for fetches, but
  containing the corresponding results.
  """

  # TODO(touts): Make this class also take care of destructuring the feed
  # dict instead of doing it in the callers.

  def __init__(self, graph, fetches, feeds, feed_handles=None):
    """Creates a fetch handler.

    Args:
      graph: Graph of the fetches.  Used to check for fetchability
        and to convert all fetches to tensors or ops as needed.
      fetches: An arbitrary fetch structure: singleton, list, tuple,
        namedtuple, or dict.
      feeds: A feed dict where keys are Tensors.
      feed_handles: A dict from feed Tensors to TensorHandle objects used as
        direct feeds.
    """
    # Build the fetch mapper with `graph` as the default graph so name/string
    # fetches are resolved against it rather than the caller's default graph.
    with graph.as_default():
      self._fetch_mapper = _FetchMapper.for_fetch(fetches)
    self._fetches = []
    self._targets = []
    self._feeds = feeds
    self._feed_handles = feed_handles or {}
    # _ops[k] records whether the k-th unique fetch is an Operation (True:
    # goes to _targets, produces no value) or tensor-like (False: _fetches).
    # build_results() relies on this list to re-interleave results.
    self._ops = []
    self._fetch_handles = {}
    for fetch in self._fetch_mapper.unique_fetches():
      if isinstance(fetch, ops.Operation):
        self._assert_fetchable(graph, fetch)
        self._targets.append(fetch)
        self._ops.append(True)
      else:
        self._assert_fetchable(graph, fetch.op)
        self._fetches.append(fetch)
        self._ops.append(False)
        # Remember the fetch if it is for a tensor handle.
        if (isinstance(fetch, ops.Tensor) and
            (fetch.op.type == 'GetSessionHandle' or
             fetch.op.type == 'GetSessionHandleV2')):
          self._fetch_handles[fetch] = fetch.op.inputs[0].dtype
    # Fetches that are also fed need not be fetched from the runtime; their
    # fed value is substituted back in build_results().
    self._final_fetches = [x for x in self._fetches if x not in feeds]

  def _assert_fetchable(self, graph, op):
    # Fail early with a clear message instead of letting the low level run
    # call reject the op.
    if not graph.is_fetchable(op):
      raise ValueError(
          'Operation %r has been marked as not fetchable.' % op.name)

  def fetches(self):
    """Return the unique names of tensors to fetch.

    Returns:
      A list of strings.
    """
    return self._final_fetches

  def targets(self):
    """Return the unique names of ops to run.

    Returns:
      A list of strings.
    """
    return self._targets

  def build_results(self, session, tensor_values):
    """Build results matching the original fetch shape.

    `tensor_values` must be a list of the same length as
    the one returned by `fetches()`, and holding the requested
    fetch values.

    This method builds a struct with the same shape as the original `fetches`
    passed to the constructor, in which the fetches are replaced by their
    fetched value.

    Args:
      session: The enclosing session.  Used for tensor handles.
      tensor_values: List of values matching the list returned
        by fetches().

    Returns:
      A structure of the same shape as the original `fetches` argument but
      containing tensors or None (for fetched ops).
    """
    full_values = []
    assert len(self._final_fetches) == len(tensor_values)
    # i walks self._fetches (every tensor-like fetch); j walks tensor_values
    # (only the fetches that were actually sent to the runtime). They diverge
    # when a fetch was satisfied by a feed instead of a runtime fetch.
    i = 0
    j = 0
    for is_op in self._ops:
      if is_op:
        full_values.append(None)
      else:
        # If the fetch was in the feeds, use the fed value, otherwise
        # use the returned value.
        if self._fetches[i] in self._feed_handles:
          # A fetch had a corresponding direct TensorHandle feed. Call eval()
          # to obtain the Tensor value from the TensorHandle.
          value = self._feed_handles[self._fetches[i]].eval()
        else:
          value = self._feeds.get(self._fetches[i])
        if value is None:
          value = tensor_values[j]
          j += 1
        dtype = self._fetch_handles.get(self._fetches[i])
        if dtype:
          # Handle-producing fetch: wrap the raw value in a session-aware
          # TensorHandle.
          full_values.append(session_ops.TensorHandle(value, dtype, session))
        else:
          full_values.append(value)
        i += 1
    assert j == len(tensor_values)
    return self._fetch_mapper.build_results(full_values)
def _name_list(tensor_list):
  """Utility function for transitioning to the new session API.

  Args:
    tensor_list: a list of `Tensor`s.

  Returns:
    A list of each `Tensor`s name (as byte arrays).
  """
  names = []
  for tensor in tensor_list:
    names.append(compat.as_bytes(tensor.name))
  return names
class _DeviceAttributes(object):
  """Struct-like object describing a device's attributes.

  Each device has 3 key properties:
   - name: the fully-qualified TensorFlow path to the device. For
       example: /job:worker/replica:0/task:3/device:CPU:0
   - device_type: the type of the device (e.g. CPU, GPU, TPU, etc.)
   - memory_limit_bytes: the maximum amount of memory available on the device
       (in bytes).
  """

  def __init__(self, name, device_type, memory_limit_bytes):
    # Canonicalize the raw name once, at construction time.
    self._name = device.canonical_name(name)
    self._device_type = device_type
    self._memory_limit_bytes = memory_limit_bytes

  @property
  def name(self):
    """The canonical, fully-qualified device name."""
    return self._name

  @property
  def device_type(self):
    """The device type (e.g. CPU, GPU, TPU)."""
    return self._device_type

  @property
  def memory_limit_bytes(self):
    """The maximum amount of memory available on the device, in bytes."""
    return self._memory_limit_bytes

  def __repr__(self):
    return '_DeviceAttributes(%s, %s, %d)' % (
        self.name, self.device_type, self.memory_limit_bytes)
class BaseSession(SessionInterface):
"""A class for interacting with a TensorFlow computation.
The BaseSession enables incremental graph building with inline
execution of Operations and evaluation of Tensors.
"""
  def __init__(self, target='', graph=None, config=None):
    """Constructs a new TensorFlow session.

    Args:
      target: (Optional) The TensorFlow execution engine to connect to.
      graph: (Optional) The graph to be used. If this argument is None,
        the default graph will be used.
      config: (Optional) ConfigProto proto used to configure the session.

    Raises:
      tf.errors.OpError: Or one of its subclasses if an error occurs while
        creating the TensorFlow session.
      TypeError: If one of the arguments has the wrong type.
    """
    if graph is None:
      self._graph = ops.get_default_graph()
    else:
      if not isinstance(graph, ops.Graph):
        raise TypeError('graph must be a tf.Graph, but got %s' % type(graph))
      self._graph = graph
    # Lifecycle flags; `close()` below consults them to avoid double-closing.
    self._opened = False
    self._closed = False
    self._current_version = 0
    self._extend_lock = threading.Lock()
    # The target is stored as bytes for the C API.
    if target is not None:
      try:
        self._target = compat.as_bytes(target)
      except TypeError:
        raise TypeError('target must be a string, but got %s' % type(target))
    else:
      self._target = None
    # _dead_handles is guarded by _delete_lock; it is consumed by code
    # outside this chunk (presumably deferred tensor-handle deletion).
    self._delete_lock = threading.Lock()
    self._dead_handles = []
    if config is not None:
      if not isinstance(config, config_pb2.ConfigProto):
        raise TypeError('config must be a tf.ConfigProto, but got %s'
                        % type(config))
      self._config = config
      # Cached so graph_def can pass add_shapes without re-reading config.
      self._add_shapes = config.graph_options.infer_shapes
    else:
      self._config = None
      self._add_shapes = False
    # pylint: disable=protected-access
    # We cache _USE_C_API's value because some test cases will create a session
    # with _USE_C_API = False but set it back to True before calling close().
    self._created_with_new_api = ops._USE_C_API
    # pylint: enable=protected-access
    self._session = None
    opts = tf_session.TF_NewSessionOptions(target=self._target, config=config)
    try:
      with errors.raise_exception_on_not_ok_status() as status:
        if self._created_with_new_api:
          # pylint: disable=protected-access
          self._session = tf_session.TF_NewSession(self._graph._c_graph, opts,
                                                   status)
          # pylint: enable=protected-access
        else:
          self._session = tf_session.TF_NewDeprecatedSession(opts, status)
    finally:
      # Free the C-level options object even if session creation failed.
      tf_session.TF_DeleteSessionOptions(opts)
  def list_devices(self):
    """Lists available devices in this session.

    ```python
    devices = sess.list_devices()
    for d in devices:
      print(d.name)
    ```

    Each element in the list has the following properties:
     - `name`: A string with the full name of the device. ex:
          `/job:worker/replica:0/task:3/device:CPU:0`
     - `device_type`: The type of the device (e.g. `CPU`, `GPU`, `TPU`.)
     - `memory_limit`: The maximum amount of memory available on the device.
          Note: depending on the device, it is possible the usable memory could
          be substantially less.

    Raises:
      tf.errors.OpError: If it encounters an error (e.g. session is in an
        invalid state, or network errors occur).

    Returns:
      A list of devices in the session.
    """
    # The whole sequence runs under one status context so any C API failure
    # (listing, counting, per-device queries) raises as an OpError.
    with errors.raise_exception_on_not_ok_status() as status:
      # Query through whichever C API this session was created with.
      if self._created_with_new_api:
        raw_device_list = tf_session.TF_SessionListDevices(
            self._session, status)
      else:
        raw_device_list = tf_session.TF_DeprecatedSessionListDevices(
            self._session, status)
      device_list = []
      size = tf_session.TF_DeviceListCount(raw_device_list)
      for i in range(size):
        name = tf_session.TF_DeviceListName(raw_device_list, i, status)
        device_type = tf_session.TF_DeviceListType(raw_device_list, i, status)
        memory = tf_session.TF_DeviceListMemoryBytes(raw_device_list, i, status)
        device_list.append(_DeviceAttributes(name, device_type, memory))
      # All attributes have been copied into _DeviceAttributes objects, so
      # the C-owned list can be freed before returning.
      tf_session.TF_DeleteDeviceList(raw_device_list)
      return device_list
  def close(self):
    """Closes this session.

    Calling this method frees all resources associated with the session.

    Raises:
      tf.errors.OpError: Or one of its subclasses if an error occurs while
        closing the TensorFlow session.
    """
    if self._created_with_new_api:
      # New C API path: the _closed flag makes a second close() a no-op.
      if self._session and not self._closed:
        self._closed = True
        with errors.raise_exception_on_not_ok_status() as status:
          tf_session.TF_CloseSession(self._session, status)
    else:
      # Legacy path: runs under _extend_lock and additionally requires that
      # the session was opened (_opened is set elsewhere in this class).
      with self._extend_lock:
        if self._opened and not self._closed:
          self._closed = True
          with errors.raise_exception_on_not_ok_status() as status:
            tf_session.TF_CloseDeprecatedSession(self._session, status)
def __del__(self):
  """Destructor: closes the session and frees the underlying C session."""
  # cleanly ignore all exceptions
  try:
    self.close()
  except Exception:  # pylint: disable=broad-except
    pass
  if self._session is not None:
    try:
      # The status object is intentionally never inspected: errors during
      # final deletion cannot be meaningfully handled in a destructor.
      status = c_api_util.ScopedTFStatus()
      if self._created_with_new_api:
        tf_session.TF_DeleteSession(self._session, status)
      else:
        tf_session.TF_DeleteDeprecatedSession(self._session, status)
    except AttributeError:
      # At shutdown, `c_api_util` or `tf_session` may have been garbage
      # collected, causing the above method calls to fail. In this case,
      # silently leak since the program is about to terminate anyway.
      pass
    self._session = None
@property
def graph(self):
  """Returns the `Graph` object that was launched in this session."""
  return self._graph
@property
def graph_def(self):
  """A serializable version of the underlying TensorFlow graph.

  Returns:
    A graph_pb2.GraphDef proto containing nodes for all of the Operations in
    the underlying TensorFlow graph.
  """
  # Delegate to the graph, honoring this session's shape-annotation setting.
  return self._graph.as_graph_def(add_shapes=self._add_shapes)
@property
def sess_str(self):
  """The target (execution engine address) this session is connected to."""
  return self._target
def as_default(self):
  """Returns a context manager that makes this object the default session.

  Use with the `with` keyword to specify that calls to
  @{tf.Operation.run} or @{tf.Tensor.eval} should be executed in
  this session:

  ```python
  c = tf.constant(..)
  sess = tf.Session()
  with sess.as_default():
    assert tf.get_default_session() is sess
    print(c.eval())
  ```

  To get the current default session, use @{tf.get_default_session}.

  *N.B.* Unlike `with tf.Session():`, this context manager *does not*
  close the session on exit; you must close it explicitly (or enter the
  context again later and close afterwards).

  *N.B.* The default session is a property of the current thread; other
  threads must enter their own `with sess.as_default():` block.

  *N.B.* Entering this block does not change the default graph. If
  `sess.graph` differs from @{tf.get_default_graph}, also enter a
  `with sess.graph.as_default():` block to make it the default graph.

  Returns:
    A context manager using this session as the default session.
  """
  return ops.default_session(self)
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
  """Runs operations and evaluates tensors in `fetches`.

  This method runs one "step" of TensorFlow computation, executing every
  `Operation` and evaluating every `Tensor` in `fetches`, substituting the
  values in `feed_dict` for the corresponding input values.

  `fetches` may be a single graph element, or an arbitrarily nested list,
  tuple, namedtuple, dict, or OrderedDict of graph elements: an
  @{tf.Operation} (fetched value is `None`), a @{tf.Tensor} (a numpy
  ndarray), a @{tf.SparseTensor} (a @{tf.SparseTensorValue}), a
  `get_tensor_handle` op (an ndarray holding the handle), or a `string`
  naming a tensor or operation. The return value mirrors the structure of
  `fetches`, with leaves replaced by the fetched values.

  Each key in `feed_dict` may be a @{tf.Tensor} (value: scalar, string,
  list, or ndarray convertible to its dtype; placeholder shapes are
  checked), a @{tf.SparseTensor} (value: @{tf.SparseTensorValue}), or a
  nested tuple of these (value: matching nested tuple). Every value must
  be convertible to a numpy array of the key's dtype.

  Args:
    fetches: A single graph element, a list of graph elements,
      or a dictionary whose values are graph elements or lists of graph
      elements (described above).
    feed_dict: A dictionary that maps graph elements to values
      (described above).
    options: A [`RunOptions`] protocol buffer controlling this step
      (e.g. turning tracing on).
    run_metadata: A [`RunMetadata`] protocol buffer; non-Tensor output
      of the step (e.g. profiling info when tracing) is collected here.

  Returns:
    Either a single value if `fetches` is a single graph element, or
    a list of values if `fetches` is a list, or a dictionary with the
    same keys as `fetches` if that is a dictionary (described above).

  Raises:
    RuntimeError: If this `Session` is in an invalid state (e.g. has been
      closed).
    TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.
    ValueError: If `fetches` or `feed_dict` keys are invalid or refer to a
      `Tensor` that doesn't exist.
  """
  # Serialize RunOptions into a C buffer only when the caller supplied one;
  # likewise allocate an output buffer only when metadata was requested.
  if options:
    options_ptr = tf_session.TF_NewBufferFromString(
        compat.as_bytes(options.SerializeToString()))
  else:
    options_ptr = None
  run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None

  try:
    result = self._run(None, fetches, feed_dict, options_ptr,
                       run_metadata_ptr)
    if run_metadata:
      # Copy the collected metadata back into the caller's proto.
      proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
      run_metadata.ParseFromString(compat.as_bytes(proto_data))
  finally:
    # Always release the C buffers, even if the run raised.
    if run_metadata_ptr:
      tf_session.TF_DeleteBuffer(run_metadata_ptr)
    if options:
      tf_session.TF_DeleteBuffer(options_ptr)
  return result
def partial_run(self, handle, fetches, feed_dict=None):
  """Continues the execution with more feeds and fetches.

  This is EXPERIMENTAL and subject to change.

  To use partial execution, first call `partial_run_setup()` and then a
  sequence of `partial_run()` calls. `partial_run_setup` declares up front
  the feeds and fetches that the subsequent `partial_run` calls may use.

  The optional `feed_dict` argument allows the caller to override
  the value of tensors in the graph. See run() for more information.

  Example:

  ```python
  a = array_ops.placeholder(dtypes.float32, shape=[])
  b = array_ops.placeholder(dtypes.float32, shape=[])
  c = array_ops.placeholder(dtypes.float32, shape=[])
  r1 = math_ops.add(a, b)
  r2 = math_ops.multiply(r1, c)
  h = sess.partial_run_setup([r1, r2], [a, b, c])
  res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
  res = sess.partial_run(h, r2, feed_dict={c: res})
  ```

  Args:
    handle: A handle for a sequence of partial runs.
    fetches: A single graph element, a list of graph elements,
      or a dictionary whose values are graph elements or lists of graph
      elements (see documentation for `run`).
    feed_dict: A dictionary that maps graph elements to values
      (described above).

  Returns:
    Either a single value if `fetches` is a single graph element, or
    a list of values if `fetches` is a list, or a dictionary with the
    same keys as `fetches` if that is a dictionary
    (see documentation for `run`).

  Raises:
    tf.errors.OpError: Or one of its subclasses on error.
  """
  # TODO(touts): Support feeding and fetching the same tensor.
  # No RunOptions / RunMetadata are supported for partial runs.
  return self._run(handle, fetches, feed_dict, None, None)
def partial_run_setup(self, fetches, feeds=None):
  """Sets up a graph with feeds and fetches for partial run.
  This is EXPERIMENTAL and subject to change.
  Note that contrary to `run`, `feeds` only specifies the graph elements.
  The tensors will be supplied by the subsequent `partial_run` calls.
  Args:
    fetches: A single graph element, or a list of graph elements.
    feeds: A single graph element, or a list of graph elements.
  Returns:
    A handle for partial run.
  Raises:
    RuntimeError: If this `Session` is in an invalid state (e.g. has been
      closed).
    TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.
    tf.errors.OpError: Or one of its subclasses if a TensorFlow error happens.
  """
  def _feed_fn(feed):
    # Expand a feed (e.g. a SparseTensor) into its component tensors using
    # the registered expansion for its type.
    for tensor_type, _, _, feed_fn in _REGISTERED_EXPANSIONS:
      if isinstance(feed, tensor_type):
        return feed_fn(feed)
    raise TypeError('Feed argument %r has invalid type %r'
                    % (feed, type(feed)))

  # Check session.
  if self._closed:
    raise RuntimeError('Attempted to use a closed Session.')
  if self.graph.version == 0:
    raise RuntimeError('The Session graph is empty. Add operations to the '
                       'graph before calling run().')

  if feeds is None:
    feeds = []
  # Create request.
  feed_list = []

  # Validate and process feed_list.
  is_list_feed = isinstance(feeds, (list, tuple))
  if not is_list_feed:
    feeds = [feeds]
  for feed in feeds:
    for subfeed in _feed_fn(feed):
      try:
        subfeed_t = self.graph.as_graph_element(subfeed, allow_tensor=True,
                                                allow_operation=False)
        if self._created_with_new_api:
          # pylint: disable=protected-access
          feed_list.append(subfeed_t._as_tf_output())
          # pylint: enable=protected-access
        else:
          feed_list.append(compat.as_bytes(subfeed_t.name))
      except Exception as e:
        # BUG FIX: the previous code read `e.message`, which does not exist
        # on Python 3 exceptions (and would raise AttributeError here,
        # masking the real error). Derive the message from `e.args` instead
        # and re-raise the original exception type with the added context.
        message = ('Cannot interpret feed_list key as Tensor: '
                   + (str(e.args[0]) if e.args else str(e)))
        e.args = (message,)
        raise e

  # Validate and process fetches.
  # TODO(touts): Support feeding and fetching the same tensor.
  fetch_handler = _FetchHandler(self._graph, fetches, {})

  # Set up a graph with feeds and fetches for partial run.
  def _setup_fn(session, feed_list, fetch_list, target_list):
    self._extend_graph()
    with errors.raise_exception_on_not_ok_status() as status:
      if self._created_with_new_api:
        return tf_session.TF_SessionPRunSetup_wrapper(
            session, feed_list, fetch_list, target_list, status)
      else:
        return tf_session.TF_PRunSetup(session, feed_list, fetch_list,
                                       target_list, status)

  if self._created_with_new_api:
    # pylint: disable=protected-access
    final_fetches = [t._as_tf_output() for t in fetch_handler.fetches()]
    final_targets = [op._c_op for op in fetch_handler.targets()]
    # pylint: enable=protected-access
  else:
    final_fetches = _name_list(fetch_handler.fetches())
    final_targets = _name_list(fetch_handler.targets())

  return self._do_call(_setup_fn, self._session, feed_list, final_fetches,
                       final_targets)
def _run(self, handle, fetches, feed_dict, options, run_metadata):
  """Perform either run or partial_run, depending the presence of `handle`."""
  def _feed_fn(feed, feed_val):
    # Expand a (feed, value) pair into its component (tensor, value) pairs
    # using the registered expansion for the feed's type.
    for tensor_type, _, feed_fn, _ in _REGISTERED_EXPANSIONS:
      if isinstance(feed, tensor_type):
        return feed_fn(feed, feed_val)
    raise TypeError('Feed argument %r has invalid type %r'
                    % (feed, type(feed)))
  # Check session.
  if self._closed:
    raise RuntimeError('Attempted to use a closed Session.')
  if self.graph.version == 0:
    raise RuntimeError('The Session graph is empty. Add operations to the '
                       'graph before calling run().')
  # Create request.
  feed_dict_tensor = {}
  feed_map = {}
  # Validate and process feed_dict.
  feed_handles = {}
  if feed_dict:
    # Flatten nested-tuple keys into individual tensor keys first.
    feed_dict = nest.flatten_dict_items(feed_dict)
    for feed, feed_val in feed_dict.items():
      for subfeed, subfeed_val in _feed_fn(feed, feed_val):
        try:
          subfeed_t = self.graph.as_graph_element(subfeed, allow_tensor=True,
                                                  allow_operation=False)
        except Exception as e:
          raise TypeError('Cannot interpret feed_dict key as Tensor: '
                          + e.args[0])
        if isinstance(subfeed_val, ops.Tensor):
          raise TypeError('The value of a feed cannot be a tf.Tensor object. '
                          'Acceptable feed values include Python scalars, '
                          'strings, lists, numpy ndarrays, or TensorHandles.')
        subfeed_dtype = subfeed_t.dtype.as_numpy_dtype
        # Reject int feeds that would silently truncate/overflow when cast
        # to the tensor's numpy dtype (round-trip check).
        if isinstance(subfeed_val,
                      int) and subfeed_dtype(subfeed_val) != subfeed_val:
          raise TypeError(
              'Type of feed value ' + str(subfeed_val) + ' is not'
              ' compatible with Tensor type ' + str(subfeed_dtype) + '.'
              ' Try explicitly setting the type of the feed tensor'
              ' to a larger type (e.g. int64).')
        is_tensor_handle_feed = isinstance(subfeed_val,
                                           session_ops.TensorHandle)
        if is_tensor_handle_feed:
          # TensorHandles feed the handle's encoded form; the shape check
          # below is skipped because the handle does not carry the shape.
          np_val = subfeed_val.to_numpy_array()
          feed_handles[subfeed_t] = subfeed_val
        else:
          np_val = np.asarray(subfeed_val, dtype=subfeed_dtype)
        if (not is_tensor_handle_feed and
            not subfeed_t.get_shape().is_compatible_with(np_val.shape)):
          raise ValueError(
              'Cannot feed value of shape %r for Tensor %r, '
              'which has shape %r'
              % (np_val.shape, subfeed_t.name, str(subfeed_t.get_shape())))
        if not self.graph.is_feedable(subfeed_t):
          raise ValueError('Tensor %s may not be fed.' % subfeed_t)
        feed_dict_tensor[subfeed_t] = np_val
        feed_map[compat.as_bytes(subfeed_t.name)] = (subfeed_t, subfeed_val)
  # Create a fetch handler to take care of the structure of fetches.
  fetch_handler = _FetchHandler(
      self._graph, fetches, feed_dict_tensor, feed_handles=feed_handles)
  # Run request and get response.
  # We need to keep the returned movers alive for the following _do_run().
  # These movers are no longer needed when _do_run() completes, and
  # are deleted when `movers` goes out of scope when this _run() ends.
  # TODO(yuanbyu, keveman): Revisit whether we should just treat feeding
  # of a handle from a different device as an error.
  _ = self._update_with_movers(feed_dict_tensor, feed_map)
  final_fetches = fetch_handler.fetches()
  final_targets = fetch_handler.targets()
  # We only want to really perform the run if fetches or targets are provided,
  # or if the call is a partial run that specifies feeds.
  if final_fetches or final_targets or (handle and feed_dict_tensor):
    results = self._do_run(handle, final_targets, final_fetches,
                           feed_dict_tensor, options, run_metadata)
  else:
    results = []
  # Map flat results back onto the caller's original fetch structure.
  return fetch_handler.build_results(self, results)
def make_callable(self,
                  fetches,
                  feed_list=None,
                  accept_options=False):
  """Returns a Python callable that runs a particular step.
  The returned callable will take `len(feed_list)` arguments whose types
  must be compatible feed values for the respective elements of `feed_list`.
  For example, if element `i` of `feed_list` is a `tf.Tensor`, the `i`th
  argument to the returned callable must be a numpy ndarray (or something
  convertible to an ndarray) with matching element type and shape. See
  @{tf.Session.run} for details of the allowable feed key and value types.
  The returned callable will have the same return type as
  `tf.Session.run(fetches, ...)`. For example, if `fetches` is a `tf.Tensor`,
  the callable will return a numpy ndarray; if `fetches` is a `tf.Operation`,
  it will return `None`.
  Args:
    fetches: A value or list of values to fetch. See @{tf.Session.run}
      for details of the allowable fetch types.
    feed_list: (Optional.) A list of `feed_dict` keys. See
      @{tf.Session.run} for details of the allowable feed key types.
    accept_options: (Optional.) Iff `True`, the returned `Callable` will be
      able to accept @{tf.RunOptions} and @{tf.RunMetadata} as optional
      keyword arguments `options` and `run_metadata`, respectively, with
      the same syntax and semantics as @{tf.Session.run}, which is useful
      for certain use cases (profiling and debugging) but will result in
      measurable slowdown of the `Callable`'s performance. Default: `False`.
  Returns:
    A function that when called will execute the step defined by
    `feed_list` and `fetches` in this session.
  Raises:
    TypeError: If `fetches` or `feed_list` cannot be interpreted
      as arguments to @{tf.Session.run}.
  """
  # This implementation drives the deprecated TF_Run interface directly and
  # is therefore incompatible with sessions created with the new C API.
  assert not self._created_with_new_api, ('session.make_callable() doesn\'t '
                                          'work with C API')
  if feed_list is not None:
    if not isinstance(feed_list, (list, tuple)):
      raise TypeError('`feed_list` must be a list or tuple.')
    # Delegate any non-empty feed lists to the existing `run()` logic.
    # TODO(mrry): Refactor the feed handling logic from
    # `Session._run()` so that we can convert the feeds to a list of
    # strings here.
    def _generic_run(*feed_args, **kwargs):
      feed_dict = {feed: feed_val
                   for feed, feed_val in zip(feed_list, feed_args)}
      return self.run(fetches, feed_dict=feed_dict, **kwargs)
    return _generic_run
  # Ensure any changes to the graph are reflected in the runtime.
  # Note that we don't need to do this on subsequent calls to the
  # returned object, because the arguments to `fetches` must already be
  # in the graph.
  self._extend_graph()
  # Create a fetch handler to take care of the structure of fetches.
  fetch_handler = _FetchHandler(self._graph, fetches, {})
  fetch_list_as_strings = _name_list(fetch_handler.fetches())
  target_list_as_strings = _name_list(fetch_handler.targets())

  def _callable_template_with_options_and_metadata(
      fetch_list_as_strings,
      target_list_as_strings,
      fetch_handler,
      options=None,
      run_metadata=None):
    """Template callable that accepts RunOptions and RunMetadata."""
    # Mirrors the buffer lifecycle in `run()`: serialize options in,
    # allocate a metadata buffer out, and free both in `finally`.
    options_ptr = tf_session.TF_NewBufferFromString(
        compat.as_bytes(options.SerializeToString())) if options else None
    run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None
    try:
      with errors.raise_exception_on_not_ok_status() as status:
        results = tf_session.TF_Run(
            self._session, options_ptr, {}, fetch_list_as_strings,
            target_list_as_strings, status, run_metadata_ptr)
      if fetch_handler:
        results = fetch_handler.build_results(self, results)
      else:
        results = results[0] if results else None
      if run_metadata:
        proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
        run_metadata.ParseFromString(compat.as_bytes(proto_data))
    finally:
      if run_metadata_ptr:
        tf_session.TF_DeleteBuffer(run_metadata_ptr)
      if options:
        tf_session.TF_DeleteBuffer(options_ptr)
    return results

  if accept_options:
    return functools.partial(
        _callable_template_with_options_and_metadata, fetch_list_as_strings,
        target_list_as_strings, fetch_handler)
  elif isinstance(fetches, ops.Operation):
    # Special case for fetching a single operation, because the
    # function will have no return value.
    assert not fetch_list_as_strings
    assert len(target_list_as_strings) == 1

    def _single_operation_run():
      with errors.raise_exception_on_not_ok_status() as status:
        tf_session.TF_Run(self._session, None, {}, [],
                          target_list_as_strings, status, None)
    return _single_operation_run
  elif isinstance(fetches, ops.Tensor):
    # Special case for fetching a single tensor, because the
    # function can return the result of `TF_Run()` directly.
    assert len(fetch_list_as_strings) == 1
    assert not target_list_as_strings

    def _single_tensor_run():
      with errors.raise_exception_on_not_ok_status() as status:
        results = tf_session.TF_Run(self._session, None, {},
                                    fetch_list_as_strings, [], status, None)
      return results[0]
    return _single_tensor_run
  else:
    # In all other cases, we must use `fetch_handler` to build the
    # results for us.
    def _fetch_handler_run():
      with errors.raise_exception_on_not_ok_status() as status:
        results = tf_session.TF_Run(self._session, None, {},
                                    fetch_list_as_strings,
                                    target_list_as_strings, status, None)
      return fetch_handler.build_results(self, results)
    return _fetch_handler_run
# Captures the name of a node in an error status. Runtime error messages
# embed the offending node as "[[Node: <name> = ...]]"; group 1 extracts
# <name> so `_do_call` can look the operation up in the graph.
_NODEDEF_NAME_RE = re.compile(r'\[\[Node: ([^ ]*?) =')
def _do_run(self, handle, target_list, fetch_list, feed_dict,
            options, run_metadata):
  """Runs a step based on the given fetches and feeds.
  Args:
    handle: a handle for partial_run. None if this is just a call to run().
    target_list: A list of operations to be run, but not fetched.
    fetch_list: A list of tensors to be fetched.
    feed_dict: A dictionary that maps tensors to numpy ndarrays.
    options: A (pointer to a) [`RunOptions`] protocol buffer, or None
    run_metadata: A (pointer to a) [`RunMetadata`] protocol buffer, or None
  Returns:
    A list of numpy ndarrays, corresponding to the elements of
    `fetch_list`. If the ith element of `fetch_list` contains the
    name of an operation, the first Tensor output of that operation
    will be returned for that element.
  Raises:
    tf.errors.OpError: Or one of its subclasses on error.
  """
  # The new C API identifies feeds/fetches by TF_Output and targets by
  # TF_Operation; the deprecated API uses their string names instead.
  if self._created_with_new_api:
    # pylint: disable=protected-access
    feeds = dict((t._as_tf_output(), v) for t, v in feed_dict.items())
    fetches = [t._as_tf_output() for t in fetch_list]
    targets = [op._c_op for op in target_list]
    # pylint: enable=protected-access
  else:
    feeds = dict((compat.as_bytes(t.name), v) for t, v in feed_dict.items())
    fetches = _name_list(fetch_list)
    targets = _name_list(target_list)

  def _run_fn(session, feed_dict, fetch_list, target_list, options,
              run_metadata):
    # Ensure any changes to the graph are reflected in the runtime.
    self._extend_graph()
    with errors.raise_exception_on_not_ok_status() as status:
      if self._created_with_new_api:
        return tf_session.TF_SessionRun_wrapper(
            session, options, feed_dict, fetch_list, target_list,
            run_metadata, status)
      else:
        return tf_session.TF_Run(session, options,
                                 feed_dict, fetch_list, target_list,
                                 status, run_metadata)

  def _prun_fn(session, handle, feed_dict, fetch_list):
    # Partial runs were set up without targets; reject any here.
    if target_list:
      raise RuntimeError('partial_run() requires empty target_list.')
    with errors.raise_exception_on_not_ok_status() as status:
      if self._created_with_new_api:
        return tf_session.TF_SessionPRun_wrapper(session, handle, feed_dict,
                                                 fetch_list, status)
      else:
        return tf_session.TF_PRun(session, handle, feed_dict, fetch_list,
                                  status)

  # `handle` distinguishes a full run from a continuation of a partial run.
  if handle is None:
    return self._do_call(_run_fn, self._session, feeds, fetches, targets,
                         options, run_metadata)
  else:
    return self._do_call(_prun_fn, self._session, handle, feeds, fetches)
def _do_call(self, fn, *args):
  """Invokes `fn(*args)`, enriching any OpError with graph information.

  If the error message names a node ("[[Node: ... =") that exists in this
  session's graph, the error is re-raised as the same type with the
  node_def and Operation attached, which gives users a Python traceback
  into the graph construction site.
  """
  try:
    return fn(*args)
  except errors.OpError as e:
    message = compat.as_text(e.message)
    m = BaseSession._NODEDEF_NAME_RE.search(message)
    node_def = None
    op = None
    if m is not None:
      node_name = m.group(1)
      try:
        op = self._graph.get_operation_by_name(node_name)
        node_def = op.node_def
      except KeyError:
        # The node may not exist in the Python graph (e.g. it was created
        # by a rewrite pass); fall back to the bare message.
        pass
    # Re-raise the same OpError subclass so callers' except clauses match.
    raise type(e)(node_def, op, message)
def _extend_graph(self):
  """Pushes any new graph nodes to the runtime (deprecated API only)."""
  # Nothing to do if we're using the new session interface
  # TODO(skyewm): remove this function altogether eventually
  if self._created_with_new_api: return

  # Ensure any changes to the graph are reflected in the runtime.
  with self._extend_lock:
    # Only ship the delta since the last extension, tracked by
    # `_current_version`.
    if self._graph.version > self._current_version:
      # pylint: disable=protected-access
      graph_def, self._current_version = self._graph._as_graph_def(
          from_version=self._current_version,
          add_shapes=self._add_shapes)
      # pylint: enable=protected-access
      with errors.raise_exception_on_not_ok_status() as status:
        tf_session.TF_ExtendGraph(
            self._session, graph_def.SerializeToString(), status)
      self._opened = True
# The threshold to run garbage collection to delete dead tensors.
# `_register_dead_handle` batches handle deletion until this many dead
# handles have accumulated, amortizing the cost of the cleanup run() call.
_DEAD_HANDLES_THRESHOLD = 10
def _register_dead_handle(self, handle):
  """Records a dead tensor handle, batch-deleting once a threshold is hit.

  Handles accumulate under `_delete_lock`; when the count reaches
  `BaseSession._DEAD_HANDLES_THRESHOLD`, the batch is drained and the
  corresponding deleter ops are executed in a single `run()` call.
  """
  dead_batch = None
  with self._delete_lock:
    self._dead_handles.append(handle)
    if len(self._dead_handles) == BaseSession._DEAD_HANDLES_THRESHOLD:
      dead_batch = self._dead_handles
      self._dead_handles = []
  if not dead_batch:
    return
  # Build one feed/fetch set covering every dead tensor and delete them
  # all in a single step.
  feeds = {}
  fetches = []
  for deleter_key, tensor_handle in enumerate(dead_batch):
    holder, deleter = session_ops._get_handle_deleter(self.graph,
                                                      deleter_key,
                                                      tensor_handle)
    feeds[holder] = tensor_handle
    fetches.append(deleter)
  self.run(fetches, feed_dict=feeds)
def _update_with_movers(self, feed_dict, feed_map):
  """Moves fed tensor handles to the device their placeholder expects.

  If a tensor handle is fed to a device-incompatible placeholder, run a
  "mover" op that transfers the tensor to the right device, generate a new
  tensor handle, and update `feed_dict` to use the new handle.

  Args:
    feed_dict: Mapping from feed tensors to numpy values; updated in place
      for any moved handles.
    feed_map: Mapping from feed names to (tensor, original value) pairs.

  Returns:
    The list of new handles produced by the movers (empty if no tensor
    needed to be moved). Callers must keep these alive until the
    subsequent `_do_run()` completes.
  """
  handle_movers = []
  for feed_name, val in feed_map.items():
    mover = session_ops._get_handle_mover(self.graph, *val)
    if mover:
      handle_movers.append((feed_name, val[1], mover))
  # Transfer a tensor to the right device if needed.
  if not handle_movers:
    return []
  feeds = {}
  fetches = []
  for _, handle, mover in handle_movers:
    feeds[mover[0]] = handle
    fetches.append(mover[1])
  handles = self.run(fetches, feed_dict=feeds)
  for handle_mover, handle in zip(handle_movers, handles):
    # BUG FIX: `np.object` was deprecated in NumPy 1.20 and removed in
    # 1.24; the builtin `object` is the exact equivalent dtype.
    np_val = np.array(handle.handle, dtype=object)
    feed_name = handle_mover[0]
    feed_tensor = feed_map[feed_name][0]
    feed_dict[feed_tensor] = np_val
  return handles
class Session(BaseSession):
  """A class for running TensorFlow operations.
  A `Session` object encapsulates the environment in which `Operation`
  objects are executed, and `Tensor` objects are evaluated. For
  example:
  ```python
  # Build a graph.
  a = tf.constant(5.0)
  b = tf.constant(6.0)
  c = a * b
  # Launch the graph in a session.
  sess = tf.Session()
  # Evaluate the tensor `c`.
  print(sess.run(c))
  ```
  A session may own resources, such as
  @{tf.Variable}, @{tf.QueueBase},
  and @{tf.ReaderBase}. It is important to release
  these resources when they are no longer required. To do this, either
  invoke the @{tf.Session.close} method on the session, or use
  the session as a context manager. The following two examples are
  equivalent:
  ```python
  # Using the `close()` method.
  sess = tf.Session()
  sess.run(...)
  sess.close()
  # Using the context manager.
  with tf.Session() as sess:
    sess.run(...)
  ```
  The [`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)
  protocol buffer exposes various configuration options for a
  session. For example, to create a session that uses soft constraints
  for device placement, and log the resulting placement decisions,
  create a session as follows:
  ```python
  # Launch the graph in a session that allows soft device placement and
  # logs the placement decisions.
  sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=True))
  ```
  """

  def __init__(self, target='', graph=None, config=None):
    """Creates a new TensorFlow session.
    If no `graph` argument is specified when constructing the session,
    the default graph will be launched in the session. If you are
    using more than one graph (created with `tf.Graph()` in the same
    process, you will have to use different sessions for each graph,
    but each graph can be used in multiple sessions. In this case, it
    is often clearer to pass the graph to be launched explicitly to
    the session constructor.
    Args:
      target: (Optional.) The execution engine to connect to.
        Defaults to using an in-process engine. See
        @{$distributed$Distributed TensorFlow}
        for more examples.
      graph: (Optional.) The `Graph` to be launched (described above).
      config: (Optional.) A [`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)
        protocol buffer with configuration options for the session.
    """
    super(Session, self).__init__(target, graph, config=config)
    # NOTE(mrry): Create these on first `__enter__` to avoid a reference cycle.
    self._default_graph_context_manager = None
    self._default_session_context_manager = None

  def __enter__(self):
    # A non-None graph context manager means we are already inside a
    # `with session:` block; this context manager is not re-entrant.
    if self._default_graph_context_manager is None:
      self._default_graph_context_manager = self.graph.as_default()
    else:
      raise RuntimeError('Session context managers are not re-entrant. '
                         'Use `Session.as_default()` if you want to enter '
                         'a session multiple times.')
    if self._default_session_context_manager is None:
      self._default_session_context_manager = self.as_default()
    # Enter the graph first, then the session; `__exit__` unwinds in the
    # reverse order.
    self._default_graph_context_manager.__enter__()
    return self._default_session_context_manager.__enter__()

  def __exit__(self, exec_type, exec_value, exec_tb):
    if exec_type is errors.OpError:
      logging.error('Session closing due to OpError: %s', (exec_value,))
    # Exit session first, then graph (reverse of __enter__), and clear the
    # managers so the session can be entered again later.
    self._default_session_context_manager.__exit__(
        exec_type, exec_value, exec_tb)
    self._default_graph_context_manager.__exit__(exec_type, exec_value, exec_tb)
    self._default_session_context_manager = None
    self._default_graph_context_manager = None
    # Unlike `as_default()`, the `with session:` form closes the session.
    self.close()

  @staticmethod
  def reset(target, containers=None, config=None):
    """Resets resource containers on `target`, and close all connected sessions.
    A resource container is distributed across all workers in the
    same cluster as `target`. When a resource container on `target`
    is reset, resources associated with that container will be cleared.
    In particular, all Variables in the container will become undefined:
    they lose their values and shapes.
    NOTE:
    (i) reset() is currently only implemented for distributed sessions.
    (ii) Any sessions on the master named by `target` will be closed.
    If no resource containers are provided, all containers are reset.
    Args:
      target: The execution engine to connect to.
      containers: A list of resource container name strings, or `None` if all of
        all the containers are to be reset.
      config: (Optional.) Protocol buffer with configuration options.
    Raises:
      tf.errors.OpError: Or one of its subclasses if an error occurs while
        resetting containers.
    """
    # The C API expects bytes; convert target and container names up front.
    if target is not None:
      target = compat.as_bytes(target)
    if containers is not None:
      containers = [compat.as_bytes(c) for c in containers]
    else:
      containers = []
    tf_session.TF_Reset(target, containers, config)
class InteractiveSession(BaseSession):
"""A TensorFlow `Session` for use in interactive contexts, such as a shell.
The only difference with a regular `Session` is that an `InteractiveSession`
installs itself as the default session on construction.
The methods @{tf.Tensor.eval}
and @{tf.Operation.run}
will use that session to run ops.
This is convenient in interactive shells and [IPython
notebooks](http://ipython.org), as it avoids having to pass an explicit
`Session` object to run ops.
For example:
```python
sess = tf.InteractiveSession()
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
# We can just use 'c.eval()' without passing 'sess'
print(c.eval())
sess.close()
```
Note that a regular session installs itself as the default session when it
is created in a `with` statement. The common usage in non-interactive
programs is to follow that pattern:
```python
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
with tf.Session():
# We can also use 'c.eval()' here.
print(c.eval())
```
"""
def __init__(self, target='', graph=None, config=None):
"""Creates a new interactive TensorFlow session.
If no `graph` argument is specified when constructing the session,
the default graph will be launched in the session. If you are
using more than one graph (created with `tf.Graph()` in the same
process, you will have to use different sessions for each graph,
but each graph can be used in multiple sessions. In this case, it
is often clearer to pass the graph to be launched explicitly to
the session constructor.
Args:
target: (Optional.) The execution engine to connect to.
Defaults to using an in-process engine.
graph: (Optional.) The `Graph` to be launched (described above).
config: (Optional) `ConfigProto` proto used to configure the session.
"""
if not config:
# If config is not provided, choose some reasonable defaults for
# interactive use:
#
# - Grow GPU memory as needed at the cost of fragmentation.
gpu_options = config_pb2.GPUOptions(allow_growth=True)
config = config_pb2.ConfigProto(gpu_options=gpu_options)
# Interactive sessions always place pruned graphs.
config.graph_options.place_pruned_graph = True
super(InteractiveSession, self).__init__(target, graph, config)
self._default_session = self.as_default()
self._default_session.enforce_nesting = False
self._default_session.__enter__()
self._explicit_graph = graph
if self._explicit_graph is not None:
self._default_graph = graph.as_default()
self._default_graph.enforce_nesting = False
self._default_graph.__enter__()
  def close(self):
    """Closes an `InteractiveSession`.

    Releases the session's resources and uninstalls the default-session
    (and, if one was installed in `__init__`, the default-graph) context
    managers that were entered when the session was created.
    """
    super(InteractiveSession, self).close()
    # Pop the default-graph context first (it was entered last), then
    # the default-session context, mirroring the order in __init__.
    if self._explicit_graph is not None:
      self._default_graph.__exit__(None, None, None)
    self._default_session.__exit__(None, None, None)
|
{
"pile_set_name": "Github"
}
|
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Net;
using NLog;
using NzbDrone.Common.Disk;
using NzbDrone.Common.Extensions;
using NzbDrone.Common.Http;
using NzbDrone.Core.Configuration;
using NzbDrone.Core.Extras.Files;
using NzbDrone.Core.Extras.Metadata.Files;
using NzbDrone.Core.Extras.Others;
using NzbDrone.Core.MediaFiles;
using NzbDrone.Core.Tv;
namespace NzbDrone.Core.Extras.Metadata
{
    /// <summary>
    /// Creates, moves and cleans up metadata files (series/episode NFO-style
    /// metadata and image files) on behalf of all enabled metadata consumers.
    /// </summary>
    public class MetadataService : ExtraFileManager<MetadataFile>
    {
        private readonly IMetadataFactory _metadataFactory;
        private readonly ICleanMetadataService _cleanMetadataService;
        private readonly IRecycleBinProvider _recycleBinProvider;
        private readonly IOtherExtraFileRenamer _otherExtraFileRenamer;
        private readonly IDiskTransferService _diskTransferService;
        private readonly IDiskProvider _diskProvider;
        private readonly IHttpClient _httpClient;
        private readonly IMediaFileAttributeService _mediaFileAttributeService;
        private readonly IMetadataFileService _metadataFileService;
        private readonly Logger _logger;

        public MetadataService(IConfigService configService,
                               IDiskProvider diskProvider,
                               IDiskTransferService diskTransferService,
                               IRecycleBinProvider recycleBinProvider,
                               IOtherExtraFileRenamer otherExtraFileRenamer,
                               IMetadataFactory metadataFactory,
                               ICleanMetadataService cleanMetadataService,
                               IHttpClient httpClient,
                               IMediaFileAttributeService mediaFileAttributeService,
                               IMetadataFileService metadataFileService,
                               Logger logger)
            : base(configService, diskProvider, diskTransferService, logger)
        {
            _metadataFactory = metadataFactory;
            _cleanMetadataService = cleanMetadataService;
            _otherExtraFileRenamer = otherExtraFileRenamer;
            _recycleBinProvider = recycleBinProvider;
            _diskTransferService = diskTransferService;
            _diskProvider = diskProvider;
            _httpClient = httpClient;
            _mediaFileAttributeService = mediaFileAttributeService;
            _metadataFileService = metadataFileService;
            _logger = logger;
        }

        // Runs before other extra-file managers (lowest order wins).
        public override int Order => 0;

        /// <summary>
        /// Regenerates series-level image files after media covers changed,
        /// for every enabled consumer. Returns the upserted metadata records.
        /// </summary>
        public override IEnumerable<ExtraFile> CreateAfterMediaCoverUpdate(Series series)
        {
            var metadataFiles = _metadataFileService.GetFilesBySeries(series.Id);
            _cleanMetadataService.Clean(series);

            if (!_diskProvider.FolderExists(series.Path))
            {
                _logger.Info("Series folder does not exist, skipping metadata image creation");
                return Enumerable.Empty<MetadataFile>();
            }

            var files = new List<MetadataFile>();

            foreach (var consumer in _metadataFactory.Enabled())
            {
                var consumerFiles = GetMetadataFilesForConsumer(consumer, metadataFiles);
                files.AddRange(ProcessSeriesImages(consumer, series, consumerFiles));
            }

            _metadataFileService.Upsert(files);
            return files;
        }

        /// <summary>
        /// Full refresh after a disk scan: series metadata + images, season
        /// images, and per-episode metadata + images for each episode file.
        /// </summary>
        public override IEnumerable<ExtraFile> CreateAfterSeriesScan(Series series, List<EpisodeFile> episodeFiles)
        {
            var metadataFiles = _metadataFileService.GetFilesBySeries(series.Id);
            _cleanMetadataService.Clean(series);

            if (!_diskProvider.FolderExists(series.Path))
            {
                _logger.Info("Series folder does not exist, skipping metadata creation");
                return Enumerable.Empty<MetadataFile>();
            }

            var files = new List<MetadataFile>();

            foreach (var consumer in _metadataFactory.Enabled())
            {
                var consumerFiles = GetMetadataFilesForConsumer(consumer, metadataFiles);
                files.AddIfNotNull(ProcessSeriesMetadata(consumer, series, consumerFiles));
                files.AddRange(ProcessSeriesImages(consumer, series, consumerFiles));
                files.AddRange(ProcessSeasonImages(consumer, series, consumerFiles));

                foreach (var episodeFile in episodeFiles)
                {
                    files.AddIfNotNull(ProcessEpisodeMetadata(consumer, series, episodeFile, consumerFiles));
                    files.AddRange(ProcessEpisodeImages(consumer, series, episodeFile, consumerFiles));
                }
            }

            _metadataFileService.Upsert(files);
            return files;
        }

        /// <summary>
        /// Creates per-episode metadata and images for a freshly imported
        /// episode file. No existing records are consulted (empty lists).
        /// </summary>
        public override IEnumerable<ExtraFile> CreateAfterEpisodeImport(Series series, EpisodeFile episodeFile)
        {
            var files = new List<MetadataFile>();

            foreach (var consumer in _metadataFactory.Enabled())
            {
                files.AddIfNotNull(ProcessEpisodeMetadata(consumer, series, episodeFile, new List<MetadataFile>()));
                files.AddRange(ProcessEpisodeImages(consumer, series, episodeFile, new List<MetadataFile>()));
            }

            _metadataFileService.Upsert(files);
            return files;
        }

        /// <summary>
        /// Creates series/season level artifacts after new series/season
        /// folders were created. Each folder argument may be null/blank when
        /// the corresponding folder was not created.
        /// </summary>
        public override IEnumerable<ExtraFile> CreateAfterEpisodeFolder(Series series, string seriesFolder, string seasonFolder)
        {
            var metadataFiles = _metadataFileService.GetFilesBySeries(series.Id);

            if (seriesFolder.IsNullOrWhiteSpace() && seasonFolder.IsNullOrWhiteSpace())
            {
                return new List<MetadataFile>();
            }

            var files = new List<MetadataFile>();

            foreach (var consumer in _metadataFactory.Enabled())
            {
                var consumerFiles = GetMetadataFilesForConsumer(consumer, metadataFiles);

                if (seriesFolder.IsNotNullOrWhiteSpace())
                {
                    files.AddIfNotNull(ProcessSeriesMetadata(consumer, series, consumerFiles));
                    files.AddRange(ProcessSeriesImages(consumer, series, consumerFiles));
                }

                if (seasonFolder.IsNotNullOrWhiteSpace())
                {
                    files.AddRange(ProcessSeasonImages(consumer, series, consumerFiles));
                }
            }

            _metadataFileService.Upsert(files);
            return files;
        }

        /// <summary>
        /// Moves known metadata files to their new names after episode files
        /// were renamed, asking each consumer for the new file name.
        /// Note: iterates ALL available providers (not just enabled ones) so
        /// files of disabled consumers are kept in sync too.
        /// </summary>
        public override IEnumerable<ExtraFile> MoveFilesAfterRename(Series series, List<EpisodeFile> episodeFiles)
        {
            var metadataFiles = _metadataFileService.GetFilesBySeries(series.Id);
            var movedFiles = new List<MetadataFile>();

            // TODO: Move EpisodeImage and EpisodeMetadata metadata files, instead of relying on consumers to do it
            // (Xbmc's EpisodeImage is more than just the extension)
            foreach (var consumer in _metadataFactory.GetAvailableProviders())
            {
                foreach (var episodeFile in episodeFiles)
                {
                    var metadataFilesForConsumer = GetMetadataFilesForConsumer(consumer, metadataFiles).Where(m => m.EpisodeFileId == episodeFile.Id).ToList();

                    foreach (var metadataFile in metadataFilesForConsumer)
                    {
                        var newFileName = consumer.GetFilenameAfterMove(series, episodeFile, metadataFile);
                        var existingFileName = Path.Combine(series.Path, metadataFile.RelativePath);

                        if (newFileName.PathNotEquals(existingFileName))
                        {
                            try
                            {
                                _diskProvider.MoveFile(existingFileName, newFileName);
                                metadataFile.RelativePath = series.Path.GetRelativePath(newFileName);
                                movedFiles.Add(metadataFile);
                            }
                            catch (Exception ex)
                            {
                                // Best effort: a failed move is logged but does not abort the rename pass.
                                _logger.Warn(ex, "Unable to move metadata file after rename: {0}", existingFileName);
                            }
                        }
                    }
                }
            }

            _metadataFileService.Upsert(movedFiles);
            return movedFiles;
        }

        // Metadata files are never imported alongside episode files.
        public override ExtraFile Import(Series series, EpisodeFile episodeFile, string path, string extension, bool readOnly)
        {
            return null;
        }

        // Filters the series' metadata records down to those owned by the
        // given consumer (matched by the consumer's concrete type name).
        private List<MetadataFile> GetMetadataFilesForConsumer(IMetadata consumer, List<MetadataFile> seriesMetadata)
        {
            return seriesMetadata.Where(c => c.Consumer == consumer.GetType().Name).ToList();
        }

        // Writes the series-level metadata file if its content changed.
        // Returns the record to upsert, or null when nothing changed.
        private MetadataFile ProcessSeriesMetadata(IMetadata consumer, Series series, List<MetadataFile> existingMetadataFiles)
        {
            var seriesMetadata = consumer.SeriesMetadata(series);

            if (seriesMetadata == null)
            {
                return null;
            }

            var hash = seriesMetadata.Contents.SHA256Hash();

            var metadata = GetMetadataFile(series, existingMetadataFiles, e => e.Type == MetadataType.SeriesMetadata) ??
                           new MetadataFile
                           {
                               SeriesId = series.Id,
                               Consumer = consumer.GetType().Name,
                               Type = MetadataType.SeriesMetadata
                           };

            if (hash == metadata.Hash)
            {
                // Same content; only persist if the expected path changed.
                if (seriesMetadata.RelativePath != metadata.RelativePath)
                {
                    metadata.RelativePath = seriesMetadata.RelativePath;

                    return metadata;
                }

                return null;
            }

            var fullPath = Path.Combine(series.Path, seriesMetadata.RelativePath);

            _logger.Debug("Writing Series Metadata to: {0}", fullPath);
            SaveMetadataFile(fullPath, seriesMetadata.Contents);

            metadata.Hash = hash;
            metadata.RelativePath = seriesMetadata.RelativePath;
            metadata.Extension = Path.GetExtension(fullPath);

            return metadata;
        }

        // Writes (and, if renamed, moves) the per-episode metadata file.
        // Returns the record to upsert, or null when the content is unchanged.
        private MetadataFile ProcessEpisodeMetadata(IMetadata consumer, Series series, EpisodeFile episodeFile, List<MetadataFile> existingMetadataFiles)
        {
            var episodeMetadata = consumer.EpisodeMetadata(series, episodeFile);

            if (episodeMetadata == null)
            {
                return null;
            }

            var fullPath = Path.Combine(series.Path, episodeMetadata.RelativePath);

            // Make room: an unrelated "other" extra file occupying the target
            // name is renamed away first.
            _otherExtraFileRenamer.RenameOtherExtraFile(series, fullPath);

            var existingMetadata = GetMetadataFile(series, existingMetadataFiles, c => c.Type == MetadataType.EpisodeMetadata &&
                                                                                      c.EpisodeFileId == episodeFile.Id);

            if (existingMetadata != null)
            {
                var existingFullPath = Path.Combine(series.Path, existingMetadata.RelativePath);
                if (fullPath.PathNotEquals(existingFullPath))
                {
                    _diskTransferService.TransferFile(existingFullPath, fullPath, TransferMode.Move);
                    existingMetadata.RelativePath = episodeMetadata.RelativePath;
                }
            }

            var hash = episodeMetadata.Contents.SHA256Hash();

            var metadata = existingMetadata ??
                           new MetadataFile
                           {
                               SeriesId = series.Id,
                               SeasonNumber = episodeFile.SeasonNumber,
                               EpisodeFileId = episodeFile.Id,
                               Consumer = consumer.GetType().Name,
                               Type = MetadataType.EpisodeMetadata,
                               RelativePath = episodeMetadata.RelativePath,
                               Extension = Path.GetExtension(fullPath)
                           };

            if (hash == metadata.Hash)
            {
                return null;
            }

            _logger.Debug("Writing Episode Metadata to: {0}", fullPath);
            SaveMetadataFile(fullPath, episodeMetadata.Contents);

            metadata.Hash = hash;

            return metadata;
        }

        // Downloads series-level images that are not yet on disk. Returns the
        // records to upsert (existing ones are reused when paths match).
        private List<MetadataFile> ProcessSeriesImages(IMetadata consumer, Series series, List<MetadataFile> existingMetadataFiles)
        {
            var result = new List<MetadataFile>();

            foreach (var image in consumer.SeriesImages(series))
            {
                var fullPath = Path.Combine(series.Path, image.RelativePath);

                if (_diskProvider.FileExists(fullPath))
                {
                    _logger.Debug("Series image already exists: {0}", fullPath);
                    continue;
                }

                _otherExtraFileRenamer.RenameOtherExtraFile(series, fullPath);

                var metadata = GetMetadataFile(series, existingMetadataFiles, c => c.Type == MetadataType.SeriesImage &&
                                                                                  c.RelativePath == image.RelativePath) ??
                               new MetadataFile
                               {
                                   SeriesId = series.Id,
                                   Consumer = consumer.GetType().Name,
                                   Type = MetadataType.SeriesImage,
                                   RelativePath = image.RelativePath,
                                   Extension = Path.GetExtension(fullPath)
                               };

                DownloadImage(series, image);

                result.Add(metadata);
            }

            return result;
        }

        // Downloads season-level images for every season of the series.
        private List<MetadataFile> ProcessSeasonImages(IMetadata consumer, Series series, List<MetadataFile> existingMetadataFiles)
        {
            var result = new List<MetadataFile>();

            foreach (var season in series.Seasons)
            {
                foreach (var image in consumer.SeasonImages(series, season))
                {
                    var fullPath = Path.Combine(series.Path, image.RelativePath);

                    if (_diskProvider.FileExists(fullPath))
                    {
                        _logger.Debug("Season image already exists: {0}", fullPath);
                        continue;
                    }

                    _otherExtraFileRenamer.RenameOtherExtraFile(series, fullPath);

                    var metadata = GetMetadataFile(series, existingMetadataFiles, c => c.Type == MetadataType.SeasonImage &&
                                                                                      c.SeasonNumber == season.SeasonNumber &&
                                                                                      c.RelativePath == image.RelativePath) ??
                                   new MetadataFile
                                   {
                                       SeriesId = series.Id,
                                       SeasonNumber = season.SeasonNumber,
                                       Consumer = consumer.GetType().Name,
                                       Type = MetadataType.SeasonImage,
                                       RelativePath = image.RelativePath,
                                       Extension = Path.GetExtension(fullPath)
                                   };

                    DownloadImage(series, image);

                    result.Add(metadata);
                }
            }

            return result;
        }

        // Downloads episode images for one episode file, moving a previously
        // known image into place when the consumer's target path changed.
        private List<MetadataFile> ProcessEpisodeImages(IMetadata consumer, Series series, EpisodeFile episodeFile, List<MetadataFile> existingMetadataFiles)
        {
            var result = new List<MetadataFile>();

            foreach (var image in consumer.EpisodeImages(series, episodeFile))
            {
                var fullPath = Path.Combine(series.Path, image.RelativePath);

                if (_diskProvider.FileExists(fullPath))
                {
                    _logger.Debug("Episode image already exists: {0}", fullPath);
                    continue;
                }

                _otherExtraFileRenamer.RenameOtherExtraFile(series, fullPath);

                var existingMetadata = GetMetadataFile(series, existingMetadataFiles, c => c.Type == MetadataType.EpisodeImage &&
                                                                                          c.EpisodeFileId == episodeFile.Id);

                if (existingMetadata != null)
                {
                    var existingFullPath = Path.Combine(series.Path, existingMetadata.RelativePath);
                    if (fullPath.PathNotEquals(existingFullPath))
                    {
                        _diskTransferService.TransferFile(existingFullPath, fullPath, TransferMode.Move);
                        existingMetadata.RelativePath = image.RelativePath;

                        // NOTE(review): this early return discards any images already
                        // accumulated in `result` and skips the remaining images of
                        // this episode file — looks suspicious; confirm intentional.
                        return new List<MetadataFile>{ existingMetadata };
                    }
                }

                var metadata = existingMetadata ??
                               new MetadataFile
                               {
                                   SeriesId = series.Id,
                                   SeasonNumber = episodeFile.SeasonNumber,
                                   EpisodeFileId = episodeFile.Id,
                                   Consumer = consumer.GetType().Name,
                                   Type = MetadataType.EpisodeImage,
                                   RelativePath = image.RelativePath,
                                   Extension = Path.GetExtension(fullPath)
                               };

                DownloadImage(series, image);

                result.Add(metadata);
            }

            return result;
        }

        // Fetches an image via HTTP (URLs) or copies it from disk (local
        // paths), then applies the configured file permissions. Failures are
        // logged, never thrown.
        private void DownloadImage(Series series, ImageFileResult image)
        {
            var fullPath = Path.Combine(series.Path, image.RelativePath);

            try
            {
                if (image.Url.StartsWith("http"))
                {
                    _httpClient.DownloadFile(image.Url, fullPath);
                }
                else
                {
                    _diskProvider.CopyFile(image.Url, fullPath);
                }
                _mediaFileAttributeService.SetFilePermissions(fullPath);
            }
            catch (HttpException ex)
            {
                _logger.Warn(ex, "Couldn't download image {0} for {1}. {2}", image.Url, series, ex.Message);
            }
            catch (WebException ex)
            {
                _logger.Warn(ex, "Couldn't download image {0} for {1}. {2}", image.Url, series, ex.Message);
            }
            catch (Exception ex)
            {
                _logger.Error(ex, "Couldn't download image {0} for {1}. {2}", image.Url, series, ex.Message);
            }
        }

        // Writes metadata contents and applies configured file permissions.
        private void SaveMetadataFile(string path, string contents)
        {
            _diskProvider.WriteAllText(path, contents);
            _mediaFileAttributeService.SetFilePermissions(path);
        }

        // Returns the single record matching the predicate, deleting any
        // duplicates (from DB and disk, via the recycle bin) beyond the first.
        private MetadataFile GetMetadataFile(Series series, List<MetadataFile> existingMetadataFiles, Func<MetadataFile, bool> predicate)
        {
            var matchingMetadataFiles = existingMetadataFiles.Where(predicate).ToList();

            if (matchingMetadataFiles.Empty())
            {
                return null;
            }

            //Remove duplicate metadata files from DB and disk
            foreach (var file in matchingMetadataFiles.Skip(1))
            {
                var path = Path.Combine(series.Path, file.RelativePath);

                _logger.Debug("Removing duplicate Metadata file: {0}", path);

                var subfolder = _diskProvider.GetParentFolder(series.Path).GetRelativePath(_diskProvider.GetParentFolder(path));
                _recycleBinProvider.DeleteFile(path, subfolder);
                _metadataFileService.Delete(file.Id);
            }

            return matchingMetadataFiles.First();
        }
    }
}
|
{
"pile_set_name": "Github"
}
|
#include <fltKernel.h>
#include "FilesAPI.h"
// Opens or creates a file with ZwCreateFile according to Type.
// The resulting handle is stored in hFile and the NTSTATUS of the
// create call in CreationStatus; callers must check CreationStatus
// before using the object.
FilesAPI::FilesAPI(
    LPCWSTR FilePath,
    CREATE_FILE_TYPE Type,
    ACCESS_MASK AccessMask,
    ULONG ShareAccess
) : hFile(NULL) {
    UNICODE_STRING Path;
    RtlInitUnicodeString(&Path, FilePath);

    // OBJ_KERNEL_HANDLE keeps the handle out of the current process'
    // handle table so user mode cannot reach it.
    OBJECT_ATTRIBUTES ObjectAttributes;
    InitializeObjectAttributes(
        &ObjectAttributes,
        &Path,
        OBJ_CASE_INSENSITIVE | OBJ_KERNEL_HANDLE,
        NULL,
        NULL
    );

    IO_STATUS_BLOCK IoStatusBlock = {};
    LARGE_INTEGER AllocationSize = {};

    // Map the simplified CREATE_FILE_TYPE onto an NT create disposition.
    // The default (FILE_OVERWRITE) is only used if Type is none of the
    // enumerated values.
    ULONG CreateDisposition = FILE_OVERWRITE;
    switch (Type) {
    case fCreateEmpty:
        CreateDisposition = FILE_OVERWRITE_IF;
        break;
    case fOpenExisting:
        CreateDisposition = FILE_OPEN;
        break;
    case fOpenOrCreate:
        CreateDisposition = FILE_OPEN_IF;
        break;
    }

    // FILE_SYNCHRONOUS_IO_NONALERT lets Read/Write below pass an explicit
    // byte offset and complete synchronously.
    CreationStatus = ZwCreateFile(
        &hFile,
        AccessMask,
        &ObjectAttributes,
        &IoStatusBlock,
        &AllocationSize,
        FILE_ATTRIBUTE_NORMAL,
        ShareAccess,
        CreateDisposition,
        FILE_NON_DIRECTORY_FILE | FILE_SYNCHRONOUS_IO_NONALERT,
        NULL,
        0
    );
}
// Reads up to Size bytes from the file into Buffer, starting at byte
// offset Offset. Returns the NTSTATUS from ZwReadFile.
NTSTATUS FilesAPI::Read(OUT PVOID Buffer, ULONG Size, OPTIONAL UINT64 Offset) const {
    IO_STATUS_BLOCK IoStatusBlock = {};
    // Build a properly-typed LARGE_INTEGER for the byte offset instead of
    // reinterpret_cast-ing the address of the UINT64 parameter: that cast
    // type-punned through an unrelated type (undefined behavior under the
    // strict-aliasing rule), even though both are 8 bytes on Windows.
    LARGE_INTEGER ByteOffset;
    ByteOffset.QuadPart = static_cast<LONGLONG>(Offset);
    return ZwReadFile(hFile, NULL, NULL, NULL, &IoStatusBlock, Buffer, Size, &ByteOffset, NULL);
}
// Writes Size bytes from Buffer to the file at byte offset Offset.
// Returns the NTSTATUS from ZwWriteFile.
NTSTATUS FilesAPI::Write(IN PVOID Buffer, ULONG Size, OPTIONAL UINT64 Offset) const {
    IO_STATUS_BLOCK IoStatusBlock = {};
    // As in Read(): use a real LARGE_INTEGER rather than reinterpret_cast
    // of &Offset, which is type punning and undefined behavior.
    LARGE_INTEGER ByteOffset;
    ByteOffset.QuadPart = static_cast<LONGLONG>(Offset);
    return ZwWriteFile(hFile, NULL, NULL, NULL, &IoStatusBlock, Buffer, Size, &ByteOffset, NULL);
}
// Closes the underlying file handle, if one is open, and clears it so
// repeated calls are harmless. Returns STATUS_SUCCESS when there was
// nothing to close, otherwise the NTSTATUS of ZwClose.
NTSTATUS FilesAPI::Close() {
    if (!hFile) {
        return STATUS_SUCCESS;
    }
    const NTSTATUS Status = ZwClose(hFile);
    hFile = NULL;
    return Status;
}
// Creates the directory DirPath (static helper). The directory handle is
// opened only to perform the create and is closed immediately.
// Fails with an NT error if the directory already exists (FILE_CREATE).
NTSTATUS FilesAPI::CreateDir(LPCWSTR DirPath) {
    UNICODE_STRING Path;
    RtlInitUnicodeString(&Path, DirPath);

    OBJECT_ATTRIBUTES ObjectAttributes;
    InitializeObjectAttributes(
        &ObjectAttributes,
        &Path,
        OBJ_CASE_INSENSITIVE | OBJ_KERNEL_HANDLE,
        NULL,
        NULL
    );

    IO_STATUS_BLOCK IoStatusBlock = {};
    LARGE_INTEGER AllocationSize = {};
    HANDLE hDir = NULL;
    NTSTATUS Status = ZwCreateFile(
        &hDir,
        SYNCHRONIZE,
        &ObjectAttributes,
        &IoStatusBlock,
        &AllocationSize,
        FILE_ATTRIBUTE_NORMAL,
        0, // Non-shared access
        FILE_CREATE,
        FILE_DIRECTORY_FILE,
        NULL,
        0
    );
    // The handle was only needed for creation; release it right away.
    if (NT_SUCCESS(Status) && hDir) ZwClose(hDir);
    return Status;
}
// Deletes the file at FilePath (static helper) via ZwDeleteFile.
// NOTE(review): unlike the other methods of this class, the object
// attributes here omit OBJ_KERNEL_HANDLE — presumably harmless since no
// handle is returned, but confirm the inconsistency is intentional.
NTSTATUS FilesAPI::DeleteFile(LPCWSTR FilePath) {
    UNICODE_STRING Path;
    RtlInitUnicodeString(&Path, FilePath);

    OBJECT_ATTRIBUTES ObjectAttributes;
    InitializeObjectAttributes(
        &ObjectAttributes,
        &Path,
        OBJ_CASE_INSENSITIVE,
        NULL,
        NULL
    );

    return ZwDeleteFile(&ObjectAttributes);
}
|
{
"pile_set_name": "Github"
}
|
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package windows
import "syscall"
// Error numbers, open(2)-style flags and signal numbers. The flag and
// signal values are invented for Windows so that package os can present
// a POSIX-like surface; only the ERROR_* values are real Windows codes.
const (
	// Windows errors.
	ERROR_FILE_NOT_FOUND         syscall.Errno = 2
	ERROR_PATH_NOT_FOUND         syscall.Errno = 3
	ERROR_ACCESS_DENIED          syscall.Errno = 5
	ERROR_NO_MORE_FILES          syscall.Errno = 18
	ERROR_HANDLE_EOF             syscall.Errno = 38
	ERROR_NETNAME_DELETED        syscall.Errno = 64
	ERROR_FILE_EXISTS            syscall.Errno = 80
	ERROR_BROKEN_PIPE            syscall.Errno = 109
	ERROR_BUFFER_OVERFLOW        syscall.Errno = 111
	ERROR_INSUFFICIENT_BUFFER    syscall.Errno = 122
	ERROR_MOD_NOT_FOUND          syscall.Errno = 126
	ERROR_PROC_NOT_FOUND         syscall.Errno = 127
	ERROR_ALREADY_EXISTS         syscall.Errno = 183
	ERROR_ENVVAR_NOT_FOUND       syscall.Errno = 203
	ERROR_MORE_DATA              syscall.Errno = 234
	ERROR_OPERATION_ABORTED      syscall.Errno = 995
	ERROR_IO_PENDING             syscall.Errno = 997
	ERROR_SERVICE_SPECIFIC_ERROR syscall.Errno = 1066
	ERROR_NOT_FOUND              syscall.Errno = 1168
	ERROR_PRIVILEGE_NOT_HELD     syscall.Errno = 1314
	WSAEACCES                    syscall.Errno = 10013
	WSAECONNRESET                syscall.Errno = 10054
)

const (
	// Invented values to support what package os expects.
	O_RDONLY   = 0x00000
	O_WRONLY   = 0x00001
	O_RDWR     = 0x00002
	O_CREAT    = 0x00040
	O_EXCL     = 0x00080
	O_NOCTTY   = 0x00100
	O_TRUNC    = 0x00200
	O_NONBLOCK = 0x00800
	O_APPEND   = 0x00400
	O_SYNC     = 0x01000
	O_ASYNC    = 0x02000
	O_CLOEXEC  = 0x80000
)

const (
	// More invented values for signals
	SIGHUP  = Signal(0x1)
	SIGINT  = Signal(0x2)
	SIGQUIT = Signal(0x3)
	SIGILL  = Signal(0x4)
	SIGTRAP = Signal(0x5)
	SIGABRT = Signal(0x6)
	SIGBUS  = Signal(0x7)
	SIGFPE  = Signal(0x8)
	SIGKILL = Signal(0x9)
	SIGSEGV = Signal(0xb)
	SIGPIPE = Signal(0xd)
	SIGALRM = Signal(0xe)
	SIGTERM = Signal(0xf)
)

// signals maps signal numbers (as array index) to their display names.
var signals = [...]string{
	1:  "hangup",
	2:  "interrupt",
	3:  "quit",
	4:  "illegal instruction",
	5:  "trace/breakpoint trap",
	6:  "aborted",
	7:  "bus error",
	8:  "floating point exception",
	9:  "killed",
	10: "user defined signal 1",
	11: "segmentation fault",
	12: "user defined signal 2",
	13: "broken pipe",
	14: "alarm clock",
	15: "terminated",
}
// Win32 API constants: generic access rights, file attributes/dispositions,
// process/wait flags and related values, as found in the Windows SDK headers.
const (
	GENERIC_READ    = 0x80000000
	GENERIC_WRITE   = 0x40000000
	GENERIC_EXECUTE = 0x20000000
	GENERIC_ALL     = 0x10000000

	FILE_LIST_DIRECTORY   = 0x00000001
	FILE_APPEND_DATA      = 0x00000004
	FILE_WRITE_ATTRIBUTES = 0x00000100

	FILE_SHARE_READ   = 0x00000001
	FILE_SHARE_WRITE  = 0x00000002
	FILE_SHARE_DELETE = 0x00000004

	FILE_ATTRIBUTE_READONLY      = 0x00000001
	FILE_ATTRIBUTE_HIDDEN        = 0x00000002
	FILE_ATTRIBUTE_SYSTEM        = 0x00000004
	FILE_ATTRIBUTE_DIRECTORY     = 0x00000010
	FILE_ATTRIBUTE_ARCHIVE       = 0x00000020
	FILE_ATTRIBUTE_NORMAL        = 0x00000080
	FILE_ATTRIBUTE_REPARSE_POINT = 0x00000400

	INVALID_FILE_ATTRIBUTES = 0xffffffff

	// CreateFile dispositions.
	CREATE_NEW        = 1
	CREATE_ALWAYS     = 2
	OPEN_EXISTING     = 3
	OPEN_ALWAYS       = 4
	TRUNCATE_EXISTING = 5

	FILE_FLAG_OPEN_REPARSE_POINT = 0x00200000
	FILE_FLAG_BACKUP_SEMANTICS   = 0x02000000
	FILE_FLAG_OVERLAPPED         = 0x40000000

	HANDLE_FLAG_INHERIT    = 0x00000001
	STARTF_USESTDHANDLES   = 0x00000100
	STARTF_USESHOWWINDOW   = 0x00000001
	DUPLICATE_CLOSE_SOURCE = 0x00000001
	DUPLICATE_SAME_ACCESS  = 0x00000002

	STD_INPUT_HANDLE  = -10
	STD_OUTPUT_HANDLE = -11
	STD_ERROR_HANDLE  = -12

	// SetFilePointer whence values.
	FILE_BEGIN   = 0
	FILE_CURRENT = 1
	FILE_END     = 2

	LANG_ENGLISH       = 0x09
	SUBLANG_ENGLISH_US = 0x01

	FORMAT_MESSAGE_ALLOCATE_BUFFER = 256
	FORMAT_MESSAGE_IGNORE_INSERTS  = 512
	FORMAT_MESSAGE_FROM_STRING     = 1024
	FORMAT_MESSAGE_FROM_HMODULE    = 2048
	FORMAT_MESSAGE_FROM_SYSTEM     = 4096
	FORMAT_MESSAGE_ARGUMENT_ARRAY  = 8192
	FORMAT_MESSAGE_MAX_WIDTH_MASK  = 255

	MAX_PATH      = 260
	MAX_LONG_PATH = 32768

	MAX_COMPUTERNAME_LENGTH = 15

	TIME_ZONE_ID_UNKNOWN  = 0
	TIME_ZONE_ID_STANDARD = 1
	TIME_ZONE_ID_DAYLIGHT = 2

	IGNORE   = 0
	INFINITE = 0xffffffff

	WAIT_TIMEOUT   = 258
	WAIT_ABANDONED = 0x00000080
	WAIT_OBJECT_0  = 0x00000000
	WAIT_FAILED    = 0xFFFFFFFF

	CREATE_NEW_PROCESS_GROUP   = 0x00000200
	CREATE_UNICODE_ENVIRONMENT = 0x00000400

	PROCESS_TERMINATE         = 1
	PROCESS_QUERY_INFORMATION = 0x00000400
	SYNCHRONIZE               = 0x00100000

	// Memory protection and file-mapping flags.
	PAGE_READONLY          = 0x02
	PAGE_READWRITE         = 0x04
	PAGE_WRITECOPY         = 0x08
	PAGE_EXECUTE_READ      = 0x20
	PAGE_EXECUTE_READWRITE = 0x40
	PAGE_EXECUTE_WRITECOPY = 0x80

	FILE_MAP_COPY    = 0x01
	FILE_MAP_WRITE   = 0x02
	FILE_MAP_READ    = 0x04
	FILE_MAP_EXECUTE = 0x20

	CTRL_C_EVENT     = 0
	CTRL_BREAK_EVENT = 1

	// Windows reserves errors >= 1<<29 for application use.
	APPLICATION_ERROR = 1 << 29
)

const (
	// flags for CreateToolhelp32Snapshot
	TH32CS_SNAPHEAPLIST = 0x01
	TH32CS_SNAPPROCESS  = 0x02
	TH32CS_SNAPTHREAD   = 0x04
	TH32CS_SNAPMODULE   = 0x08
	TH32CS_SNAPMODULE32 = 0x10
	TH32CS_SNAPALL      = TH32CS_SNAPHEAPLIST | TH32CS_SNAPMODULE | TH32CS_SNAPPROCESS | TH32CS_SNAPTHREAD
	TH32CS_INHERIT      = 0x80000000
)

const (
	// filters for ReadDirectoryChangesW
	FILE_NOTIFY_CHANGE_FILE_NAME   = 0x001
	FILE_NOTIFY_CHANGE_DIR_NAME    = 0x002
	FILE_NOTIFY_CHANGE_ATTRIBUTES  = 0x004
	FILE_NOTIFY_CHANGE_SIZE        = 0x008
	FILE_NOTIFY_CHANGE_LAST_WRITE  = 0x010
	FILE_NOTIFY_CHANGE_LAST_ACCESS = 0x020
	FILE_NOTIFY_CHANGE_CREATION    = 0x040
	FILE_NOTIFY_CHANGE_SECURITY    = 0x100
)

const (
	// do not reorder
	FILE_ACTION_ADDED = iota + 1
	FILE_ACTION_REMOVED
	FILE_ACTION_MODIFIED
	FILE_ACTION_RENAMED_OLD_NAME
	FILE_ACTION_RENAMED_NEW_NAME
)

const (
	// wincrypt.h
	PROV_RSA_FULL      = 1
	PROV_RSA_SIG       = 2
	PROV_DSS           = 3
	PROV_FORTEZZA      = 4
	PROV_MS_EXCHANGE   = 5
	PROV_SSL           = 6
	PROV_RSA_SCHANNEL  = 12
	PROV_DSS_DH        = 13
	PROV_EC_ECDSA_SIG  = 14
	PROV_EC_ECNRA_SIG  = 15
	PROV_EC_ECDSA_FULL = 16
	PROV_EC_ECNRA_FULL = 17
	PROV_DH_SCHANNEL   = 18
	PROV_SPYRUS_LYNKS  = 20
	PROV_RNG           = 21
	PROV_INTEL_SEC     = 22
	PROV_REPLACE_OWF   = 23
	PROV_RSA_AES       = 24

	CRYPT_VERIFYCONTEXT              = 0xF0000000
	CRYPT_NEWKEYSET                  = 0x00000008
	CRYPT_DELETEKEYSET               = 0x00000010
	CRYPT_MACHINE_KEYSET             = 0x00000020
	CRYPT_SILENT                     = 0x00000040
	CRYPT_DEFAULT_CONTAINER_OPTIONAL = 0x00000080

	USAGE_MATCH_TYPE_AND = 0
	USAGE_MATCH_TYPE_OR  = 1

	X509_ASN_ENCODING   = 0x00000001
	PKCS_7_ASN_ENCODING = 0x00010000

	CERT_STORE_PROV_MEMORY = 2

	CERT_STORE_ADD_ALWAYS = 4

	CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG = 0x00000004

	CERT_TRUST_NO_ERROR                          = 0x00000000
	CERT_TRUST_IS_NOT_TIME_VALID                 = 0x00000001
	CERT_TRUST_IS_REVOKED                        = 0x00000004
	CERT_TRUST_IS_NOT_SIGNATURE_VALID            = 0x00000008
	CERT_TRUST_IS_NOT_VALID_FOR_USAGE            = 0x00000010
	CERT_TRUST_IS_UNTRUSTED_ROOT                 = 0x00000020
	CERT_TRUST_REVOCATION_STATUS_UNKNOWN         = 0x00000040
	CERT_TRUST_IS_CYCLIC                         = 0x00000080
	CERT_TRUST_INVALID_EXTENSION                 = 0x00000100
	CERT_TRUST_INVALID_POLICY_CONSTRAINTS        = 0x00000200
	CERT_TRUST_INVALID_BASIC_CONSTRAINTS         = 0x00000400
	CERT_TRUST_INVALID_NAME_CONSTRAINTS          = 0x00000800
	CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT = 0x00001000
	CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT   = 0x00002000
	CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT = 0x00004000
	CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT      = 0x00008000
	CERT_TRUST_IS_OFFLINE_REVOCATION             = 0x01000000
	CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY          = 0x02000000
	CERT_TRUST_IS_EXPLICIT_DISTRUST              = 0x04000000
	CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT    = 0x08000000

	CERT_CHAIN_POLICY_BASE              = 1
	CERT_CHAIN_POLICY_AUTHENTICODE      = 2
	CERT_CHAIN_POLICY_AUTHENTICODE_TS   = 3
	CERT_CHAIN_POLICY_SSL               = 4
	CERT_CHAIN_POLICY_BASIC_CONSTRAINTS = 5
	CERT_CHAIN_POLICY_NT_AUTH           = 6
	CERT_CHAIN_POLICY_MICROSOFT_ROOT    = 7
	CERT_CHAIN_POLICY_EV                = 8

	CERT_E_EXPIRED       = 0x800B0101
	CERT_E_ROLE          = 0x800B0103
	CERT_E_PURPOSE       = 0x800B0106
	CERT_E_UNTRUSTEDROOT = 0x800B0109
	CERT_E_CN_NO_MATCH   = 0x800B010F

	AUTHTYPE_CLIENT = 1
	AUTHTYPE_SERVER = 2
)

// NUL-terminated ASN.1 object identifiers used for certificate
// enhanced-key-usage checks.
var (
	OID_PKIX_KP_SERVER_AUTH = []byte("1.3.6.1.5.5.7.3.1\x00")
	OID_SERVER_GATED_CRYPTO = []byte("1.3.6.1.4.1.311.10.3.3\x00")
	OID_SGC_NETSCAPE        = []byte("2.16.840.1.113730.4.1\x00")
)
// Invented values to support what package os expects.
//
// Timeval mirrors the POSIX timeval layout, with second and
// microsecond components held in 32-bit fields.
type Timeval struct {
	Sec  int32
	Usec int32
}

// Nanoseconds returns the Timeval expressed as a nanosecond count.
func (tv *Timeval) Nanoseconds() int64 {
	micros := int64(tv.Sec)*1e6 + int64(tv.Usec)
	return micros * 1e3
}

// NsecToTimeval converts a nanosecond count into a Timeval,
// truncating any sub-microsecond remainder.
func NsecToTimeval(nsec int64) (tv Timeval) {
	const nsPerSec = 1e9
	tv.Sec = int32(nsec / nsPerSec)
	tv.Usec = int32((nsec % nsPerSec) / 1e3)
	return tv
}
// SecurityAttributes corresponds to the Win32 SECURITY_ATTRIBUTES
// structure, controlling handle inheritance and the security descriptor.
type SecurityAttributes struct {
	Length             uint32
	SecurityDescriptor uintptr
	InheritHandle      uint32
}

// Overlapped corresponds to the Win32 OVERLAPPED structure used for
// asynchronous I/O.
type Overlapped struct {
	Internal     uintptr
	InternalHigh uintptr
	Offset       uint32
	OffsetHigh   uint32
	HEvent       Handle
}

// FileNotifyInformation is the header of a ReadDirectoryChangesW
// notification record; FileName is the first UTF-16 unit of a
// variable-length name (FileNameLength bytes long).
type FileNotifyInformation struct {
	NextEntryOffset uint32
	Action          uint32
	FileNameLength  uint32
	FileName        uint16
}
type Filetime struct {
LowDateTime uint32
HighDateTime uint32
}
// Nanoseconds returns Filetime ft in nanoseconds
// since Epoch (00:00:00 UTC, January 1, 1970).
func (ft *Filetime) Nanoseconds() int64 {
// 100-nanosecond intervals since January 1, 1601
nsec := int64(ft.HighDateTime)<<32 + int64(ft.LowDateTime)
// change starting time to the Epoch (00:00:00 UTC, January 1, 1970)
nsec -= 116444736000000000
// convert into nanoseconds
nsec *= 100
return nsec
}
func NsecToFiletime(nsec int64) (ft Filetime) {
// convert into 100-nanosecond
nsec /= 100
// change starting time to January 1, 1601
nsec += 116444736000000000
// split into high / low
ft.LowDateTime = uint32(nsec & 0xffffffff)
ft.HighDateTime = uint32(nsec >> 32 & 0xffffffff)
return ft
}
// Win32finddata is the public WIN32_FIND_DATA shape committed to in Go 1.
// Note its FileName array is one element shorter than the real system
// structure (see win32finddata1 below).
type Win32finddata struct {
	FileAttributes    uint32
	CreationTime      Filetime
	LastAccessTime    Filetime
	LastWriteTime     Filetime
	FileSizeHigh      uint32
	FileSizeLow       uint32
	Reserved0         uint32
	Reserved1         uint32
	FileName          [MAX_PATH - 1]uint16
	AlternateFileName [13]uint16
}

// This is the actual system call structure.
// Win32finddata is what we committed to in Go 1.
type win32finddata1 struct {
	FileAttributes    uint32
	CreationTime      Filetime
	LastAccessTime    Filetime
	LastWriteTime     Filetime
	FileSizeHigh      uint32
	FileSizeLow       uint32
	Reserved0         uint32
	Reserved1         uint32
	FileName          [MAX_PATH]uint16
	AlternateFileName [14]uint16
}

// copyFindData copies the system-call result in src into the public
// Go 1 structure dst, field by field.
func copyFindData(dst *Win32finddata, src *win32finddata1) {
	dst.FileAttributes = src.FileAttributes
	dst.CreationTime = src.CreationTime
	dst.LastAccessTime = src.LastAccessTime
	dst.LastWriteTime = src.LastWriteTime
	dst.FileSizeHigh = src.FileSizeHigh
	dst.FileSizeLow = src.FileSizeLow
	dst.Reserved0 = src.Reserved0
	dst.Reserved1 = src.Reserved1

	// The src is 1 element bigger than dst, but it must be NUL.
	copy(dst.FileName[:], src.FileName[:])
	copy(dst.AlternateFileName[:], src.AlternateFileName[:])
}

// ByHandleFileInformation corresponds to BY_HANDLE_FILE_INFORMATION,
// as returned by GetFileInformationByHandle.
type ByHandleFileInformation struct {
	FileAttributes     uint32
	CreationTime       Filetime
	LastAccessTime     Filetime
	LastWriteTime      Filetime
	VolumeSerialNumber uint32
	FileSizeHigh       uint32
	FileSizeLow        uint32
	NumberOfLinks      uint32
	FileIndexHigh      uint32
	FileIndexLow       uint32
}

// Info levels for GetFileAttributesEx.
const (
	GetFileExInfoStandard = 0
	GetFileExMaxInfoLevel = 1
)

// Win32FileAttributeData corresponds to WIN32_FILE_ATTRIBUTE_DATA,
// filled in at the GetFileExInfoStandard info level.
type Win32FileAttributeData struct {
	FileAttributes uint32
	CreationTime   Filetime
	LastAccessTime Filetime
	LastWriteTime  Filetime
	FileSizeHigh   uint32
	FileSizeLow    uint32
}
// ShowWindow constants
const (
	// winuser.h
	SW_HIDE            = 0
	SW_NORMAL          = 1
	SW_SHOWNORMAL      = 1
	SW_SHOWMINIMIZED   = 2
	SW_SHOWMAXIMIZED   = 3
	SW_MAXIMIZE        = 3
	SW_SHOWNOACTIVATE  = 4
	SW_SHOW            = 5
	SW_MINIMIZE        = 6
	SW_SHOWMINNOACTIVE = 7
	SW_SHOWNA          = 8
	SW_RESTORE         = 9
	SW_SHOWDEFAULT     = 10
	SW_FORCEMINIMIZE   = 11
)

// StartupInfo corresponds to STARTUPINFO for CreateProcess; blank
// identifiers pad fields Go does not expose (lpReserved, cbReserved2,
// lpReserved2).
type StartupInfo struct {
	Cb            uint32
	_             *uint16
	Desktop       *uint16
	Title         *uint16
	X             uint32
	Y             uint32
	XSize         uint32
	YSize         uint32
	XCountChars   uint32
	YCountChars   uint32
	FillAttribute uint32
	Flags         uint32
	ShowWindow    uint16
	_             uint16
	_             *byte
	StdInput      Handle
	StdOutput     Handle
	StdErr        Handle
}

// ProcessInformation corresponds to PROCESS_INFORMATION, returned by
// CreateProcess.
type ProcessInformation struct {
	Process   Handle
	Thread    Handle
	ProcessId uint32
	ThreadId  uint32
}

// ProcessEntry32 corresponds to PROCESSENTRY32 for Process32First/Next
// toolhelp snapshots.
type ProcessEntry32 struct {
	Size            uint32
	Usage           uint32
	ProcessID       uint32
	DefaultHeapID   uintptr
	ModuleID        uint32
	Threads         uint32
	ParentProcessID uint32
	PriClassBase    int32
	Flags           uint32
	ExeFile         [MAX_PATH]uint16
}

// Systemtime corresponds to SYSTEMTIME (broken-down calendar time).
type Systemtime struct {
	Year         uint16
	Month        uint16
	DayOfWeek    uint16
	Day          uint16
	Hour         uint16
	Minute       uint16
	Second       uint16
	Milliseconds uint16
}

// Timezoneinformation corresponds to TIME_ZONE_INFORMATION; Bias values
// are minutes west of UTC.
type Timezoneinformation struct {
	Bias         int32
	StandardName [32]uint16
	StandardDate Systemtime
	StandardBias int32
	DaylightName [32]uint16
	DaylightDate Systemtime
	DaylightBias int32
}
// Socket related.
const (
	// Address families, socket types and protocol numbers (Winsock).
	AF_UNSPEC  = 0
	AF_UNIX    = 1
	AF_INET    = 2
	AF_INET6   = 23
	AF_NETBIOS = 17

	SOCK_STREAM    = 1
	SOCK_DGRAM     = 2
	SOCK_RAW       = 3
	SOCK_SEQPACKET = 5

	IPPROTO_IP   = 0
	IPPROTO_IPV6 = 0x29
	IPPROTO_TCP  = 6
	IPPROTO_UDP  = 17

	// Socket-level options.
	SOL_SOCKET                = 0xffff
	SO_REUSEADDR              = 4
	SO_KEEPALIVE              = 8
	SO_DONTROUTE              = 16
	SO_BROADCAST              = 32
	SO_LINGER                 = 128
	SO_RCVBUF                 = 0x1002
	SO_SNDBUF                 = 0x1001
	SO_UPDATE_ACCEPT_CONTEXT  = 0x700b
	SO_UPDATE_CONNECT_CONTEXT = 0x7010

	// WSAIoctl control-code building blocks.
	IOC_OUT                            = 0x40000000
	IOC_IN                             = 0x80000000
	IOC_VENDOR                         = 0x18000000
	IOC_INOUT                          = IOC_IN | IOC_OUT
	IOC_WS2                            = 0x08000000
	SIO_GET_EXTENSION_FUNCTION_POINTER = IOC_INOUT | IOC_WS2 | 6
	SIO_KEEPALIVE_VALS                 = IOC_IN | IOC_VENDOR | 4
	SIO_UDP_CONNRESET                  = IOC_IN | IOC_VENDOR | 12

	// cf. http://support.microsoft.com/default.aspx?scid=kb;en-us;257460

	IP_TOS             = 0x3
	IP_TTL             = 0x4
	IP_MULTICAST_IF    = 0x9
	IP_MULTICAST_TTL   = 0xa
	IP_MULTICAST_LOOP  = 0xb
	IP_ADD_MEMBERSHIP  = 0xc
	IP_DROP_MEMBERSHIP = 0xd

	IPV6_V6ONLY         = 0x1b
	IPV6_UNICAST_HOPS   = 0x4
	IPV6_MULTICAST_IF   = 0x9
	IPV6_MULTICAST_HOPS = 0xa
	IPV6_MULTICAST_LOOP = 0xb
	IPV6_JOIN_GROUP     = 0xc
	IPV6_LEAVE_GROUP    = 0xd

	SOMAXCONN = 0x7fffffff

	TCP_NODELAY = 1

	SHUT_RD   = 0
	SHUT_WR   = 1
	SHUT_RDWR = 2

	WSADESCRIPTION_LEN = 256
	WSASYS_STATUS_LEN  = 128
)

// WSABuf corresponds to the Winsock WSABUF scatter/gather element.
type WSABuf struct {
	Len uint32
	Buf *byte
}

// Invented values to support what package os expects.
const (
	S_IFMT   = 0x1f000
	S_IFIFO  = 0x1000
	S_IFCHR  = 0x2000
	S_IFDIR  = 0x4000
	S_IFBLK  = 0x6000
	S_IFREG  = 0x8000
	S_IFLNK  = 0xa000
	S_IFSOCK = 0xc000
	S_ISUID  = 0x800
	S_ISGID  = 0x400
	S_ISVTX  = 0x200
	S_IRUSR  = 0x100
	S_IWRITE = 0x80
	S_IWUSR  = 0x80
	S_IXUSR  = 0x40
)

// GetFileType results.
const (
	FILE_TYPE_CHAR    = 0x0002
	FILE_TYPE_DISK    = 0x0001
	FILE_TYPE_PIPE    = 0x0003
	FILE_TYPE_REMOTE  = 0x8000
	FILE_TYPE_UNKNOWN = 0x0000
)

// Hostent corresponds to the Winsock hostent structure (gethostbyname).
type Hostent struct {
	Name     *byte
	Aliases  **byte
	AddrType uint16
	Length   uint16
	AddrList **byte
}

// Protoent corresponds to the Winsock protoent structure (getprotobyname).
type Protoent struct {
	Name    *byte
	Aliases **byte
	Proto   uint16
}
// DNS resource-record type codes (windns.h).
const (
	DNS_TYPE_A       = 0x0001
	DNS_TYPE_NS      = 0x0002
	DNS_TYPE_MD      = 0x0003
	DNS_TYPE_MF      = 0x0004
	DNS_TYPE_CNAME   = 0x0005
	DNS_TYPE_SOA     = 0x0006
	DNS_TYPE_MB      = 0x0007
	DNS_TYPE_MG      = 0x0008
	DNS_TYPE_MR      = 0x0009
	DNS_TYPE_NULL    = 0x000a
	DNS_TYPE_WKS     = 0x000b
	DNS_TYPE_PTR     = 0x000c
	DNS_TYPE_HINFO   = 0x000d
	DNS_TYPE_MINFO   = 0x000e
	DNS_TYPE_MX      = 0x000f
	DNS_TYPE_TEXT    = 0x0010
	DNS_TYPE_RP      = 0x0011
	DNS_TYPE_AFSDB   = 0x0012
	DNS_TYPE_X25     = 0x0013
	DNS_TYPE_ISDN    = 0x0014
	DNS_TYPE_RT      = 0x0015
	DNS_TYPE_NSAP    = 0x0016
	DNS_TYPE_NSAPPTR = 0x0017
	DNS_TYPE_SIG     = 0x0018
	DNS_TYPE_KEY     = 0x0019
	DNS_TYPE_PX      = 0x001a
	DNS_TYPE_GPOS    = 0x001b
	DNS_TYPE_AAAA    = 0x001c
	DNS_TYPE_LOC     = 0x001d
	DNS_TYPE_NXT     = 0x001e
	DNS_TYPE_EID     = 0x001f
	DNS_TYPE_NIMLOC  = 0x0020
	DNS_TYPE_SRV     = 0x0021
	DNS_TYPE_ATMA    = 0x0022
	DNS_TYPE_NAPTR   = 0x0023
	DNS_TYPE_KX      = 0x0024
	DNS_TYPE_CERT    = 0x0025
	DNS_TYPE_A6      = 0x0026
	DNS_TYPE_DNAME   = 0x0027
	DNS_TYPE_SINK    = 0x0028
	DNS_TYPE_OPT     = 0x0029
	DNS_TYPE_DS      = 0x002B
	DNS_TYPE_RRSIG   = 0x002E
	DNS_TYPE_NSEC    = 0x002F
	DNS_TYPE_DNSKEY  = 0x0030
	DNS_TYPE_DHCID   = 0x0031
	DNS_TYPE_UINFO   = 0x0064
	DNS_TYPE_UID     = 0x0065
	DNS_TYPE_GID     = 0x0066
	DNS_TYPE_UNSPEC  = 0x0067
	DNS_TYPE_ADDRS   = 0x00f8
	DNS_TYPE_TKEY    = 0x00f9
	DNS_TYPE_TSIG    = 0x00fa
	DNS_TYPE_IXFR    = 0x00fb
	DNS_TYPE_AXFR    = 0x00fc
	DNS_TYPE_MAILB   = 0x00fd
	DNS_TYPE_MAILA   = 0x00fe
	DNS_TYPE_ALL     = 0x00ff
	DNS_TYPE_ANY     = 0x00ff
	DNS_TYPE_WINS    = 0xff01
	DNS_TYPE_WINSR   = 0xff02
	DNS_TYPE_NBSTAT  = 0xff01
)

const (
	// DnsQuery status: name exists but no records of the requested type.
	DNS_INFO_NO_RECORDS = 0x251D
)

const (
	// flags inside DNSRecord.Dw
	DnsSectionQuestion   = 0x0000
	DnsSectionAnswer     = 0x0001
	DnsSectionAuthority  = 0x0002
	DnsSectionAdditional = 0x0003
)

// DNSSRVData is the SRV-record payload of a DNSRecord.
type DNSSRVData struct {
	Target   *uint16
	Priority uint16
	Weight   uint16
	Port     uint16
	Pad      uint16
}

// DNSPTRData is the PTR-record payload of a DNSRecord.
type DNSPTRData struct {
	Host *uint16
}

// DNSMXData is the MX-record payload of a DNSRecord.
type DNSMXData struct {
	NameExchange *uint16
	Preference   uint16
	Pad          uint16
}

// DNSTXTData is the TXT-record payload; StringArray is the first element
// of a StringCount-sized array of strings.
type DNSTXTData struct {
	StringCount uint16
	StringArray [1]*uint16
}

// DNSRecord is the fixed-size head of a DNS_RECORD list entry; the
// type-specific payload lives in the Data bytes.
type DNSRecord struct {
	Next     *DNSRecord
	Name     *uint16
	Type     uint16
	Length   uint16
	Dw       uint32
	Ttl      uint32
	Reserved uint32
	Data     [40]byte
}

// TransmitFile flags (mswsock.h).
const (
	TF_DISCONNECT         = 1
	TF_REUSE_SOCKET       = 2
	TF_WRITE_BEHIND       = 4
	TF_USE_DEFAULT_WORKER = 0
	TF_USE_SYSTEM_THREAD  = 16
	TF_USE_KERNEL_APC     = 32
)

// TransmitFileBuffers corresponds to TRANSMIT_FILE_BUFFERS for
// TransmitFile head/tail data.
type TransmitFileBuffers struct {
	Head       uintptr
	HeadLength uint32
	Tail       uintptr
	TailLength uint32
}
const (
IFF_UP = 1
IFF_BROADCAST = 2
IFF_LOOPBACK = 4
IFF_POINTTOPOINT = 8
IFF_MULTICAST = 16
)
const SIO_GET_INTERFACE_LIST = 0x4004747F
// TODO(mattn): SockaddrGen is union of sockaddr/sockaddr_in/sockaddr_in6_old.
// will be fixed to change variable type as suitable.
type SockaddrGen [24]byte
type InterfaceInfo struct {
Flags uint32
Address SockaddrGen
BroadcastAddress SockaddrGen
Netmask SockaddrGen
}
type IpAddressString struct {
String [16]byte
}
type IpMaskString IpAddressString
type IpAddrString struct {
Next *IpAddrString
IpAddress IpAddressString
IpMask IpMaskString
Context uint32
}
const MAX_ADAPTER_NAME_LENGTH = 256
const MAX_ADAPTER_DESCRIPTION_LENGTH = 128
const MAX_ADAPTER_ADDRESS_LENGTH = 8
type IpAdapterInfo struct {
Next *IpAdapterInfo
ComboIndex uint32
AdapterName [MAX_ADAPTER_NAME_LENGTH + 4]byte
Description [MAX_ADAPTER_DESCRIPTION_LENGTH + 4]byte
AddressLength uint32
Address [MAX_ADAPTER_ADDRESS_LENGTH]byte
Index uint32
Type uint32
DhcpEnabled uint32
CurrentIpAddress *IpAddrString
IpAddressList IpAddrString
GatewayList IpAddrString
DhcpServer IpAddrString
HaveWins bool
PrimaryWinsServer IpAddrString
SecondaryWinsServer IpAddrString
LeaseObtained int64
LeaseExpires int64
}
const MAXLEN_PHYSADDR = 8
const MAX_INTERFACE_NAME_LEN = 256
const MAXLEN_IFDESCR = 256
type MibIfRow struct {
Name [MAX_INTERFACE_NAME_LEN]uint16
Index uint32
Type uint32
Mtu uint32
Speed uint32
PhysAddrLen uint32
PhysAddr [MAXLEN_PHYSADDR]byte
AdminStatus uint32
OperStatus uint32
LastChange uint32
InOctets uint32
InUcastPkts uint32
InNUcastPkts uint32
InDiscards uint32
InErrors uint32
InUnknownProtos uint32
OutOctets uint32
OutUcastPkts uint32
OutNUcastPkts uint32
OutDiscards uint32
OutErrors uint32
OutQLen uint32
DescrLen uint32
Descr [MAXLEN_IFDESCR]byte
}
type CertContext struct {
EncodingType uint32
EncodedCert *byte
Length uint32
CertInfo uintptr
Store Handle
}
type CertChainContext struct {
Size uint32
TrustStatus CertTrustStatus
ChainCount uint32
Chains **CertSimpleChain
LowerQualityChainCount uint32
LowerQualityChains **CertChainContext
HasRevocationFreshnessTime uint32
RevocationFreshnessTime uint32
}
type CertSimpleChain struct {
Size uint32
TrustStatus CertTrustStatus
NumElements uint32
Elements **CertChainElement
TrustListInfo uintptr
HasRevocationFreshnessTime uint32
RevocationFreshnessTime uint32
}
type CertChainElement struct {
Size uint32
CertContext *CertContext
TrustStatus CertTrustStatus
RevocationInfo *CertRevocationInfo
IssuanceUsage *CertEnhKeyUsage
ApplicationUsage *CertEnhKeyUsage
ExtendedErrorInfo *uint16
}
type CertRevocationInfo struct {
Size uint32
RevocationResult uint32
RevocationOid *byte
OidSpecificInfo uintptr
HasFreshnessTime uint32
FreshnessTime uint32
CrlInfo uintptr // *CertRevocationCrlInfo
}
type CertTrustStatus struct {
ErrorStatus uint32
InfoStatus uint32
}
type CertUsageMatch struct {
Type uint32
Usage CertEnhKeyUsage
}
type CertEnhKeyUsage struct {
Length uint32
UsageIdentifiers **byte
}
type CertChainPara struct {
Size uint32
RequestedUsage CertUsageMatch
RequstedIssuancePolicy CertUsageMatch
URLRetrievalTimeout uint32
CheckRevocationFreshnessTime uint32
RevocationFreshnessTime uint32
CacheResync *Filetime
}
type CertChainPolicyPara struct {
Size uint32
Flags uint32
ExtraPolicyPara uintptr
}
type SSLExtraCertChainPolicyPara struct {
Size uint32
AuthType uint32
Checks uint32
ServerName *uint16
}
type CertChainPolicyStatus struct {
Size uint32
Error uint32
ChainIndex uint32
ElementIndex uint32
ExtraPolicyStatus uintptr
}
const (
// do not reorder
HKEY_CLASSES_ROOT = 0x80000000 + iota
HKEY_CURRENT_USER
HKEY_LOCAL_MACHINE
HKEY_USERS
HKEY_PERFORMANCE_DATA
HKEY_CURRENT_CONFIG
HKEY_DYN_DATA
KEY_QUERY_VALUE = 1
KEY_SET_VALUE = 2
KEY_CREATE_SUB_KEY = 4
KEY_ENUMERATE_SUB_KEYS = 8
KEY_NOTIFY = 16
KEY_CREATE_LINK = 32
KEY_WRITE = 0x20006
KEY_EXECUTE = 0x20019
KEY_READ = 0x20019
KEY_WOW64_64KEY = 0x0100
KEY_WOW64_32KEY = 0x0200
KEY_ALL_ACCESS = 0xf003f
)
const (
// do not reorder
REG_NONE = iota
REG_SZ
REG_EXPAND_SZ
REG_BINARY
REG_DWORD_LITTLE_ENDIAN
REG_DWORD_BIG_ENDIAN
REG_LINK
REG_MULTI_SZ
REG_RESOURCE_LIST
REG_FULL_RESOURCE_DESCRIPTOR
REG_RESOURCE_REQUIREMENTS_LIST
REG_QWORD_LITTLE_ENDIAN
REG_DWORD = REG_DWORD_LITTLE_ENDIAN
REG_QWORD = REG_QWORD_LITTLE_ENDIAN
)
type AddrinfoW struct {
Flags int32
Family int32
Socktype int32
Protocol int32
Addrlen uintptr
Canonname *uint16
Addr uintptr
Next *AddrinfoW
}
const (
AI_PASSIVE = 1
AI_CANONNAME = 2
AI_NUMERICHOST = 4
)
type GUID struct {
Data1 uint32
Data2 uint16
Data3 uint16
Data4 [8]byte
}
var WSAID_CONNECTEX = GUID{
0x25a207b9,
0xddf3,
0x4660,
[8]byte{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e},
}
const (
FILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1
FILE_SKIP_SET_EVENT_ON_HANDLE = 2
)
const (
WSAPROTOCOL_LEN = 255
MAX_PROTOCOL_CHAIN = 7
BASE_PROTOCOL = 1
LAYERED_PROTOCOL = 0
XP1_CONNECTIONLESS = 0x00000001
XP1_GUARANTEED_DELIVERY = 0x00000002
XP1_GUARANTEED_ORDER = 0x00000004
XP1_MESSAGE_ORIENTED = 0x00000008
XP1_PSEUDO_STREAM = 0x00000010
XP1_GRACEFUL_CLOSE = 0x00000020
XP1_EXPEDITED_DATA = 0x00000040
XP1_CONNECT_DATA = 0x00000080
XP1_DISCONNECT_DATA = 0x00000100
XP1_SUPPORT_BROADCAST = 0x00000200
XP1_SUPPORT_MULTIPOINT = 0x00000400
XP1_MULTIPOINT_CONTROL_PLANE = 0x00000800
XP1_MULTIPOINT_DATA_PLANE = 0x00001000
XP1_QOS_SUPPORTED = 0x00002000
XP1_UNI_SEND = 0x00008000
XP1_UNI_RECV = 0x00010000
XP1_IFS_HANDLES = 0x00020000
XP1_PARTIAL_MESSAGE = 0x00040000
XP1_SAN_SUPPORT_SDP = 0x00080000
PFL_MULTIPLE_PROTO_ENTRIES = 0x00000001
PFL_RECOMMENDED_PROTO_ENTRY = 0x00000002
PFL_HIDDEN = 0x00000004
PFL_MATCHES_PROTOCOL_ZERO = 0x00000008
PFL_NETWORKDIRECT_PROVIDER = 0x00000010
)
type WSAProtocolInfo struct {
ServiceFlags1 uint32
ServiceFlags2 uint32
ServiceFlags3 uint32
ServiceFlags4 uint32
ProviderFlags uint32
ProviderId GUID
CatalogEntryId uint32
ProtocolChain WSAProtocolChain
Version int32
AddressFamily int32
MaxSockAddr int32
MinSockAddr int32
SocketType int32
Protocol int32
ProtocolMaxOffset int32
NetworkByteOrder int32
SecurityScheme int32
MessageSize uint32
ProviderReserved uint32
ProtocolName [WSAPROTOCOL_LEN + 1]uint16
}
type WSAProtocolChain struct {
ChainLen int32
ChainEntries [MAX_PROTOCOL_CHAIN]uint32
}
type TCPKeepalive struct {
OnOff uint32
Time uint32
Interval uint32
}
type symbolicLinkReparseBuffer struct {
SubstituteNameOffset uint16
SubstituteNameLength uint16
PrintNameOffset uint16
PrintNameLength uint16
Flags uint32
PathBuffer [1]uint16
}
type mountPointReparseBuffer struct {
SubstituteNameOffset uint16
SubstituteNameLength uint16
PrintNameOffset uint16
PrintNameLength uint16
PathBuffer [1]uint16
}
type reparseDataBuffer struct {
ReparseTag uint32
ReparseDataLength uint16
Reserved uint16
// GenericReparseBuffer
reparseBuffer byte
}
const (
FSCTL_GET_REPARSE_POINT = 0x900A8
MAXIMUM_REPARSE_DATA_BUFFER_SIZE = 16 * 1024
IO_REPARSE_TAG_MOUNT_POINT = 0xA0000003
IO_REPARSE_TAG_SYMLINK = 0xA000000C
SYMBOLIC_LINK_FLAG_DIRECTORY = 0x1
)
const (
ComputerNameNetBIOS = 0
ComputerNameDnsHostname = 1
ComputerNameDnsDomain = 2
ComputerNameDnsFullyQualified = 3
ComputerNamePhysicalNetBIOS = 4
ComputerNamePhysicalDnsHostname = 5
ComputerNamePhysicalDnsDomain = 6
ComputerNamePhysicalDnsFullyQualified = 7
ComputerNameMax = 8
)
|
{
"pile_set_name": "Github"
}
|
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package tag contains functionality handling tags and related data.
package tag // import "golang.org/x/text/internal/tag"
import "sort"
// An Index converts tags to a compact numeric value.
//
// All elements are of size 4. Tags may be up to 4 bytes long. Excess bytes can
// be used to store additional information about the tag.
type Index string

// Elem returns the element data at the given index.
func (s Index) Elem(x int) string {
	return string(s[x*4 : x*4+4])
}

// Index reports the index of the given key or -1 if it could not be found.
// Only the first len(key) bytes from the start of the 4-byte entries will be
// considered for the search and the first match in Index will be returned.
func (s Index) Index(key []byte) int {
	n := len(key)
	// search the index of the first entry with an equal or higher value than
	// key in s.
	index := sort.Search(len(s)/4, func(i int) bool {
		return cmp(s[i*4:i*4+n], key) != -1
	})
	i := index * 4
	// If every entry compares lower than key, sort.Search returns len(s)/4;
	// guard the slice below so that case yields -1 instead of a panic.
	if i+n > len(s) || cmp(s[i:i+n], key) != 0 {
		return -1
	}
	return index
}

// Next finds the next occurrence of key after index x, which must have been
// obtained from a call to Index using the same key. It returns x+1 or -1.
func (s Index) Next(key []byte, x int) int {
	if x++; x*4 < len(s) && cmp(s[x*4:x*4+len(key)], key) == 0 {
		return x
	}
	return -1
}

// cmp returns an integer comparing a and b lexicographically.
func cmp(a Index, b []byte) int {
	n := len(a)
	if len(b) < n {
		n = len(b)
	}
	for i, c := range b[:n] {
		switch {
		case a[i] > c:
			return 1
		case a[i] < c:
			return -1
		}
	}
	switch {
	case len(a) < len(b):
		return -1
	case len(a) > len(b):
		return 1
	}
	return 0
}

// Compare returns an integer comparing a and b lexicographically.
func Compare(a string, b []byte) int {
	return cmp(Index(a), b)
}

// FixCase reformats b to the same pattern of cases as form.
// It returns false if string b is malformed (wrong length, or a byte that
// is not a letter); b may have been partially rewritten in that case.
func FixCase(form string, b []byte) bool {
	if len(form) != len(b) {
		return false
	}
	for i, c := range b {
		if form[i] <= 'Z' {
			if c >= 'a' {
				c -= 'z' - 'Z'
			}
			if c < 'A' || 'Z' < c {
				return false
			}
		} else {
			if c <= 'Z' {
				c += 'z' - 'Z'
			}
			if c < 'a' || 'z' < c {
				return false
			}
		}
		b[i] = c
	}
	return true
}
|
{
"pile_set_name": "Github"
}
|
//
// std_type_info.cpp
//
// Copyright (c) Microsoft Corporation. All rights reserved.
//
// Definitions of the std::type_info implementation functions, used for
// Run-Time Type Information (RTTI).
//
#include <vcruntime_internal.h>
#include <vcruntime_string.h>
#include <vcruntime_typeinfo.h>
//#include <undname.h>
#if 0
// Provided directly by msvcrt.dll
extern "C" int __cdecl __std_type_info_compare(
    __std_type_info_data const* const lhs,
    __std_type_info_data const* const rhs
    )
{
    // Identical descriptor pointers compare equal without touching names.
    if (lhs == rhs)
    {
        return 0;
    }

    // Compare decorated names, skipping the leading character.
    return strcmp(lhs->_DecoratedName + 1, rhs->_DecoratedName + 1);
}
#endif
// Computes a hash of the type's decorated name (skipping its first
// character, as the comparison function does) using the FNV-1a algorithm
// with the basis/prime pair matching the platform's size_t width.
extern "C" size_t __cdecl __std_type_info_hash(
    __std_type_info_data const* const data
    )
{
#ifdef _WIN64
    static_assert(sizeof(size_t) == 8, "This code is for 64-bit size_t.");
    size_t const fnv_offset_basis = 14695981039346656037ULL;
    size_t const fnv_prime        = 1099511628211ULL;
#else
    static_assert(sizeof(size_t) == 4, "This code is for 32-bit size_t.");
    size_t const fnv_offset_basis = 2166136261U;
    size_t const fnv_prime        = 16777619U;
#endif

    size_t hash = fnv_offset_basis;
    char const* p = data->_DecoratedName + 1;
    while (*p != '\0')
    {
        // FNV-1a: xor the byte in first, then multiply by the prime.
        hash ^= static_cast<size_t>(static_cast<unsigned char>(*p));
        hash *= fnv_prime;
        ++p;
    }

#ifdef _WIN64
    // Fold the upper 32 bits into the lower ones.
    hash ^= hash >> 32;
#endif

    return hash;
}
#if 0
// Provided directly by msvcrt.dll
extern "C" char const* __cdecl __std_type_info_name(
    __std_type_info_data* const data,
    __type_info_node* const root_node
    )
{
    // First check to see if we've already cached the undecorated name; if we
    // have, we can just return it:
    {
        char const* const cached_undecorated_name = __crt_interlocked_read_pointer(&data->_UndecoratedName);
        if (cached_undecorated_name)
        {
            return cached_undecorated_name;
        }
    }

    // Undecorate the name (skipping its leading character), using
    // CRT-heap callbacks for scratch storage:
    __crt_unique_heap_ptr<char> undecorated_name(__unDName(
        nullptr,
        data->_DecoratedName + 1,
        0,
        [](size_t const n) { return _malloc_crt(n); },
        [](void* const p) { return _free_crt(p); },
        UNDNAME_32_BIT_DECODE | UNDNAME_TYPE_ONLY));
    if (!undecorated_name)
    {
        return nullptr; // CRT_REFACTOR TODO This is nonconforming
    }

    // Trim trailing spaces left over from undecoration:
    size_t undecorated_name_length = strlen(undecorated_name.get());
    while (undecorated_name_length != 0 && undecorated_name.get()[undecorated_name_length - 1] == ' ')
    {
        undecorated_name.get()[undecorated_name_length - 1] = '\0';
        --undecorated_name_length;
    }

    // Allocate a list node with the string stored directly after the
    // SLIST_ENTRY header:
    size_t const undecorated_name_count = undecorated_name_length + 1;
    size_t const node_size = sizeof(SLIST_ENTRY) + undecorated_name_count;

    __crt_unique_heap_ptr<void> node_block(_malloc_crt(node_size));
    if (!node_block)
    {
        return nullptr; // CRT_REFACTOR TODO This is nonconforming
    }

    PSLIST_ENTRY const node_header = static_cast<PSLIST_ENTRY>(node_block.get());
    char* const node_string = reinterpret_cast<char*>(node_header + 1);

    *node_header = SLIST_ENTRY{};
    strcpy_s(node_string, undecorated_name_count, undecorated_name.get());

    // Publish our string unless another thread already published one:
    char const* const cached_undecorated_name = __crt_interlocked_compare_exchange_pointer(
        &data->_UndecoratedName,
        node_string,
        nullptr);

    // If the cache already contained an undecorated name pointer, another
    // thread must have cached it while we were computing the undecorated
    // name. Discard the string we created and return the cached string:
    if (cached_undecorated_name)
    {
        return cached_undecorated_name;
    }

    // Otherwise, we've successfully cached our string; link it into the list
    // and return it:
    node_block.detach();
    InterlockedPushEntrySList(&root_node->_Header, node_header);
    return node_string;
}
#endif
// This function is called during module unload to clean up all of the
// undecorated name strings that were allocated by calls to name(), which
// are linked into the singly linked list headed at root_node.
extern "C" void __cdecl __std_type_info_destroy_list(
    __type_info_node* const root_node
    )
{
    // Atomically detach the whole list, then walk it freeing each node.
    for (PSLIST_ENTRY entry = InterlockedFlushSList(&root_node->_Header); entry != nullptr; )
    {
        PSLIST_ENTRY const next = entry->Next;
        _free_crt(entry);
        entry = next;
    }
}
|
{
"pile_set_name": "Github"
}
|
<?php
/**
* Magento
*
* NOTICE OF LICENSE
*
* This source file is subject to the Open Software License (OSL 3.0)
* that is bundled with this package in the file LICENSE.txt.
* It is also available through the world-wide-web at this URL:
* http://opensource.org/licenses/osl-3.0.php
* If you did not receive a copy of the license and are unable to
* obtain it through the world-wide-web, please send an email
* to license@magento.com so we can send you a copy immediately.
*
* DISCLAIMER
*
* Do not edit or add to this file if you wish to upgrade Magento to newer
* versions in the future. If you wish to customize Magento for your
* needs please refer to http://www.magento.com for more information.
*
* @category Unserialize
* @package Unserialize_Reader
* @copyright Copyright (c) 2006-2020 Magento, Inc. (http://www.magento.com)
* @license http://opensource.org/licenses/osl-3.0.php Open Software License (OSL 3.0)
*/
/**
 * Class Unserialize_Reader_Int
 *
 * Consumes an integer token from a serialized string one character at a
 * time, accumulating digits after the colon and emitting the value when
 * the terminating semicolon arrives.
 */
class Unserialize_Reader_Int
{
    /**
     * Reader state; set to READING_VALUE once the first value character
     * (the one following the colon) has been consumed.
     *
     * @var int
     */
    protected $_status;

    /**
     * Characters accumulated for the integer value so far.
     *
     * @var string|int
     */
    protected $_value;

    const READING_VALUE = 1;

    /**
     * Process one character of input.
     *
     * @param string $char
     * @param string $prevChar
     * @return int|null the parsed integer once complete, null otherwise
     */
    public function read($char, $prevChar)
    {
        if ($prevChar == Unserialize_Parser::SYMBOL_COLON) {
            $this->_status = self::READING_VALUE;
            $this->_value .= $char;
            return null;
        }

        if ($this->_status != self::READING_VALUE) {
            return null;
        }

        if ($char === Unserialize_Parser::SYMBOL_SEMICOLON) {
            return (int)$this->_value;
        }

        $this->_value .= $char;
        return null;
    }
}
|
{
"pile_set_name": "Github"
}
|
/*
* Copyright (C) STMicroelectronics SA 2014
* Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
* License terms: GNU General Public License (GPL), version 2
*/
#ifndef _STI_PLANE_H_
#define _STI_PLANE_H_
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
/* Shared drm_plane_funcs vtable used by the STI plane implementations. */
extern struct drm_plane_funcs sti_plane_helpers_funcs;

#define to_sti_plane(x) container_of(x, struct sti_plane, drm_plane)

/*
 * A plane descriptor packs the plane type in the bits above
 * STI_PLANE_TYPE_SHIFT and the per-type id in the bits below it.
 */
#define STI_PLANE_TYPE_SHIFT 8
#define STI_PLANE_TYPE_MASK (~((1 << STI_PLANE_TYPE_SHIFT) - 1))

enum sti_plane_type {
	STI_GDP = 1 << STI_PLANE_TYPE_SHIFT,
	STI_VDP = 2 << STI_PLANE_TYPE_SHIFT,
	STI_CUR = 3 << STI_PLANE_TYPE_SHIFT,
	STI_BCK = 4 << STI_PLANE_TYPE_SHIFT
};

enum sti_plane_id_of_type {
	STI_ID_0 = 0,
	STI_ID_1 = 1,
	STI_ID_2 = 2,
	STI_ID_3 = 3
};

/* Fully-qualified plane descriptors: type | per-type id. */
enum sti_plane_desc {
	STI_GDP_0 = STI_GDP | STI_ID_0,
	STI_GDP_1 = STI_GDP | STI_ID_1,
	STI_GDP_2 = STI_GDP | STI_ID_2,
	STI_GDP_3 = STI_GDP | STI_ID_3,
	STI_HQVDP_0 = STI_VDP | STI_ID_0,
	STI_CURSOR = STI_CUR,
	STI_BACK = STI_BCK
};

/* Lifecycle states a plane moves through. */
enum sti_plane_status {
	STI_PLANE_READY,
	STI_PLANE_UPDATED,
	STI_PLANE_DISABLING,
	STI_PLANE_FLUSHING,
	STI_PLANE_DISABLED,
};

#define FPS_LENGTH 64

/* Book-keeping used to compute frame/field rate strings for a plane. */
struct sti_fps_info {
	bool output;
	unsigned int curr_frame_counter;
	unsigned int last_frame_counter;
	unsigned int curr_field_counter;
	unsigned int last_field_counter;
	struct timespec last_timestamp;
	char fps_str[FPS_LENGTH];
	char fips_str[FPS_LENGTH];
};

/**
 * STI plane structure
 *
 * @plane: drm plane it is bound to (if any)
 * @desc: plane type & id
 * @status: to know the status of the plane
 * @zorder: plane z-order
 * @fps_info: frame per second info
 */
struct sti_plane {
	struct drm_plane drm_plane;
	enum sti_plane_desc desc;
	enum sti_plane_status status;
	int zorder;
	struct sti_fps_info fps_info;
};

const char *sti_plane_to_str(struct sti_plane *plane);
void sti_plane_update_fps(struct sti_plane *plane,
			  bool new_frame,
			  bool new_field);
void sti_plane_init_property(struct sti_plane *plane,
			     enum drm_plane_type type);
#endif
|
{
"pile_set_name": "Github"
}
|
/*
* Copyright (C) 2011-2012 Red Hat, Inc.
*
* This file is released under the GPL.
*/
#include "dm-thin-metadata.h"
#include "persistent-data/dm-btree.h"
#include "persistent-data/dm-space-map.h"
#include "persistent-data/dm-space-map-disk.h"
#include "persistent-data/dm-transaction-manager.h"
#include <linux/list.h>
#include <linux/device-mapper.h>
#include <linux/workqueue.h>
/*--------------------------------------------------------------------------
* As far as the metadata goes, there is:
*
* - A superblock in block zero, taking up fewer than 512 bytes for
* atomic writes.
*
* - A space map managing the metadata blocks.
*
* - A space map managing the data blocks.
*
* - A btree mapping our internal thin dev ids onto struct disk_device_details.
*
* - A hierarchical btree, with 2 levels which effectively maps (thin
* dev id, virtual block) -> block_time. Block time is a 64-bit
* field holding the time in the low 24 bits, and block in the top 48
* bits.
*
* BTrees consist solely of btree_nodes, that fill a block. Some are
* internal nodes, as such their values are a __le64 pointing to other
* nodes. Leaf nodes can store data of any reasonable size (ie. much
* smaller than the block size). The nodes consist of the header,
* followed by an array of keys, followed by an array of values. We have
* to binary search on the keys so they're all held together to help the
* cpu cache.
*
* Space maps have 2 btrees:
*
* - One maps a uint64_t onto a struct index_entry. Which points to a
* bitmap block, and has some details about how many free entries there
* are etc.
*
* - The bitmap blocks have a header (for the checksum). Then the rest
* of the block is pairs of bits. With the meaning being:
*
* 0 - ref count is 0
* 1 - ref count is 1
* 2 - ref count is 2
* 3 - ref count is higher than 2
*
* - If the count is higher than 2 then the ref count is entered in a
* second btree that directly maps the block_address to a uint32_t ref
* count.
*
* The space map metadata variant doesn't have a bitmaps btree. Instead
* it has one single blocks worth of index_entries. This avoids
* recursive issues with the bitmap btree needing to allocate space in
* order to insert. With a small data block size such as 64k the
* metadata support data devices that are hundreds of terrabytes.
*
* The space maps allocate space linearly from front to back. Space that
* is freed in a transaction is never recycled within that transaction.
* To try and avoid fragmenting _free_ space the allocator always goes
* back and fills in gaps.
*
* All metadata io is in THIN_METADATA_BLOCK_SIZE sized/aligned chunks
* from the block manager.
*--------------------------------------------------------------------------*/
#define DM_MSG_PREFIX "thin metadata"
#define THIN_SUPERBLOCK_MAGIC 27022010
#define THIN_SUPERBLOCK_LOCATION 0
#define THIN_VERSION 2
#define SECTOR_TO_BLOCK_SHIFT 3
/*
* For btree insert:
* 3 for btree insert +
* 2 for btree lookup used within space map
* For btree remove:
* 2 for shadow spine +
* 4 for rebalance 3 child node
*/
#define THIN_MAX_CONCURRENT_LOCKS 6
/* This should be plenty */
#define SPACE_MAP_ROOT_SIZE 128
/*
 * Little endian on-disk superblock and device details.
 */
struct thin_disk_superblock {
	__le32 csum;	/* Checksum of superblock except for this field. */
	__le32 flags;
	__le64 blocknr;	/* This block number, dm_block_t. */

	__u8 uuid[16];
	__le64 magic;
	__le32 version;
	__le32 time;

	__le64 trans_id;

	/*
	 * Root held by userspace transactions.
	 */
	__le64 held_root;

	__u8 data_space_map_root[SPACE_MAP_ROOT_SIZE];
	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];

	/*
	 * 2-level btree mapping (dev_id, (dev block, time)) -> data block
	 */
	__le64 data_mapping_root;

	/*
	 * Device detail root mapping dev_id -> device_details
	 */
	__le64 device_details_root;

	__le32 data_block_size;		/* In 512-byte sectors. */

	__le32 metadata_block_size;	/* In 512-byte sectors. */
	__le64 metadata_nr_blocks;

	/* Feature flag sets; see __check_incompat_features(). */
	__le32 compat_flags;
	__le32 compat_ro_flags;
	__le32 incompat_flags;
} __packed;

/* Per-thin-device record stored in the device-details btree. */
struct disk_device_details {
	__le64 mapped_blocks;
	__le64 transaction_id;	/* When created. */
	__le32 creation_time;
	__le32 snapshotted_time;
} __packed;

/* In-core state for one open pool's metadata device. */
struct dm_pool_metadata {
	struct hlist_node hash;

	struct block_device *bdev;
	struct dm_block_manager *bm;
	struct dm_space_map *metadata_sm;
	struct dm_space_map *data_sm;
	struct dm_transaction_manager *tm;
	struct dm_transaction_manager *nb_tm;

	/*
	 * Two-level btree.
	 * First level holds thin_dev_t.
	 * Second level holds mappings.
	 */
	struct dm_btree_info info;

	/*
	 * Non-blocking version of the above.
	 */
	struct dm_btree_info nb_info;

	/*
	 * Just the top level for deleting whole devices.
	 */
	struct dm_btree_info tl_info;

	/*
	 * Just the bottom level for creating new devices.
	 */
	struct dm_btree_info bl_info;

	/*
	 * Describes the device details btree.
	 */
	struct dm_btree_info details_info;

	/* Guards all of the mutable state below. */
	struct rw_semaphore root_lock;
	uint32_t time;
	dm_block_t root;
	dm_block_t details_root;
	struct list_head thin_devices;
	uint64_t trans_id;
	unsigned long flags;
	sector_t data_block_size;

	/*
	 * We reserve a section of the metadata for commit overhead.
	 * All reported space does *not* include this.
	 */
	dm_block_t metadata_reserve;

	/*
	 * Set if a transaction has to be aborted but the attempt to roll back
	 * to the previous (good) transaction failed.  The only pool metadata
	 * operation possible in this state is the closing of the device.
	 */
	bool fail_io:1;

	/*
	 * Set once a thin-pool has been accessed through one of the interfaces
	 * that imply the pool is in-service (e.g. thin devices created/deleted,
	 * thin-pool message, metadata snapshots, etc).
	 */
	bool in_service:1;

	/*
	 * Reading the space map roots can fail, so we read it into these
	 * buffers before the superblock is locked and updated.
	 */
	__u8 data_space_map_root[SPACE_MAP_ROOT_SIZE];
	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
};

/* In-core state for one open thin device within a pool. */
struct dm_thin_device {
	struct list_head list;
	struct dm_pool_metadata *pmd;
	dm_thin_id id;

	int open_count;
	bool changed:1;
	bool aborted_with_changes:1;
	uint64_t mapped_blocks;
	uint64_t transaction_id;
	uint32_t creation_time;
	uint32_t snapshotted_time;
};
/*----------------------------------------------------------------
* superblock validator
*--------------------------------------------------------------*/
#define SUPERBLOCK_CSUM_XOR 160774
/*
 * Validator write hook: records the block's own location in the
 * superblock and computes the checksum over everything after the csum
 * field itself.
 */
static void sb_prepare_for_write(struct dm_block_validator *v,
				 struct dm_block *b,
				 size_t block_size)
{
	struct thin_disk_superblock *disk_super = dm_block_data(b);

	disk_super->blocknr = cpu_to_le64(dm_block_location(b));
	disk_super->csum = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
						      block_size - sizeof(__le32),
						      SUPERBLOCK_CSUM_XOR));
}
/*
 * Validator read hook: verifies the superblock's recorded block number,
 * magic and checksum.  Returns 0 on success or a negative errno.
 */
static int sb_check(struct dm_block_validator *v,
		    struct dm_block *b,
		    size_t block_size)
{
	struct thin_disk_superblock *disk_super = dm_block_data(b);
	__le32 csum_le;

	/* The superblock must record its own on-disk location. */
	if (dm_block_location(b) != le64_to_cpu(disk_super->blocknr)) {
		DMERR("sb_check failed: blocknr %llu: "
		      "wanted %llu", le64_to_cpu(disk_super->blocknr),
		      (unsigned long long)dm_block_location(b));
		return -ENOTBLK;
	}

	if (le64_to_cpu(disk_super->magic) != THIN_SUPERBLOCK_MAGIC) {
		DMERR("sb_check failed: magic %llu: "
		      "wanted %llu", le64_to_cpu(disk_super->magic),
		      (unsigned long long)THIN_SUPERBLOCK_MAGIC);
		return -EILSEQ;
	}

	/* The checksum covers everything after the csum field itself. */
	csum_le = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
					     block_size - sizeof(__le32),
					     SUPERBLOCK_CSUM_XOR));
	if (csum_le != disk_super->csum) {
		DMERR("sb_check failed: csum %u: wanted %u",
		      le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
		return -EILSEQ;
	}

	return 0;
}
/* Block-manager validator wired up for all superblock reads and writes. */
static struct dm_block_validator sb_validator = {
	.name = "superblock",
	.prepare_for_write = sb_prepare_for_write,
	.check = sb_check
};
/*----------------------------------------------------------------
* Methods for the btree value types
*--------------------------------------------------------------*/
/*
 * Packs a data block address and a time stamp into one 64-bit value:
 * time in the low 24 bits, block in the bits above (see the file header
 * comment).  Assumes t fits in 24 bits and that dm_block_t is wide
 * enough for the shift — NOTE(review): both invariants are enforced by
 * callers outside this view; confirm.
 */
static uint64_t pack_block_time(dm_block_t b, uint32_t t)
{
	return (b << 24) | t;
}
/*
 * Splits a packed block_time value into its block address (upper bits)
 * and 24-bit time stamp (low bits); the inverse of pack_block_time().
 */
static void unpack_block_time(uint64_t v, dm_block_t *b, uint32_t *t)
{
	*b = v >> 24;
	*t = v & 0xffffff;
}
/*
 * Btree value-type inc callback: bumps the space map refcount of the
 * data block packed into the value.  context is the data space map.
 */
static void data_block_inc(void *context, const void *value_le)
{
	struct dm_space_map *sm = context;
	__le64 v_le;
	uint64_t b;
	uint32_t t;

	/* Copy out of the btree node before converting endianness. */
	memcpy(&v_le, value_le, sizeof(v_le));
	unpack_block_time(le64_to_cpu(v_le), &b, &t);
	dm_sm_inc_block(sm, b);
}
/*
 * Btree value-type dec callback: drops the space map refcount of the
 * data block packed into the value.  Mirror of data_block_inc().
 */
static void data_block_dec(void *context, const void *value_le)
{
	struct dm_space_map *sm = context;
	__le64 v_le;
	uint64_t b;
	uint32_t t;

	/* Copy out of the btree node before converting endianness. */
	memcpy(&v_le, value_le, sizeof(v_le));
	unpack_block_time(le64_to_cpu(v_le), &b, &t);
	dm_sm_dec_block(sm, b);
}
/*
 * Btree value-type equality callback for data mappings: two values are
 * considered equal when their packed block addresses match; the 24-bit
 * time field is deliberately ignored.
 */
static int data_block_equal(void *context, const void *value1_le, const void *value2_le)
{
	__le64 a_le, b_le;
	uint64_t block_a, block_b;
	uint32_t time_unused;

	memcpy(&a_le, value1_le, sizeof(a_le));
	memcpy(&b_le, value2_le, sizeof(b_le));
	unpack_block_time(le64_to_cpu(a_le), &block_a, &time_unused);
	unpack_block_time(le64_to_cpu(b_le), &block_b, &time_unused);

	return block_a == block_b;
}
/*
 * Top-level btree inc callback: the value is the root of a per-device
 * subtree, so take another transaction-manager reference on that block.
 * context is the bottom-level btree info.
 */
static void subtree_inc(void *context, const void *value)
{
	struct dm_btree_info *info = context;
	__le64 root_le;
	uint64_t root;

	memcpy(&root_le, value, sizeof(root_le));
	root = le64_to_cpu(root_le);
	dm_tm_inc(info->tm, root);
}
/*
 * Top-level btree dec callback: the value is the root of a per-device
 * subtree; deleting the entry tears down the whole subtree beneath it.
 */
static void subtree_dec(void *context, const void *value)
{
	struct dm_btree_info *info = context;
	__le64 root_le;
	uint64_t root;

	memcpy(&root_le, value, sizeof(root_le));
	root = le64_to_cpu(root_le);
	/* Best effort: the failure is logged but not propagated. */
	if (dm_btree_del(info, root))
		DMERR("btree delete failed");
}
/*
 * Top-level btree equality callback: two subtree-root values match iff
 * their little-endian encodings are bit-identical.
 */
static int subtree_equal(void *context, const void *value1_le, const void *value2_le)
{
	__le64 a, b;

	memcpy(&a, value1_le, sizeof(a));
	memcpy(&b, value2_le, sizeof(b));

	return a == b;
}
/*----------------------------------------------------------------*/
/*
 * Variant that is used for in-core only changes or code that
 * shouldn't put the pool in service on its own (e.g. commit).
 */
static inline void __pmd_write_lock(struct dm_pool_metadata *pmd)
	__acquires(pmd->root_lock)	/* sparse annotation */
{
	down_write(&pmd->root_lock);
}
/* Alias making call sites self-documenting about the in-core intent. */
#define pmd_write_lock_in_core(pmd) __pmd_write_lock((pmd))
/*
 * Takes the metadata write lock and additionally marks the pool as
 * having been put in service (see the in_service field's comment).
 */
static inline void pmd_write_lock(struct dm_pool_metadata *pmd)
{
	__pmd_write_lock(pmd);
	if (unlikely(!pmd->in_service))
		pmd->in_service = true;
}
/* Releases the metadata write lock taken by either lock variant. */
static inline void pmd_write_unlock(struct dm_pool_metadata *pmd)
	__releases(pmd->root_lock)	/* sparse annotation */
{
	up_write(&pmd->root_lock);
}
/*----------------------------------------------------------------*/
/*
 * Write-locks the superblock location with its contents zeroed, using
 * the superblock validator.
 */
static int superblock_lock_zero(struct dm_pool_metadata *pmd,
				struct dm_block **sblock)
{
	return dm_bm_write_lock_zero(pmd->bm, THIN_SUPERBLOCK_LOCATION,
				     &sb_validator, sblock);
}
/* Write-locks the existing superblock using the superblock validator. */
static int superblock_lock(struct dm_pool_metadata *pmd,
			   struct dm_block **sblock)
{
	return dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
				&sb_validator, sblock);
}
/*
 * Reads the superblock location and reports via *result whether every
 * 64-bit word of the block is zero.  Returns 0 on success or a negative
 * errno from the read lock.
 */
static int __superblock_all_zeroes(struct dm_block_manager *bm, int *result)
{
	int r;
	unsigned i;
	struct dm_block *b;
	__le64 *data_le, zero = cpu_to_le64(0);
	unsigned block_size = dm_bm_block_size(bm) / sizeof(__le64);

	/*
	 * We can't use a validator here - it may be all zeroes.
	 */
	r = dm_bm_read_lock(bm, THIN_SUPERBLOCK_LOCATION, NULL, &b);
	if (r)
		return r;

	data_le = dm_block_data(b);
	*result = 1;
	for (i = 0; i < block_size; i++) {
		if (data_le[i] != zero) {
			*result = 0;
			break;
		}
	}

	dm_bm_unlock(b);

	return 0;
}
/*
 * Wires up the geometry and value-type callbacks for every btree the
 * pool uses; must run after the transaction managers and space maps
 * have been created.
 */
static void __setup_btree_details(struct dm_pool_metadata *pmd)
{
	/* Full 2-level mapping tree: (dev id, virtual block) -> block_time. */
	pmd->info.tm = pmd->tm;
	pmd->info.levels = 2;
	pmd->info.value_type.context = pmd->data_sm;
	pmd->info.value_type.size = sizeof(__le64);
	pmd->info.value_type.inc = data_block_inc;
	pmd->info.value_type.dec = data_block_dec;
	pmd->info.value_type.equal = data_block_equal;

	/* Non-blocking clone of the above, differing only in its tm. */
	memcpy(&pmd->nb_info, &pmd->info, sizeof(pmd->nb_info));
	pmd->nb_info.tm = pmd->nb_tm;

	/* Top level only: values are roots of per-device subtrees. */
	pmd->tl_info.tm = pmd->tm;
	pmd->tl_info.levels = 1;
	pmd->tl_info.value_type.context = &pmd->bl_info;
	pmd->tl_info.value_type.size = sizeof(__le64);
	pmd->tl_info.value_type.inc = subtree_inc;
	pmd->tl_info.value_type.dec = subtree_dec;
	pmd->tl_info.value_type.equal = subtree_equal;

	/* Bottom level only: same value semantics as the full tree. */
	pmd->bl_info.tm = pmd->tm;
	pmd->bl_info.levels = 1;
	pmd->bl_info.value_type.context = pmd->data_sm;
	pmd->bl_info.value_type.size = sizeof(__le64);
	pmd->bl_info.value_type.inc = data_block_inc;
	pmd->bl_info.value_type.dec = data_block_dec;
	pmd->bl_info.value_type.equal = data_block_equal;

	/* Device-details tree: plain structs, no refcounting callbacks. */
	pmd->details_info.tm = pmd->tm;
	pmd->details_info.levels = 1;
	pmd->details_info.value_type.context = NULL;
	pmd->details_info.value_type.size = sizeof(struct disk_device_details);
	pmd->details_info.value_type.inc = NULL;
	pmd->details_info.value_type.dec = NULL;
	pmd->details_info.value_type.equal = NULL;
}
/*
 * Snapshots both space map roots into the pmd's in-core buffers; done
 * before the superblock is locked so a failing read can't leave a
 * half-updated superblock (see the buffers' comment in
 * dm_pool_metadata).  Returns 0 or a negative errno.
 */
static int save_sm_roots(struct dm_pool_metadata *pmd)
{
	int r;
	size_t len;

	r = dm_sm_root_size(pmd->metadata_sm, &len);
	if (r < 0)
		return r;

	r = dm_sm_copy_root(pmd->metadata_sm, &pmd->metadata_space_map_root, len);
	if (r < 0)
		return r;

	r = dm_sm_root_size(pmd->data_sm, &len);
	if (r < 0)
		return r;

	return dm_sm_copy_root(pmd->data_sm, &pmd->data_space_map_root, len);
}
/*
 * Copies the space map roots previously captured by save_sm_roots()
 * into the on-disk superblock image.
 */
static void copy_sm_roots(struct dm_pool_metadata *pmd,
			  struct thin_disk_superblock *disk)
{
	memcpy(&disk->metadata_space_map_root,
	       &pmd->metadata_space_map_root,
	       sizeof(pmd->metadata_space_map_root));

	memcpy(&disk->data_space_map_root,
	       &pmd->data_space_map_root,
	       sizeof(pmd->data_space_map_root));
}
/*
 * Writes the very first superblock of freshly formatted metadata,
 * committing the space maps beforehand so their roots can be embedded.
 * Returns 0 or a negative errno.
 */
static int __write_initial_superblock(struct dm_pool_metadata *pmd)
{
	int r;
	struct dm_block *sblock;
	struct thin_disk_superblock *disk_super;
	sector_t bdev_size = i_size_read(pmd->bdev->bd_inode) >> SECTOR_SHIFT;

	/* Never advertise more metadata space than the format supports. */
	if (bdev_size > THIN_METADATA_MAX_SECTORS)
		bdev_size = THIN_METADATA_MAX_SECTORS;

	r = dm_sm_commit(pmd->data_sm);
	if (r < 0)
		return r;

	r = dm_tm_pre_commit(pmd->tm);
	if (r < 0)
		return r;

	/* Capture the space map roots before locking the superblock. */
	r = save_sm_roots(pmd);
	if (r < 0)
		return r;

	r = superblock_lock_zero(pmd, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);
	disk_super->flags = 0;
	memset(disk_super->uuid, 0, sizeof(disk_super->uuid));
	disk_super->magic = cpu_to_le64(THIN_SUPERBLOCK_MAGIC);
	disk_super->version = cpu_to_le32(THIN_VERSION);
	disk_super->time = 0;
	disk_super->trans_id = 0;
	disk_super->held_root = 0;

	copy_sm_roots(pmd, disk_super);

	disk_super->data_mapping_root = cpu_to_le64(pmd->root);
	disk_super->device_details_root = cpu_to_le64(pmd->details_root);
	disk_super->metadata_block_size = cpu_to_le32(THIN_METADATA_BLOCK_SIZE);
	disk_super->metadata_nr_blocks = cpu_to_le64(bdev_size >> SECTOR_TO_BLOCK_SHIFT);
	disk_super->data_block_size = cpu_to_le32(pmd->data_block_size);

	/*
	 * No explicit unlock of sblock here — presumably dm_tm_commit
	 * consumes the lock; NOTE(review): confirm against the
	 * transaction manager API.
	 */
	return dm_tm_commit(pmd->tm, sblock);
}
/*
 * Builds brand-new metadata on an empty device: transaction manager and
 * both space maps, the mapping and device-details btrees, and finally
 * the initial superblock.  On failure, tears down in reverse order of
 * construction via the goto ladder.  Returns 0 or a negative errno.
 */
static int __format_metadata(struct dm_pool_metadata *pmd)
{
	int r;

	r = dm_tm_create_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION,
				 &pmd->tm, &pmd->metadata_sm);
	if (r < 0) {
		DMERR("tm_create_with_sm failed");
		return r;
	}

	pmd->data_sm = dm_sm_disk_create(pmd->tm, 0);
	if (IS_ERR(pmd->data_sm)) {
		DMERR("sm_disk_create failed");
		r = PTR_ERR(pmd->data_sm);
		goto bad_cleanup_tm;
	}

	pmd->nb_tm = dm_tm_create_non_blocking_clone(pmd->tm);
	if (!pmd->nb_tm) {
		DMERR("could not create non-blocking clone tm");
		r = -ENOMEM;
		goto bad_cleanup_data_sm;
	}

	__setup_btree_details(pmd);

	r = dm_btree_empty(&pmd->info, &pmd->root);
	if (r < 0)
		goto bad_cleanup_nb_tm;

	r = dm_btree_empty(&pmd->details_info, &pmd->details_root);
	if (r < 0) {
		DMERR("couldn't create devices root");
		goto bad_cleanup_nb_tm;
	}

	r = __write_initial_superblock(pmd);
	if (r)
		goto bad_cleanup_nb_tm;

	return 0;

bad_cleanup_nb_tm:
	dm_tm_destroy(pmd->nb_tm);
bad_cleanup_data_sm:
	dm_sm_destroy(pmd->data_sm);
bad_cleanup_tm:
	dm_tm_destroy(pmd->tm);
	dm_sm_destroy(pmd->metadata_sm);

	return r;
}
/*
 * Reject metadata carrying feature flags this kernel does not understand.
 * Unknown incompat flags block all access; unknown compat_ro flags only
 * block read-write access, so they are ignored on a read-only device.
 */
static int __check_incompat_features(struct thin_disk_superblock *disk_super,
				     struct dm_pool_metadata *pmd)
{
	uint32_t unknown;

	unknown = le32_to_cpu(disk_super->incompat_flags) & ~THIN_FEATURE_INCOMPAT_SUPP;
	if (unknown) {
		DMERR("could not access metadata due to unsupported optional features (%lx).",
		      (unsigned long)unknown);
		return -EINVAL;
	}

	/*
	 * A read-only device will never write, so skip the RDWR checks.
	 */
	if (get_disk_ro(pmd->bdev->bd_disk))
		return 0;

	unknown = le32_to_cpu(disk_super->compat_ro_flags) & ~THIN_FEATURE_COMPAT_RO_SUPP;
	if (unknown) {
		DMERR("could not access metadata RDWR due to unsupported optional features (%lx).",
		      (unsigned long)unknown);
		return -EINVAL;
	}

	return 0;
}
/*
 * Open existing, already formatted pool metadata.
 *
 * Validates the superblock (data block size, feature flags) under a read
 * lock, then builds the persistent-data stack: tm + metadata space map,
 * data space map, and the non-blocking clone tm.  Error labels unwind in
 * reverse order of construction; the superblock read lock is always
 * released before returning.
 */
static int __open_metadata(struct dm_pool_metadata *pmd)
{
	int r;
	struct dm_block *sblock;
	struct thin_disk_superblock *disk_super;
	r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
			    &sb_validator, &sblock);
	if (r < 0) {
		DMERR("couldn't read superblock");
		return r;
	}
	disk_super = dm_block_data(sblock);
	/* Verify the data block size hasn't changed */
	if (le32_to_cpu(disk_super->data_block_size) != pmd->data_block_size) {
		DMERR("changing the data block size (from %u to %llu) is not supported",
		      le32_to_cpu(disk_super->data_block_size),
		      (unsigned long long)pmd->data_block_size);
		r = -EINVAL;
		goto bad_unlock_sblock;
	}
	r = __check_incompat_features(disk_super, pmd);
	if (r < 0)
		goto bad_unlock_sblock;
	r = dm_tm_open_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION,
			       disk_super->metadata_space_map_root,
			       sizeof(disk_super->metadata_space_map_root),
			       &pmd->tm, &pmd->metadata_sm);
	if (r < 0) {
		DMERR("tm_open_with_sm failed");
		goto bad_unlock_sblock;
	}
	pmd->data_sm = dm_sm_disk_open(pmd->tm, disk_super->data_space_map_root,
				       sizeof(disk_super->data_space_map_root));
	if (IS_ERR(pmd->data_sm)) {
		DMERR("sm_disk_open failed");
		r = PTR_ERR(pmd->data_sm);
		goto bad_cleanup_tm;
	}
	pmd->nb_tm = dm_tm_create_non_blocking_clone(pmd->tm);
	if (!pmd->nb_tm) {
		DMERR("could not create non-blocking clone tm");
		r = -ENOMEM;
		goto bad_cleanup_data_sm;
	}
	__setup_btree_details(pmd);
	dm_bm_unlock(sblock);
	return 0;
bad_cleanup_data_sm:
	dm_sm_destroy(pmd->data_sm);
bad_cleanup_tm:
	dm_tm_destroy(pmd->tm);
	dm_sm_destroy(pmd->metadata_sm);
bad_unlock_sblock:
	dm_bm_unlock(sblock);
	return r;
}
/*
 * Open existing metadata, or format the device if its superblock area is
 * all zeroes.  Formatting is only permitted when @format_device is set;
 * otherwise an unformatted device yields -EPERM.
 */
static int __open_or_format_metadata(struct dm_pool_metadata *pmd, bool format_device)
{
	int unformatted;
	int ret;

	ret = __superblock_all_zeroes(pmd->bm, &unformatted);
	if (ret)
		return ret;

	if (!unformatted)
		return __open_metadata(pmd);

	return format_device ? __format_metadata(pmd) : -EPERM;
}
/*
 * Create the block manager and bring up (open or format) the rest of the
 * persistent-data stack on top of it.  The block manager is torn down
 * again if the open/format step fails.
 */
static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool format_device)
{
	int ret;

	pmd->bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
					  THIN_MAX_CONCURRENT_LOCKS);
	if (IS_ERR(pmd->bm)) {
		DMERR("could not create block manager");
		return PTR_ERR(pmd->bm);
	}

	ret = __open_or_format_metadata(pmd, format_device);
	if (ret)
		dm_block_manager_destroy(pmd->bm);

	return ret;
}
/*
 * Tear down the in-core persistent-data objects in reverse order of
 * creation: both space maps, the non-blocking clone tm, the transaction
 * manager, and finally the block manager.
 */
static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd)
{
	dm_sm_destroy(pmd->data_sm);
	dm_sm_destroy(pmd->metadata_sm);
	dm_tm_destroy(pmd->nb_tm);
	dm_tm_destroy(pmd->tm);
	dm_block_manager_destroy(pmd->bm);
}
/*
 * Start a new metadata transaction by loading the in-core state (time,
 * btree roots, transaction id, flags, data block size) from the current
 * on-disk superblock.
 */
static int __begin_transaction(struct dm_pool_metadata *pmd)
{
	int r;
	struct thin_disk_superblock *disk_super;
	struct dm_block *sblock;
	/*
	 * We re-read the superblock every time.  Shouldn't need to do this
	 * really.
	 */
	r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
			    &sb_validator, &sblock);
	if (r)
		return r;
	disk_super = dm_block_data(sblock);
	pmd->time = le32_to_cpu(disk_super->time);
	pmd->root = le64_to_cpu(disk_super->data_mapping_root);
	pmd->details_root = le64_to_cpu(disk_super->device_details_root);
	pmd->trans_id = le64_to_cpu(disk_super->trans_id);
	pmd->flags = le32_to_cpu(disk_super->flags);
	pmd->data_block_size = le32_to_cpu(disk_super->data_block_size);
	dm_bm_unlock(sblock);
	return 0;
}
/*
 * Flush the in-core details of every changed thin device into the
 * details btree.  Devices that are no longer open are freed once their
 * details have been written (hence the _safe list walk).
 */
static int __write_changed_details(struct dm_pool_metadata *pmd)
{
	int r;
	struct dm_thin_device *td, *tmp;
	struct disk_device_details details;
	uint64_t key;
	list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
		if (!td->changed)
			continue;
		key = td->id;
		/* Convert the in-core details to the little-endian disk format. */
		details.mapped_blocks = cpu_to_le64(td->mapped_blocks);
		details.transaction_id = cpu_to_le64(td->transaction_id);
		details.creation_time = cpu_to_le32(td->creation_time);
		details.snapshotted_time = cpu_to_le32(td->snapshotted_time);
		__dm_bless_for_disk(&details);
		r = dm_btree_insert(&pmd->details_info, pmd->details_root,
				    &key, &details, &pmd->details_root);
		if (r)
			return r;
		if (td->open_count)
			td->changed = 0;
		else {
			/* Closed and now persisted: drop the in-core record. */
			list_del(&td->list);
			kfree(td);
		}
	}
	return 0;
}
/*
 * Commit the current metadata transaction: write changed device details,
 * commit the data space map, pre-commit the tm, snapshot the space-map
 * roots, then update and commit the superblock.  A no-op until the pool
 * has actually been put in service.
 */
static int __commit_transaction(struct dm_pool_metadata *pmd)
{
	int r;
	struct thin_disk_superblock *disk_super;
	struct dm_block *sblock;
	/*
	 * We need to know if the thin_disk_superblock exceeds a 512-byte sector.
	 */
	BUILD_BUG_ON(sizeof(struct thin_disk_superblock) > 512);
	if (unlikely(!pmd->in_service))
		return 0;
	r = __write_changed_details(pmd);
	if (r < 0)
		return r;
	r = dm_sm_commit(pmd->data_sm);
	if (r < 0)
		return r;
	r = dm_tm_pre_commit(pmd->tm);
	if (r < 0)
		return r;
	/* Must run after pre-commit so the saved roots match the disk. */
	r = save_sm_roots(pmd);
	if (r < 0)
		return r;
	r = superblock_lock(pmd, &sblock);
	if (r)
		return r;
	disk_super = dm_block_data(sblock);
	disk_super->time = cpu_to_le32(pmd->time);
	disk_super->data_mapping_root = cpu_to_le64(pmd->root);
	disk_super->device_details_root = cpu_to_le64(pmd->details_root);
	disk_super->trans_id = cpu_to_le64(pmd->trans_id);
	disk_super->flags = cpu_to_le32(pmd->flags);
	copy_sm_roots(pmd, disk_super);
	/* Commit the transaction; sblock is handed over to the tm here. */
	return dm_tm_commit(pmd->tm, sblock);
}
/*
 * Compute the number of metadata blocks to hold in reserve: a tenth of
 * the metadata device, capped at 4096 blocks.  Falls back to the cap if
 * the device size cannot be determined.
 */
static void __set_metadata_reserve(struct dm_pool_metadata *pmd)
{
	dm_block_t nr_blocks;
	dm_block_t max_blocks = 4096; /* 16M */
	int ret;

	ret = dm_sm_get_nr_blocks(pmd->metadata_sm, &nr_blocks);
	if (ret) {
		DMERR("could not get size of metadata device");
		pmd->metadata_reserve = max_blocks;
		return;
	}

	pmd->metadata_reserve = min(max_blocks, div_u64(nr_blocks, 10));
}
/*
 * Allocate and initialise a dm_pool_metadata for @bdev, opening existing
 * metadata or (if @format_device) formatting the device, then beginning
 * the first transaction.  Returns the metadata object or an ERR_PTR.
 */
struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
					       sector_t data_block_size,
					       bool format_device)
{
	int r;
	struct dm_pool_metadata *pmd;
	pmd = kmalloc(sizeof(*pmd), GFP_KERNEL);
	if (!pmd) {
		DMERR("could not allocate metadata struct");
		return ERR_PTR(-ENOMEM);
	}
	init_rwsem(&pmd->root_lock);
	pmd->time = 0;
	INIT_LIST_HEAD(&pmd->thin_devices);
	pmd->fail_io = false;
	pmd->in_service = false;
	pmd->bdev = bdev;
	pmd->data_block_size = data_block_size;
	r = __create_persistent_data_objects(pmd, format_device);
	if (r) {
		kfree(pmd);
		return ERR_PTR(r);
	}
	r = __begin_transaction(pmd);
	if (r < 0) {
		/* close() tears down the persistent-data objects for us. */
		if (dm_pool_metadata_close(pmd) < 0)
			DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
		return ERR_PTR(r);
	}
	__set_metadata_reserve(pmd);
	return pmd;
}
/*
 * Close the pool metadata.  Refuses with -EBUSY while any thin device is
 * still open.  Commits the outstanding transaction when the metadata is
 * writable and healthy; a failed commit is only warned about, the close
 * itself still returns 0.
 */
int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
{
	int r;
	unsigned open_devices = 0;
	struct dm_thin_device *td, *tmp;
	down_read(&pmd->root_lock);
	list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
		if (td->open_count)
			open_devices++;
		else {
			/* Closed devices can be discarded right away. */
			list_del(&td->list);
			kfree(td);
		}
	}
	up_read(&pmd->root_lock);
	if (open_devices) {
		DMERR("attempt to close pmd when %u device(s) are still open",
		      open_devices);
		return -EBUSY;
	}
	if (!dm_bm_is_read_only(pmd->bm) && !pmd->fail_io) {
		r = __commit_transaction(pmd);
		if (r < 0)
			DMWARN("%s: __commit_transaction() failed, error = %d",
			       __func__, r);
	}
	/* After fail_io the persistent-data objects were already torn down. */
	if (!pmd->fail_io)
		__destroy_persistent_data_objects(pmd);
	kfree(pmd);
	return 0;
}
/*
 * __open_device: Returns @td corresponding to device with id @dev,
 * creating it if @create is set and incrementing @td->open_count.
 * On failure, @td is undefined.
 *
 * Returns -EEXIST when @create is set but the device is already open,
 * and -ENODATA when the device does not exist and @create is not set.
 */
static int __open_device(struct dm_pool_metadata *pmd,
			 dm_thin_id dev, int create,
			 struct dm_thin_device **td)
{
	int r, changed = 0;
	struct dm_thin_device *td2;
	uint64_t key = dev;
	struct disk_device_details details_le;
	/*
	 * If the device is already open, return it.
	 */
	list_for_each_entry(td2, &pmd->thin_devices, list)
		if (td2->id == dev) {
			/*
			 * May not create an already-open device.
			 */
			if (create)
				return -EEXIST;
			td2->open_count++;
			*td = td2;
			return 0;
		}
	/*
	 * Check the device exists.
	 */
	r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
			    &key, &details_le);
	if (r) {
		if (r != -ENODATA || !create)
			return r;
		/*
		 * Create new device: synthesize fresh details stamped with
		 * the current transaction id and time.
		 */
		changed = 1;
		details_le.mapped_blocks = 0;
		details_le.transaction_id = cpu_to_le64(pmd->trans_id);
		details_le.creation_time = cpu_to_le32(pmd->time);
		details_le.snapshotted_time = cpu_to_le32(pmd->time);
	}
	*td = kmalloc(sizeof(**td), GFP_NOIO);
	if (!*td)
		return -ENOMEM;
	(*td)->pmd = pmd;
	(*td)->id = dev;
	(*td)->open_count = 1;
	/* Newly created devices start out dirty so their details get written. */
	(*td)->changed = changed;
	(*td)->aborted_with_changes = false;
	(*td)->mapped_blocks = le64_to_cpu(details_le.mapped_blocks);
	(*td)->transaction_id = le64_to_cpu(details_le.transaction_id);
	(*td)->creation_time = le32_to_cpu(details_le.creation_time);
	(*td)->snapshotted_time = le32_to_cpu(details_le.snapshotted_time);
	list_add(&(*td)->list, &pmd->thin_devices);
	return 0;
}
/* Drop one reference taken by __open_device(). */
static void __close_device(struct dm_thin_device *td)
{
	--td->open_count;
}
/*
 * Create a new, empty thin device with id @dev.  Fails with -EEXIST if
 * the id is already in use.  On failure after partial progress, the
 * mapping subtree and any top-level insertion are rolled back.
 */
static int __create_thin(struct dm_pool_metadata *pmd,
			 dm_thin_id dev)
{
	int r;
	dm_block_t dev_root;
	uint64_t key = dev;
	struct disk_device_details details_le;
	struct dm_thin_device *td;
	__le64 value;
	/* The id must not already have device details. */
	r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
			    &key, &details_le);
	if (!r)
		return -EEXIST;
	/*
	 * Create an empty btree for the mappings.
	 */
	r = dm_btree_empty(&pmd->bl_info, &dev_root);
	if (r)
		return r;
	/*
	 * Insert it into the main mapping tree.
	 */
	value = cpu_to_le64(dev_root);
	__dm_bless_for_disk(&value);
	r = dm_btree_insert(&pmd->tl_info, pmd->root, &key, &value, &pmd->root);
	if (r) {
		dm_btree_del(&pmd->bl_info, dev_root);
		return r;
	}
	/* Open with create=1 to establish the device-details entry. */
	r = __open_device(pmd, dev, 1, &td);
	if (r) {
		dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
		dm_btree_del(&pmd->bl_info, dev_root);
		return r;
	}
	__close_device(td);
	return r;
}
/*
 * Create a new thin device with id @dev under the metadata write lock.
 * Fails with -EINVAL once the pool has seen a metadata I/O failure.
 */
int dm_pool_create_thin(struct dm_pool_metadata *pmd, dm_thin_id dev)
{
	int ret;

	pmd_write_lock(pmd);
	ret = pmd->fail_io ? -EINVAL : __create_thin(pmd, dev);
	pmd_write_unlock(pmd);

	return ret;
}
/*
 * Record on both origin and snapshot that a snapshot was taken at @time:
 * the origin is marked changed with an updated snapshotted_time, and the
 * snapshot inherits the origin's mapped-block count.
 */
static int __set_snapshot_details(struct dm_pool_metadata *pmd,
				  struct dm_thin_device *snap,
				  dm_thin_id origin, uint32_t time)
{
	struct dm_thin_device *origin_td;
	int ret;

	ret = __open_device(pmd, origin, 0, &origin_td);
	if (ret)
		return ret;

	origin_td->changed = 1;
	origin_td->snapshotted_time = time;

	snap->mapped_blocks = origin_td->mapped_blocks;
	snap->snapshotted_time = time;
	__close_device(origin_td);

	return 0;
}
/*
 * Create snapshot device @dev of @origin by sharing the origin's mapping
 * subtree (a single refcount increment) and bumping the pool time so
 * subsequent writes to either device are detected as shared.  The bad
 * path unwinds both the top-level mapping and details entries for @dev.
 */
static int __create_snap(struct dm_pool_metadata *pmd,
			 dm_thin_id dev, dm_thin_id origin)
{
	int r;
	dm_block_t origin_root;
	uint64_t key = origin, dev_key = dev;
	struct dm_thin_device *td;
	struct disk_device_details details_le;
	__le64 value;
	/* check this device is unused */
	r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
			    &dev_key, &details_le);
	if (!r)
		return -EEXIST;
	/* find the mapping tree for the origin */
	r = dm_btree_lookup(&pmd->tl_info, pmd->root, &key, &value);
	if (r)
		return r;
	origin_root = le64_to_cpu(value);
	/* clone the origin, an inc will do */
	dm_tm_inc(pmd->tm, origin_root);
	/* insert into the main mapping tree */
	value = cpu_to_le64(origin_root);
	__dm_bless_for_disk(&value);
	key = dev;
	r = dm_btree_insert(&pmd->tl_info, pmd->root, &key, &value, &pmd->root);
	if (r) {
		dm_tm_dec(pmd->tm, origin_root);
		return r;
	}
	/* Advance the pool clock so old blocks read as shared. */
	pmd->time++;
	r = __open_device(pmd, dev, 1, &td);
	if (r)
		goto bad;
	r = __set_snapshot_details(pmd, td, origin, pmd->time);
	__close_device(td);
	if (r)
		goto bad;
	return 0;
bad:
	/* key still holds dev at this point; undo both insertions. */
	dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
	dm_btree_remove(&pmd->details_info, pmd->details_root,
			&key, &pmd->details_root);
	return r;
}
/*
 * Create snapshot @dev of @origin under the metadata write lock.
 * Fails with -EINVAL once the pool has seen a metadata I/O failure.
 */
int dm_pool_create_snap(struct dm_pool_metadata *pmd,
			dm_thin_id dev,
			dm_thin_id origin)
{
	int ret;

	pmd_write_lock(pmd);
	ret = pmd->fail_io ? -EINVAL : __create_snap(pmd, dev, origin);
	pmd_write_unlock(pmd);

	return ret;
}
/*
 * Delete thin device @dev.  Only permitted when no one else holds the
 * device open (open_count must drop to 1, our own reference).  Removes
 * the in-core record, then the details entry and the top-level mapping.
 */
static int __delete_device(struct dm_pool_metadata *pmd, dm_thin_id dev)
{
	int r;
	uint64_t key = dev;
	struct dm_thin_device *td;
	/* TODO: failure should mark the transaction invalid */
	r = __open_device(pmd, dev, 0, &td);
	if (r)
		return r;
	if (td->open_count > 1) {
		/* Someone else still has it open; drop our reference only. */
		__close_device(td);
		return -EBUSY;
	}
	list_del(&td->list);
	kfree(td);
	r = dm_btree_remove(&pmd->details_info, pmd->details_root,
			    &key, &pmd->details_root);
	if (r)
		return r;
	r = dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
	if (r)
		return r;
	return 0;
}
/*
 * Delete thin device @dev under the metadata write lock.  Fails with
 * -EINVAL once the pool has seen a metadata I/O failure.
 */
int dm_pool_delete_thin_device(struct dm_pool_metadata *pmd,
			       dm_thin_id dev)
{
	int ret;

	pmd_write_lock(pmd);
	ret = pmd->fail_io ? -EINVAL : __delete_device(pmd, dev);
	pmd_write_unlock(pmd);

	return ret;
}
/*
 * Compare-and-set the pool's userspace transaction id: @new_id is only
 * stored when the current id matches @current_id.  Fails with -EINVAL
 * on mismatch or after a metadata I/O failure.
 */
int dm_pool_set_metadata_transaction_id(struct dm_pool_metadata *pmd,
					uint64_t current_id,
					uint64_t new_id)
{
	int ret = -EINVAL;

	pmd_write_lock(pmd);
	if (!pmd->fail_io) {
		if (pmd->trans_id == current_id) {
			pmd->trans_id = new_id;
			ret = 0;
		} else {
			DMERR("mismatched transaction id");
		}
	}
	pmd_write_unlock(pmd);

	return ret;
}
/* Read the pool's userspace transaction id under the read lock. */
int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd,
					uint64_t *result)
{
	int ret = -EINVAL;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io) {
		*result = pmd->trans_id;
		ret = 0;
	}
	up_read(&pmd->root_lock);

	return ret;
}
/*
 * Take a metadata snapshot: commit the current transaction, shadow the
 * superblock into a "held root" block, pin the btrees it references,
 * and record the held root's location in the live superblock.  Only one
 * metadata snapshot may exist at a time (-EBUSY otherwise).
 */
static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
{
	int r, inc;
	struct thin_disk_superblock *disk_super;
	struct dm_block *copy, *sblock;
	dm_block_t held_root;
	/*
	 * We commit to ensure the btree roots which we increment in a
	 * moment are up to date.
	 */
	r = __commit_transaction(pmd);
	if (r < 0) {
		DMWARN("%s: __commit_transaction() failed, error = %d",
		       __func__, r);
		return r;
	}
	/*
	 * Copy the superblock.  The extra ref forces the shadow op to
	 * actually copy rather than reuse the block in place.
	 */
	dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION);
	r = dm_tm_shadow_block(pmd->tm, THIN_SUPERBLOCK_LOCATION,
			       &sb_validator, &copy, &inc);
	if (r)
		return r;
	BUG_ON(!inc);
	held_root = dm_block_location(copy);
	disk_super = dm_block_data(copy);
	if (le64_to_cpu(disk_super->held_root)) {
		DMWARN("Pool metadata snapshot already exists: release this before taking another.");
		dm_tm_dec(pmd->tm, held_root);
		dm_tm_unlock(pmd->tm, copy);
		return -EBUSY;
	}
	/*
	 * Wipe the spacemap since we're not publishing this.
	 */
	memset(&disk_super->data_space_map_root, 0,
	       sizeof(disk_super->data_space_map_root));
	memset(&disk_super->metadata_space_map_root, 0,
	       sizeof(disk_super->metadata_space_map_root));
	/*
	 * Increment the data structures that need to be preserved.
	 */
	dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->data_mapping_root));
	dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->device_details_root));
	dm_tm_unlock(pmd->tm, copy);
	/*
	 * Write the held root into the superblock.
	 */
	r = superblock_lock(pmd, &sblock);
	if (r) {
		dm_tm_dec(pmd->tm, held_root);
		return r;
	}
	disk_super = dm_block_data(sblock);
	disk_super->held_root = cpu_to_le64(held_root);
	dm_bm_unlock(sblock);
	return 0;
}
/*
 * Take a metadata snapshot under the write lock.  Fails with -EINVAL
 * once the pool has seen a metadata I/O failure.
 */
int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd)
{
	int ret;

	pmd_write_lock(pmd);
	ret = pmd->fail_io ? -EINVAL : __reserve_metadata_snap(pmd);
	pmd_write_unlock(pmd);

	return ret;
}
/*
 * Release the held metadata snapshot: clear held_root in the superblock,
 * drop the snapshot's btree references and the held block itself.
 * Returns -EINVAL if no snapshot is currently held.
 */
static int __release_metadata_snap(struct dm_pool_metadata *pmd)
{
	int r;
	struct thin_disk_superblock *disk_super;
	struct dm_block *sblock, *copy;
	dm_block_t held_root;
	r = superblock_lock(pmd, &sblock);
	if (r)
		return r;
	disk_super = dm_block_data(sblock);
	held_root = le64_to_cpu(disk_super->held_root);
	disk_super->held_root = cpu_to_le64(0);
	dm_bm_unlock(sblock);
	if (!held_root) {
		DMWARN("No pool metadata snapshot found: nothing to release.");
		return -EINVAL;
	}
	r = dm_tm_read_lock(pmd->tm, held_root, &sb_validator, &copy);
	if (r)
		return r;
	/* Drop the references taken when the snapshot was reserved. */
	disk_super = dm_block_data(copy);
	dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root));
	dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root));
	dm_sm_dec_block(pmd->metadata_sm, held_root);
	dm_tm_unlock(pmd->tm, copy);
	return 0;
}
/*
 * Release the metadata snapshot under the write lock.  Fails with
 * -EINVAL once the pool has seen a metadata I/O failure.
 */
int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd)
{
	int ret;

	pmd_write_lock(pmd);
	ret = pmd->fail_io ? -EINVAL : __release_metadata_snap(pmd);
	pmd_write_unlock(pmd);

	return ret;
}
/* Read the held (metadata snapshot) root out of the live superblock. */
static int __get_metadata_snap(struct dm_pool_metadata *pmd,
			       dm_block_t *result)
{
	struct thin_disk_superblock *disk_super;
	struct dm_block *sblock;
	int ret;

	ret = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
			      &sb_validator, &sblock);
	if (ret)
		return ret;

	disk_super = dm_block_data(sblock);
	*result = le64_to_cpu(disk_super->held_root);
	dm_bm_unlock(sblock);

	return 0;
}
/* Report the held metadata-snapshot root (0 if none) under the read lock. */
int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
			      dm_block_t *result)
{
	int ret = -EINVAL;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io)
		ret = __get_metadata_snap(pmd, result);
	up_read(&pmd->root_lock);

	return ret;
}
/*
 * Open (without creating) thin device @dev.  Uses the in-core write
 * lock variant since only the device list is mutated.
 */
int dm_pool_open_thin_device(struct dm_pool_metadata *pmd, dm_thin_id dev,
			     struct dm_thin_device **td)
{
	int ret;

	pmd_write_lock_in_core(pmd);
	ret = pmd->fail_io ? -EINVAL : __open_device(pmd, dev, 0, td);
	pmd_write_unlock(pmd);

	return ret;
}
/* Drop a reference on @td under the in-core write lock.  Always succeeds. */
int dm_pool_close_thin_device(struct dm_thin_device *td)
{
	struct dm_pool_metadata *pmd = td->pmd;

	pmd_write_lock_in_core(pmd);
	__close_device(td);
	pmd_write_unlock(pmd);

	return 0;
}
/* Return the pool-wide id of thin device @td. */
dm_thin_id dm_thin_dev_id(struct dm_thin_device *td)
{
	return td->id;
}
/*
 * Check whether @time (of block creation) is older than @td's last snapshot.
 * If so then the associated block is shared with the last snapshot device.
 * Any block on a device created *after* the device last got snapshotted is
 * necessarily not shared.
 */
static bool __snapshotted_since(struct dm_thin_device *td, uint32_t time)
{
	return time < td->snapshotted_time;
}
/*
 * Decode a packed block+time btree value into a lookup result, flagging
 * the block as shared if it predates @td's last snapshot.
 */
static void unpack_lookup_result(struct dm_thin_device *td, __le64 value,
				 struct dm_thin_lookup_result *result)
{
	dm_block_t mapped_block;
	uint32_t creation_time;

	unpack_block_time(le64_to_cpu(value), &mapped_block, &creation_time);
	result->block = mapped_block;
	result->shared = __snapshotted_since(td, creation_time);
}
/*
 * Look up virtual @block in @td's mapping btree.  The non-blocking btree
 * info is selected when the caller must not issue metadata I/O.
 */
static int __find_block(struct dm_thin_device *td, dm_block_t block,
			int can_issue_io, struct dm_thin_lookup_result *result)
{
	struct dm_pool_metadata *pmd = td->pmd;
	dm_block_t keys[2] = { td->id, block };
	struct dm_btree_info *info = can_issue_io ? &pmd->info : &pmd->nb_info;
	__le64 value;
	int ret;

	ret = dm_btree_lookup(info, pmd->root, keys, &value);
	if (!ret)
		unpack_lookup_result(td, value, result);

	return ret;
}
/* Locked wrapper around __find_block(); -EINVAL after an I/O failure. */
int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block,
		       int can_issue_io, struct dm_thin_lookup_result *result)
{
	struct dm_pool_metadata *pmd = td->pmd;
	int ret = -EINVAL;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io)
		ret = __find_block(td, block, can_issue_io, result);
	up_read(&pmd->root_lock);

	return ret;
}
/*
 * Find the first mapped block at or after @block, returning its virtual
 * block number in *vblock and its mapping in *result.
 */
static int __find_next_mapped_block(struct dm_thin_device *td, dm_block_t block,
				    dm_block_t *vblock,
				    struct dm_thin_lookup_result *result)
{
	struct dm_pool_metadata *pmd = td->pmd;
	dm_block_t keys[2] = { td->id, block };
	__le64 value;
	int ret;

	ret = dm_btree_lookup_next(&pmd->info, pmd->root, keys, vblock, &value);
	if (!ret)
		unpack_lookup_result(td, value, result);

	return ret;
}
/*
 * Find the first contiguously mapped run inside [begin, end): a run of
 * virtual blocks whose pool blocks are consecutive and share the same
 * shared/exclusive status.  Returns -ENODATA when no mapping falls in
 * the range.  On success [*thin_begin, *thin_end) maps from *pool_begin.
 */
static int __find_mapped_range(struct dm_thin_device *td,
			       dm_block_t begin, dm_block_t end,
			       dm_block_t *thin_begin, dm_block_t *thin_end,
			       dm_block_t *pool_begin, bool *maybe_shared)
{
	int r;
	dm_block_t pool_end;
	struct dm_thin_lookup_result lookup;
	if (end < begin)
		return -ENODATA;
	/* Skip forward to the first mapped block. */
	r = __find_next_mapped_block(td, begin, &begin, &lookup);
	if (r)
		return r;
	if (begin >= end)
		return -ENODATA;
	*thin_begin = begin;
	*pool_begin = lookup.block;
	*maybe_shared = lookup.shared;
	begin++;
	pool_end = *pool_begin + 1;
	/* Extend the run while pool blocks stay consecutive and sharing matches. */
	while (begin != end) {
		r = __find_block(td, begin, true, &lookup);
		if (r) {
			if (r == -ENODATA)
				break;
			else
				return r;
		}
		if ((lookup.block != pool_end) ||
		    (lookup.shared != *maybe_shared))
			break;
		pool_end++;
		begin++;
	}
	*thin_end = begin;
	return 0;
}
/* Locked wrapper around __find_mapped_range(); -EINVAL after an I/O failure. */
int dm_thin_find_mapped_range(struct dm_thin_device *td,
			      dm_block_t begin, dm_block_t end,
			      dm_block_t *thin_begin, dm_block_t *thin_end,
			      dm_block_t *pool_begin, bool *maybe_shared)
{
	struct dm_pool_metadata *pmd = td->pmd;
	int ret = -EINVAL;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io)
		ret = __find_mapped_range(td, begin, end, thin_begin, thin_end,
					  pool_begin, maybe_shared);
	up_read(&pmd->root_lock);

	return ret;
}
/*
 * Map virtual @block of @td to @data_block, stamping the mapping with
 * the current pool time.  mapped_blocks is only bumped for a brand new
 * mapping, not for an overwrite.
 */
static int __insert(struct dm_thin_device *td, dm_block_t block,
		    dm_block_t data_block)
{
	struct dm_pool_metadata *pmd = td->pmd;
	dm_block_t keys[2] = { td->id, block };
	int ret, inserted;
	__le64 value;

	value = cpu_to_le64(pack_block_time(data_block, pmd->time));
	__dm_bless_for_disk(&value);

	ret = dm_btree_insert_notify(&pmd->info, pmd->root, keys, &value,
				     &pmd->root, &inserted);
	if (ret)
		return ret;

	td->changed = 1;
	if (inserted)
		td->mapped_blocks++;

	return 0;
}
/* Insert a mapping under the write lock; -EINVAL after an I/O failure. */
int dm_thin_insert_block(struct dm_thin_device *td, dm_block_t block,
			 dm_block_t data_block)
{
	struct dm_pool_metadata *pmd = td->pmd;
	int ret;

	pmd_write_lock(pmd);
	ret = pmd->fail_io ? -EINVAL : __insert(td, block, data_block);
	pmd_write_unlock(pmd);

	return ret;
}
/* Unmap a single virtual block from @td's mapping btree. */
static int __remove(struct dm_thin_device *td, dm_block_t block)
{
	struct dm_pool_metadata *pmd = td->pmd;
	dm_block_t keys[2] = { td->id, block };
	int ret;

	ret = dm_btree_remove(&pmd->info, pmd->root, keys, &pmd->root);
	if (ret)
		return ret;

	td->mapped_blocks--;
	td->changed = 1;

	return 0;
}
/*
 * Unmap every mapped block of @td in [begin, end).  The device's mapping
 * subtree is ref-counted out of the top-level tree, pruned with
 * dm_btree_remove_leaves() in a loop (it stops at the first unmapped
 * entry), then reinserted under the same key.
 */
static int __remove_range(struct dm_thin_device *td, dm_block_t begin, dm_block_t end)
{
	int r;
	unsigned count, total_count = 0;
	struct dm_pool_metadata *pmd = td->pmd;
	dm_block_t keys[1] = { td->id };
	__le64 value;
	dm_block_t mapping_root;
	/*
	 * Find the mapping tree
	 */
	r = dm_btree_lookup(&pmd->tl_info, pmd->root, keys, &value);
	if (r)
		return r;
	/*
	 * Remove from the mapping tree, taking care to inc the
	 * ref count so it doesn't get deleted.
	 */
	mapping_root = le64_to_cpu(value);
	dm_tm_inc(pmd->tm, mapping_root);
	r = dm_btree_remove(&pmd->tl_info, pmd->root, keys, &pmd->root);
	if (r)
		return r;
	/*
	 * Remove leaves stops at the first unmapped entry, so we have to
	 * loop round finding mapped ranges.
	 */
	while (begin < end) {
		/* Skip forward to the next mapped block at or after begin. */
		r = dm_btree_lookup_next(&pmd->bl_info, mapping_root, &begin, &begin, &value);
		if (r == -ENODATA)
			break;
		if (r)
			return r;
		if (begin >= end)
			break;
		r = dm_btree_remove_leaves(&pmd->bl_info, mapping_root, &begin, end, &mapping_root, &count);
		if (r)
			return r;
		total_count += count;
	}
	td->mapped_blocks -= total_count;
	td->changed = 1;
	/*
	 * Reinsert the mapping tree.
	 */
	value = cpu_to_le64(mapping_root);
	__dm_bless_for_disk(&value);
	return dm_btree_insert(&pmd->tl_info, pmd->root, keys, &value, &pmd->root);
}
/* Remove one mapping under the write lock; -EINVAL after an I/O failure. */
int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block)
{
	struct dm_pool_metadata *pmd = td->pmd;
	int ret;

	pmd_write_lock(pmd);
	ret = pmd->fail_io ? -EINVAL : __remove(td, block);
	pmd_write_unlock(pmd);

	return ret;
}
/* Remove a mapping range under the write lock; -EINVAL after an I/O failure. */
int dm_thin_remove_range(struct dm_thin_device *td,
			 dm_block_t begin, dm_block_t end)
{
	struct dm_pool_metadata *pmd = td->pmd;
	int ret;

	pmd_write_lock(pmd);
	ret = pmd->fail_io ? -EINVAL : __remove_range(td, begin, end);
	pmd_write_unlock(pmd);

	return ret;
}
/*
 * Report in *result whether data block @b has more than one reference,
 * i.e. is shared between devices/snapshots.
 */
int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
{
	uint32_t ref_count;
	int ret;

	down_read(&pmd->root_lock);
	ret = dm_sm_get_count(pmd->data_sm, b, &ref_count);
	if (!ret)
		*result = ref_count > 1;
	up_read(&pmd->root_lock);

	return ret;
}
/*
 * Increment the data space-map refcount for every block in [b, e),
 * stopping at (and returning) the first failure.
 */
int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
{
	int ret = 0;

	pmd_write_lock(pmd);
	while (b != e) {
		ret = dm_sm_inc_block(pmd->data_sm, b);
		if (ret)
			break;
		b++;
	}
	pmd_write_unlock(pmd);

	return ret;
}
/*
 * Decrement the data space-map refcount for every block in [b, e),
 * stopping at (and returning) the first failure.
 */
int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
{
	int ret = 0;

	pmd_write_lock(pmd);
	while (b != e) {
		ret = dm_sm_dec_block(pmd->data_sm, b);
		if (ret)
			break;
		b++;
	}
	pmd_write_unlock(pmd);

	return ret;
}
/*
 * Report whether @td has uncommitted changes in the current metadata
 * transaction.
 *
 * Fix: the function returns bool, so hold the flag in a bool rather
 * than funnelling it through an int.
 */
bool dm_thin_changed_this_transaction(struct dm_thin_device *td)
{
	bool changed;

	down_read(&td->pmd->root_lock);
	changed = td->changed;
	up_read(&td->pmd->root_lock);

	return changed;
}
/*
 * Report whether any thin device in the pool has uncommitted changes in
 * the current metadata transaction.
 *
 * Fix: nothing is removed from the list while we only inspect
 * td->changed, so the _safe iteration variant (and its unused tmp
 * cursor) is unnecessary.
 */
bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd)
{
	bool changed = false;
	struct dm_thin_device *td;

	down_read(&pmd->root_lock);
	list_for_each_entry(td, &pmd->thin_devices, list) {
		if (td->changed) {
			changed = true;
			break;
		}
	}
	up_read(&pmd->root_lock);

	return changed;
}
/*
 * Report whether @td had uncommitted changes thrown away by the most
 * recent metadata abort.
 */
bool dm_thin_aborted_changes(struct dm_thin_device *td)
{
	bool aborted;

	down_read(&td->pmd->root_lock);
	aborted = td->aborted_with_changes;
	up_read(&td->pmd->root_lock);

	return aborted;
}
/*
 * Allocate a fresh block from the data space map under the write lock.
 * Fails with -EINVAL once the pool has seen a metadata I/O failure.
 */
int dm_pool_alloc_data_block(struct dm_pool_metadata *pmd, dm_block_t *result)
{
	int ret;

	pmd_write_lock(pmd);
	ret = pmd->fail_io ? -EINVAL : dm_sm_new_block(pmd->data_sm, result);
	pmd_write_unlock(pmd);

	return ret;
}
/*
 * Commit the current metadata transaction and immediately open the next
 * one.  Uses the bare __pmd_write_lock() variant deliberately (see
 * comment below).  Fails with -EINVAL after a metadata I/O failure.
 */
int dm_pool_commit_metadata(struct dm_pool_metadata *pmd)
{
	int r = -EINVAL;
	/*
	 * Care is taken to not have commit be what
	 * triggers putting the thin-pool in-service.
	 */
	__pmd_write_lock(pmd);
	if (pmd->fail_io)
		goto out;
	r = __commit_transaction(pmd);
	if (r < 0)
		goto out;
	/*
	 * Open the next transaction.
	 */
	r = __begin_transaction(pmd);
out:
	pmd_write_unlock(pmd);
	return r;
}
/*
 * Before an abort, remember per device whether it had uncommitted
 * changes that are about to be thrown away.
 */
static void __set_abort_with_changes_flags(struct dm_pool_metadata *pmd)
{
	struct dm_thin_device *td;
	list_for_each_entry(td, &pmd->thin_devices, list)
		td->aborted_with_changes = td->changed;
}
/*
 * Abort the current metadata transaction by tearing down the in-core
 * persistent-data objects and re-opening them from the last committed
 * state on disk.  If re-opening fails the pool is marked fail_io.
 */
int dm_pool_abort_metadata(struct dm_pool_metadata *pmd)
{
	int r = -EINVAL;
	pmd_write_lock(pmd);
	if (pmd->fail_io)
		goto out;
	__set_abort_with_changes_flags(pmd);
	__destroy_persistent_data_objects(pmd);
	/* format_device=false: the device is already formatted. */
	r = __create_persistent_data_objects(pmd, false);
	if (r)
		pmd->fail_io = true;
out:
	pmd_write_unlock(pmd);
	return r;
}
/* Report the number of free data blocks under the read lock. */
int dm_pool_get_free_block_count(struct dm_pool_metadata *pmd, dm_block_t *result)
{
	int ret = -EINVAL;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io)
		ret = dm_sm_get_nr_free(pmd->data_sm, result);
	up_read(&pmd->root_lock);

	return ret;
}
/*
 * Report the number of free metadata blocks, minus the configured
 * reserve (clamped at zero).  Note the adjustment only runs when the
 * space-map query succeeded: r stays -EINVAL when fail_io is set.
 */
int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
					  dm_block_t *result)
{
	int r = -EINVAL;
	down_read(&pmd->root_lock);
	if (!pmd->fail_io)
		r = dm_sm_get_nr_free(pmd->metadata_sm, result);
	if (!r) {
		if (*result < pmd->metadata_reserve)
			*result = 0;
		else
			*result -= pmd->metadata_reserve;
	}
	up_read(&pmd->root_lock);
	return r;
}
/* Report the metadata device size in blocks under the read lock. */
int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
				  dm_block_t *result)
{
	int ret = -EINVAL;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io)
		ret = dm_sm_get_nr_blocks(pmd->metadata_sm, result);
	up_read(&pmd->root_lock);

	return ret;
}
/* Report the data device size in blocks under the read lock. */
int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result)
{
	int ret = -EINVAL;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io)
		ret = dm_sm_get_nr_blocks(pmd->data_sm, result);
	up_read(&pmd->root_lock);

	return ret;
}
/* Report the number of blocks currently mapped by @td. */
int dm_thin_get_mapped_count(struct dm_thin_device *td, dm_block_t *result)
{
	struct dm_pool_metadata *pmd = td->pmd;
	int ret = -EINVAL;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io) {
		*result = td->mapped_blocks;
		ret = 0;
	}
	up_read(&pmd->root_lock);

	return ret;
}
/*
 * Find @td's highest mapped virtual block by locating its mapping
 * subtree and asking the btree for its highest key.
 */
static int __highest_block(struct dm_thin_device *td, dm_block_t *result)
{
	struct dm_pool_metadata *pmd = td->pmd;
	__le64 value_le;
	dm_block_t thin_root;
	int ret;

	ret = dm_btree_lookup(&pmd->tl_info, pmd->root, &td->id, &value_le);
	if (ret)
		return ret;

	thin_root = le64_to_cpu(value_le);
	return dm_btree_find_highest_key(&pmd->bl_info, thin_root, result);
}
/* Locked wrapper around __highest_block(); -EINVAL after an I/O failure. */
int dm_thin_get_highest_mapped_block(struct dm_thin_device *td,
				     dm_block_t *result)
{
	struct dm_pool_metadata *pmd = td->pmd;
	int ret = -EINVAL;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io)
		ret = __highest_block(td, result);
	up_read(&pmd->root_lock);

	return ret;
}
/*
 * Grow @sm to @new_count blocks.  Shrinking is rejected with -EINVAL;
 * an unchanged size is a no-op.
 */
static int __resize_space_map(struct dm_space_map *sm, dm_block_t new_count)
{
	dm_block_t old_count;
	int ret;

	ret = dm_sm_get_nr_blocks(sm, &old_count);
	if (ret)
		return ret;

	if (new_count < old_count) {
		DMERR("cannot reduce size of space map");
		return -EINVAL;
	}

	if (new_count == old_count)
		return 0;

	return dm_sm_extend(sm, new_count - old_count);
}
/* Grow the data device's space map under the write lock. */
int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
{
	int ret;

	pmd_write_lock(pmd);
	ret = pmd->fail_io ? -EINVAL : __resize_space_map(pmd->data_sm, new_count);
	pmd_write_unlock(pmd);

	return ret;
}
/*
 * Grow the metadata device's space map under the write lock, then
 * recompute the metadata reserve for the new size.
 */
int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
{
	int ret = -EINVAL;

	pmd_write_lock(pmd);
	if (!pmd->fail_io) {
		ret = __resize_space_map(pmd->metadata_sm, new_count);
		if (!ret)
			__set_metadata_reserve(pmd);
	}
	pmd_write_unlock(pmd);

	return ret;
}
/* Switch the underlying block manager to read-only mode. */
void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd)
{
	pmd_write_lock_in_core(pmd);
	dm_bm_set_read_only(pmd->bm);
	pmd_write_unlock(pmd);
}
/* Switch the underlying block manager back to read-write mode. */
void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd)
{
	pmd_write_lock_in_core(pmd);
	dm_bm_set_read_write(pmd->bm);
	pmd_write_unlock(pmd);
}
/*
 * Register @fn to be called by the metadata space map once the free
 * block count crosses @threshold.  Note there is no fail_io gate here.
 */
int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
					dm_block_t threshold,
					dm_sm_threshold_fn fn,
					void *context)
{
	int r;
	pmd_write_lock_in_core(pmd);
	r = dm_sm_register_threshold_callback(pmd->metadata_sm, threshold, fn, context);
	pmd_write_unlock(pmd);
	return r;
}
/*
 * Set the NEEDS_CHECK flag both in core and in the on-disk superblock,
 * telling userspace tooling the metadata must be checked before reuse.
 * The in-core flag is set even if updating the superblock fails.
 */
int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd)
{
	int r;
	struct dm_block *sblock;
	struct thin_disk_superblock *disk_super;
	pmd_write_lock(pmd);
	pmd->flags |= THIN_METADATA_NEEDS_CHECK_FLAG;
	r = superblock_lock(pmd, &sblock);
	if (r) {
		DMERR("couldn't read superblock");
		goto out;
	}
	disk_super = dm_block_data(sblock);
	disk_super->flags = cpu_to_le32(pmd->flags);
	dm_bm_unlock(sblock);
out:
	pmd_write_unlock(pmd);
	return r;
}
/* Report whether the in-core NEEDS_CHECK flag is set. */
bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd)
{
	bool ret;

	down_read(&pmd->root_lock);
	ret = pmd->flags & THIN_METADATA_NEEDS_CHECK_FLAG;
	up_read(&pmd->root_lock);

	return ret;
}
/* Ask the transaction manager to issue any queued metadata prefetches. */
void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd)
{
	down_read(&pmd->root_lock);
	if (!pmd->fail_io)
		dm_tm_issue_prefetches(pmd->tm);
	up_read(&pmd->root_lock);
}
|
{
"pile_set_name": "Github"
}
|
#--
#
# Author:: Francis Cianfrocca (gmail: blackhedd)
# Homepage:: http://rubyeventmachine.com
# Date:: 15 November 2006
#
# See EventMachine and EventMachine::Connection for documentation and
# usage examples.
#
#----------------------------------------------------------------------------
#
# Copyright (C) 2006-08 by Francis Cianfrocca. All Rights Reserved.
# Gmail: blackhedd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of either: 1) the GNU General Public License
# as published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version; or 2) Ruby's License.
#
# See the file COPYING for complete licensing information.
#
#---------------------------------------------------------------------------
#
#
#
require 'postgres-pr/message'
require 'postgres-pr/connection'
require 'stringio'
# @private
class StringIO
  # Read exactly +n+ bytes from the buffer.
  #
  # Raises EOFError when the stream is already exhausted (read returned
  # nil) and IOError when fewer than +n+ bytes remained.
  def readbytes(n)
    data = read(n)
    raise EOFError, "End of file reached" if data.nil?
    raise IOError, "data truncated" if data.size < n
    data
  end
  alias read_exactly_n_bytes readbytes
end
module EventMachine
  module Protocols
    # PROVISIONAL IMPLEMENTATION of an evented Postgres client.
    # This implements version 3 of the Postgres wire protocol, which will work
    # with any Postgres version from roughly 7.4 onward.
    #
    # Objective: we want to access Postgres databases without requiring threads.
    # Until now this has been a problem because the Postgres client implementations
    # have all made use of blocking I/O calls, which is incompatible with a
    # thread-free evented model.
    #
    # But rather than re-implement the Postgres Wire3 protocol, we're taking advantage
    # of the existing postgres-pr library, which was originally written by Michael
    # Neumann but (at this writing) appears to be no longer maintained. Still, it's
    # in basically a production-ready state, and the wire protocol isn't that complicated
    # anyway.
    #
    # We're tucking in a bunch of require statements that may not be present in garden-variety
    # EM installations. Until we find a good way to only require these if a program
    # requires postgres, this file will need to be required explicitly.
    #
    # We need to monkeypatch StringIO because it lacks the #readbytes method needed
    # by postgres-pr.
    # The StringIO monkeypatch is lifted from the standard library readbytes.rb,
    # which adds method #readbytes directly to class IO. But StringIO is not a subclass of IO.
    # It is modified to raise an IOError instead of TruncatedDataException since the exception is unused.
    #
    # We cloned the handling of postgres messages from lib/postgres-pr/connection.rb
    # in the postgres-pr library, and modified it for event-handling.
    #
    # TODO: The password handling in dispatch_conn_message is totally incomplete.
    #
    #
    # We return Deferrables from the user-level operations surfaced by this interface.
    # Experimentally, we're using the pattern of always returning a boolean value as the
    # first argument of a deferrable callback to indicate success or failure. This is
    # instead of the traditional pattern of calling Deferrable#succeed or #fail, and
    # requiring the user to define both a callback and an errback function.
    #
    # === Usage
    #  EM.run {
    #    db = EM.connect_unix_domain( "/tmp/.s.PGSQL.5432", EM::P::Postgres3 )
    #    db.connect( dbname, username, psw ).callback do |status|
    #      if status
    #        db.query( "select * from some_table" ).callback do |status, result, errors|
    #          if status
    #            result.rows.each do |row|
    #              p row
    #            end
    #          end
    #        end
    #      end
    #    end
    #  }
    class Postgres3 < EventMachine::Connection
      include PostgresPR

      # Sets up the inbound byte buffer and the hash that accumulates
      # server ParameterStatus values.
      def initialize
        @data = ""
        @params = {}
      end

      # Starts a Postgres session on an already-established connection.
      # Returns a Deferrable whose callback receives [success, message].
      # Only one operation (connect or query) may be outstanding at a time;
      # a second attempt succeeds the deferrable immediately with false.
      def connect db, user, psw=nil
        d = EM::DefaultDeferrable.new
        d.timeout 15

        if @pending_query || @pending_conn
          d.succeed false, "Operation already in progress"
        else
          @pending_conn = d
          prms = {"user"=>user, "database"=>db}
          @user = user
          if psw
            @password = psw
            #prms["password"] = psw
          end
          # 3 << 16 encodes wire-protocol version 3.0 in the startup packet.
          send_data PostgresPR::StartupMessage.new( 3 << 16, prms ).dump
        end

        d
      end

      # Issues a SQL query. Returns a Deferrable whose callback receives
      # [success, result, errors]; result is a PostgresPR::Connection::Result
      # and errors collects any ErrorResponse messages seen during the query.
      def query sql
        d = EM::DefaultDeferrable.new
        d.timeout 15

        if @pending_query || @pending_conn
          d.succeed false, "Operation already in progress"
        else
          @r = PostgresPR::Connection::Result.new
          @e = []
          @pending_query = d
          send_data PostgresPR::Query.dump(sql)
        end

        d
      end

      # Reassembles complete backend messages from the byte stream.
      # Wire format: 1 type byte followed by a 4-byte big-endian length
      # that includes itself, so a full packet is (1 + pktlen) bytes.
      def receive_data data
        @data << data
        while @data.length >= 5
          pktlen = @data[1...5].unpack("N").first
          if @data.length >= (1 + pktlen)
            pkt = @data.slice!(0...(1+pktlen))
            m = StringIO.open( pkt, "r" ) {|io| PostgresPR::Message.read( io ) }

            if @pending_conn
              dispatch_conn_message m
            elsif @pending_query
              dispatch_query_message m
            else
              raise "Unexpected message from database"
            end
          else
            break # very important, break out of the while
          end
        end
      end

      # Fails whichever operation is in flight when the connection drops.
      def unbind
        if o = (@pending_query || @pending_conn)
          o.succeed false, "lost connection"
        end
      end

      # Cloned and modified from the postgres-pr.
      # Handles backend messages that arrive while a connect is pending.
      # ReadyForQuery completes the pending connect deferrable with true.
      def dispatch_conn_message msg
        case msg
        when AuthentificationClearTextPassword
          raise ArgumentError, "no password specified" if @password.nil?
          send_data PasswordMessage.new(@password).dump

        when AuthentificationCryptPassword
          raise ArgumentError, "no password specified" if @password.nil?
          send_data PasswordMessage.new(@password.crypt(msg.salt)).dump

        when AuthentificationMD5Password
          raise ArgumentError, "no password specified" if @password.nil?
          require 'digest/md5'

          # MD5 scheme: md5(md5(password + user) + salt), prefixed with "md5".
          m = Digest::MD5.hexdigest(@password + @user)
          m = Digest::MD5.hexdigest(m + msg.salt)
          m = 'md5' + m
          send_data PasswordMessage.new(m).dump

        when AuthentificationKerberosV4, AuthentificationKerberosV5, AuthentificationSCMCredential
          raise "unsupported authentification"

        when AuthentificationOk
        when ErrorResponse
          raise msg.field_values.join("\t")
        when NoticeResponse
          @notice_processor.call(msg) if @notice_processor
        when ParameterStatus
          @params[msg.key] = msg.value
        when BackendKeyData
          # TODO
          #p msg
        when ReadyForQuery
          # TODO: use transaction status
          pc,@pending_conn = @pending_conn,nil
          pc.succeed true
        else
          raise "unhandled message type"
        end
      end

      # Cloned and modified from the postgres-pr.
      # Handles backend messages that arrive while a query is pending,
      # accumulating rows/fields into @r and errors into @e until
      # ReadyForQuery completes the pending deferrable.
      def dispatch_query_message msg
        case msg
        when DataRow
          @r.rows << msg.columns
        when CommandComplete
          @r.cmd_tag = msg.cmd_tag
        when ReadyForQuery
          pq,@pending_query = @pending_query,nil
          pq.succeed true, @r, @e
        when RowDescription
          @r.fields = msg.fields
        when CopyInResponse
        when CopyOutResponse
        when EmptyQueryResponse
        when ErrorResponse
          # TODO
          @e << msg
        when NoticeResponse
          @notice_processor.call(msg) if @notice_processor
        else
          # TODO
        end
      end
    end
  end
end
|
{
"pile_set_name": "Github"
}
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!--NewPage-->
<HTML>
<HEAD>
<!-- Generated by javadoc (build 1.6.0_34) on Tue May 26 09:04:14 BST 2015 -->
<TITLE>
Uses of Class org.apache.fop.afp.modca.triplets.DescriptorPositionTriplet (Apache FOP 2.0 API)
</TITLE>
<META NAME="date" CONTENT="2015-05-26">
<LINK REL ="stylesheet" TYPE="text/css" HREF="../../../../../../../stylesheet.css" TITLE="Style">
<SCRIPT type="text/javascript">
function windowTitle()
{
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Uses of Class org.apache.fop.afp.modca.triplets.DescriptorPositionTriplet (Apache FOP 2.0 API)";
}
}
</SCRIPT>
<NOSCRIPT>
</NOSCRIPT>
</HEAD>
<BODY BGCOLOR="white" onload="windowTitle();">
<HR>
<!-- ========= START OF TOP NAVBAR ======= -->
<A NAME="navbar_top"><!-- --></A>
<A HREF="#skip-navbar_top" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_top_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../org/apache/fop/afp/modca/triplets/DescriptorPositionTriplet.html" title="class in org.apache.fop.afp.modca.triplets"><FONT CLASS="NavBarFont1"><B>Class</B></FONT></A> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Use</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
fop 2.0</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
PREV
NEXT</FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
  <A HREF="../../../../../../../index.html?org/apache/fop/afp/modca/triplets/class-use/DescriptorPositionTriplet.html" target="_top"><B>FRAMES</B></A>  &nbsp;
<A HREF="DescriptorPositionTriplet.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../../../../../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../../../../../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_top"></A>
<!-- ========= END OF TOP NAVBAR ========= -->
<HR>
<CENTER>
<H2>
<B>Uses of Class<br>org.apache.fop.afp.modca.triplets.DescriptorPositionTriplet</B></H2>
</CENTER>
No usage of org.apache.fop.afp.modca.triplets.DescriptorPositionTriplet
<P>
<HR>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<A NAME="navbar_bottom"><!-- --></A>
<A HREF="#skip-navbar_bottom" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_bottom_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../org/apache/fop/afp/modca/triplets/DescriptorPositionTriplet.html" title="class in org.apache.fop.afp.modca.triplets"><FONT CLASS="NavBarFont1"><B>Class</B></FONT></A> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Use</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
fop 2.0</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
PREV
NEXT</FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
  <A HREF="../../../../../../../index.html?org/apache/fop/afp/modca/triplets/class-use/DescriptorPositionTriplet.html" target="_top"><B>FRAMES</B></A>  &nbsp;
<A HREF="DescriptorPositionTriplet.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../../../../../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../../../../../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_bottom"></A>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<HR>
Copyright 1999-2015 The Apache Software Foundation. All Rights Reserved.
</BODY>
</HTML>
|
{
"pile_set_name": "Github"
}
|
README for TOSThreads Blink
Author/Contact: tinyos-help@millennium.berkeley.edu
Author: Kevin Klues <klueska@cs.stanford.edu>
Description:
Blink is a simple application used to test the basic functionality of
TOSThreads.
You can install Blink on a mote via the following command:
make <platform> cthreads install
Valid platforms are currently: tmote, telosb, iris, shimmer*, span, mica2, micaz, and epic
Upon a successful burn, you should see LED0 flashing with a period of every
200ms, and LED1 and LED2 flashing in unison with a period of 1000ms.
Tools:
None.
Known bugs/limitations:
None.
|
{
"pile_set_name": "Github"
}
|
/*
**==============================================================================
**
** Copyright (c) Microsoft Corporation. All rights reserved. See file LICENSE
** for license information.
**
**==============================================================================
*/
#ifndef _omiar_xml_h
#define _omiar_xml_h
#include <stddef.h>
#include "config.h"
#include <common.h>
/* The maximum number of nested XML elements */
#define XML_MAX_NESTED 64

/* The maximum number of XML namespaces */
#define XML_MAX_NAMESPACES 32

/* The maximum number of registered XML namespaces */
#define XML_MAX_REGISTERED_NAMESPACES 32

/* The maximum number of attributes in a start tag */
#define XML_MAX_ATTRIBUTES 32

/* Represents case where tag has no namespace */
#define XML_NAMESPACE_NONE 0

/* Character types used throughout the parser */
typedef char XML_Char;
typedef unsigned char XML_UChar;

/* Zero-terminated string aliases (SAL-annotated for static analysis) */
typedef _Null_terminated_ XML_Char* XMLCharPtr;
typedef _Null_terminated_ XML_UChar* XMLUCharPtr;
#if defined(__cplusplus)
extern "C" {
#endif
/* Represents an XML name */
/* Represents an XML name (element or attribute), together with its
 * resolved namespace information. */
typedef struct _XML_Name
{
    /* Pointer to name */
    XML_Char* data;

    /* Size of name (excluding zero-terminator) */
    size_t size;

    /* Full namespace URI */
    const XML_Char* namespaceUri;
    size_t namespaceUriSize;

    /* Nonzero if a registered namespace was used */
    XML_Char namespaceId;
}
XML_Name;

/* Represents an XML namespace as registered by the client */
typedef struct _XML_RegisteredNameSpace
{
    /* URI for this namespace */
    const XML_Char* uri;

    /* Hash code for uri */
    unsigned int uriCode;

    /* Single character namespace name expected by client */
    XML_Char id;
}
XML_RegisteredNameSpace;

/* Represents an XML namespace as encountered during parsing */
typedef struct _XML_NameSpace
{
    /* Namespace name */
    const XML_Char* name;

    /* Hash code for name */
    unsigned int nameCode;

    /* URI for this namespace */
    const XML_Char* uri;
    size_t uriSize;

    /* Single character namespace name expected by client */
    XML_Char id;

    /* Depth at which this definition was encountered */
    size_t depth;
}
XML_NameSpace;

/* Dumps a namespace (debugging aid). */
void XML_NameSpace_Dump(
    _In_ XML_NameSpace* self);

/* Represents an XML attribute (name plus its value text). */
typedef struct _XML_Attr
{
    XML_Name name;
    const XML_Char* value;
    size_t valueSize;
}
XML_Attr;

/* XML element type tag */
typedef enum _XML_Type
{
    XML_NONE,
    XML_START,
    XML_END,
    XML_INSTRUCTION,
    XML_CHARS,
    XML_COMMENT
}
XML_Type;

/* Represents one XML element */
typedef struct _XML_Elem
{
    /* Type of this XML object */
    XML_Type type;

    /* Tag or character data */
    XML_Name data;

    /* Attributes (fixed-capacity array; see XML_MAX_ATTRIBUTES) */
    XML_Attr attrs[XML_MAX_ATTRIBUTES];
    size_t attrsSize;
}
XML_Elem;

/* Looks up an attribute by namespace id and name on an element. */
const XML_Char* XML_Elem_GetAttr(
    _Inout_ XML_Elem* self,
    XML_Char nsId,
    _In_z_ const XML_Char* name);

/* Dumps an element (debugging aid). */
void XML_Elem_Dump(
    _In_ const XML_Elem* self);
/* Parser state for one XML document.  The caller supplies the text
 * (parsed in place) and pulls elements one at a time with XML_Next. */
typedef struct _XML
{
    /* Points to first text character zero-terminated text */
    XML_Char* text;

    /* Pointer to current character */
    XML_Char* ptr;

    /* Line number */
    size_t line;

    /* Status: 0=Okay, 1=Done, 2=Failed */
    int status;

    /* Error message */
    XML_Char message[256];

    /* Stack of open tags (used to match closing tags) */
    XML_Name stack[XML_MAX_NESTED];
    size_t stackSize;

    /* Current nesting level */
    size_t nesting;

    /* Stack of dummy elements generated for empty tags and PutBack calls */
    XML_Elem elemStack[XML_MAX_NESTED];
    size_t elemStackSize;

    /* Array of namespaces */
    XML_NameSpace nameSpaces[XML_MAX_NAMESPACES];
    size_t nameSpacesSize;

    /* Index of last namespace lookup from nameSpaces[] array */
    size_t nameSpacesCacheIndex;

    /* Predefined namespaces */
    XML_RegisteredNameSpace registeredNameSpaces[XML_MAX_NAMESPACES];
    size_t registeredNameSpacesSize;

    /* Internal parser state */
    int state;

    /* Whether XML root element has been encountered */
    int foundRoot;
}
XML;
/* Initializes a parser to its empty state. */
void XML_Init(
    _Out_ XML* self);

/* Sets the zero-terminated document text to parse (parsed in place). */
void XML_SetText(
    _Inout_ XML* self,
    _In_z_ XML_Char* text);

/* Advances to the next element; fills in *elem. */
int XML_Next(
    _Inout_ XML* self,
    _Out_ XML_Elem* elem);

/* Like XML_Next but skips character data and comments. */
int GetNextSkipCharsAndComments(
    _Inout_ XML *xml,
    _Out_ XML_Elem *e);

/* Reads the next element and fails unless it matches the given
 * type, namespace id and name. */
int XML_Expect(
    _Inout_ XML* self,
    _Out_ XML_Elem* elem,
    XML_Type type,
    XML_Char nsId,
    _In_z_ const XML_Char* name);

/* Skips the remainder of the current element. */
int XML_Skip(
    _Inout_ XML* self);

/* Registers a namespace URI under a single-character id (see
 * XML_RegisteredNameSpace). */
int XML_RegisterNameSpace(
    _Inout_ XML* self,
    XML_Char id,
    _In_z_ const XML_Char* uri);

/* Pushes an element back so the next XML_Next returns it again. */
int XML_PutBack(
    _Inout_ XML* self,
    _In_ const XML_Elem* elem);

/* Strips leading/trailing whitespace from an element's data. */
int XML_StripWhitespace(
    _Inout_ XML_Elem* elem);

/* Dumps parser state (debugging aid). */
void XML_Dump(
    _In_ XML* self);

/* Prints the parser's current error message. */
void XML_PutError(_Inout_ XML* self);

/* Formats a parse-fault description into buffer (presumably for SOAP
 * fault reporting -- confirm against callers). */
int XML_ParseCharFault(const XML *self,
                       const XML_Char *data,
                       XML_Char *buffer,
                       size_t buf_size);
/*
 * Parser error message templates.  Fixes in this revision: "An closing"
 * -> "A closing", "a element" -> "an element" (twice), and "interal" ->
 * "internal" in the user-visible message text.
 */
#define XML_ERROR_BAD_ENTITY_REFERENCE ZT("Failed to parse XML. Bad entity reference. Only these are supported: '<', '>', '&', '"', '''.")
#define XML_ERROR_BAD_CHARACTER_REFERENCE ZT("Failed to parse XML. Bad character reference. Only character references in the range of 0 to 255 are supported.")
#define XML_ERROR_UNDEFINED_NAMESPACE_PREFIX ZT("Failed to parse XML. Undefined namespace prefix found '%T'.")
#define XML_ERROR_EXPECTED_ATTRIBUTE_NAME ZT("Failed to parse XML. An attribute name was expected.")
#define XML_ERROR_EXPECTED_ATTRIBUTE_EQUALS ZT("Failed to parse XML. An '=' character was expected while parsing attribute '%T'.")
#define XML_ERROR_EXPECTED_ATTRIBUTE_OPENING_QUOTES ZT("Failed to parse XML. An opening quote character was expected while parsing attribute '%T'.")
#define XML_ERROR_EXPECTED_ATTRIBUTE_CLOSING_QUOTES ZT("Failed to parse XML. A closing quote character was expected while parsing attribute '%T'.")
#define XML_ERROR_TOO_MANY_NAMESPACES ZT("Failed to parse XML. Too many namespaces were detected. A maximum of %u namespaces are allowed.")
#define XML_ERROR_TOO_MANY_ATTRIBUTES ZT("Failed to parse XML. Too many attributes were detected on element '%T'. A maximum of %u attributes are allowed per element.")
#define XML_ERROR_END_OF_XML_INSTRUCTION ZT("Failed to parse XML. The end of the XML was detected while processing an XML instruction.")
#define XML_ERROR_END_OF_INSTRUCTION_MISSING ZT("Failed to parse XML. The end of the XML instruction was not properly terminated with an '?>'.")
#define XML_ERROR_ELEMENT_NAME_EXPECTED ZT("Failed to parse XML. An element name was expected while decoding an element start tag.")
#define XML_ERROR_ELEMENT_NAME_PREMATURE_END ZT("Failed to parse XML. The end of the XML was detected while processing an XML element name for an element start tag.")
#define XML_ERROR_ELEMENT_DEPTH_OVERFLOW ZT("Failed to parse XML. XML element nesting is too deep. A maximum element depth of %u is supported.")
#define XML_ERROR_ELEMENT_NAME_NOT_CLOSED ZT("Failed to parse XML. The XML element '%T' was not terminated with a '>' while decoding an element start tag.")
#define XML_ERROR_ELEMENT_NAME_EXPECTED_ELEM_END ZT("Failed to parse XML. An element name was expected while decoding an element end tag.")
#define XML_ERROR_ELEMENT_NAME_PREMATURE_END_ELEM_END ZT("Failed to parse XML. The end of the XML was detected while processing an XML element name for an element end tag.")
#define XML_ERROR_ELEMENT_NAME_NOT_CLOSED_ELEM_END ZT("Failed to parse XML. The XML element '%T' was not terminated with a '>' while decoding an element end tag.")
#define XML_ERROR_ELEMENT_TOO_MANY_ENDS ZT("Failed to parse XML. More element end tags were found than element starting tags. The ending tag found is '%T'.")
#define XML_ERROR_ELEMENT_END_ELEMENT_TAG_NOT_MATCH_START_TAG ZT("Failed to parse XML. The XML element end tag expected was '%T', but what was found was '%T'.")
#define XML_ERROR_COMMENT_END_EXPECTED ZT("Failed to parse XML. Double minus signs in comments are not allowed, unless used to terminate comment. '>' was not found.")
#define XML_ERROR_COMMENT_PREMATURE_END ZT("Failed to parse XML. The end of the XML was detected while processing a comment.")
#define XML_ERROR_CDATA_PREMATURE_END ZT("Failed to parse XML. The end of the XML was detected while processing a CDATA.")
#define XML_ERROR_DOCTYPE_PREMATURE_END ZT("Failed to parse XML. The end of the XML was detected while processing a DOCTYPE.")
#define XML_ERROR_CHARDATA_EXPECTED_ELEMENT_END_TAG ZT("Failed to parse XML. While processing the element data no element end tag was discovered.")
#define XML_ERROR_OPEN_ANGLE_BRACKET_EXPECTED ZT("Failed to parse XML. An open angle bracket '<' was expected and not found.")
#define XML_ERROR_COMMENT_CDATA_DOCTYPE_EXPECTED ZT("Failed to parse XML. A comment, CDATA or DOCTYPE element was expected and not found.")
#define XML_ERROR_ELEMENT_EXPECTED ZT("Failed to parse XML. An XML element was expected and not found.")
#define XML_ERROR_UNEXPECTED_STATE ZT("Failed to parse XML. The XML parser hit an internal problem that stopped it from progressing.")
#define XML_ERROR_SPECIFIC_ELEMENT_EXPECTED ZT("Failed to parse XML. The element name %T was expected but %T was found instead.")
#define XML_ERROR_SPECIFIC_END_ELEMENT_EXPECTED ZT("Failed to parse XML. The element name %T end tag was expected but %T was found instead.")
#define XML_ERROR_CHARACTER_DATA_EXPECTED ZT("Failed to parse XML. Character data was expected but not found.")
#define WSMAN_ERROR_NO_CLASS_NAME_IN_SELECTOR ZT("Failed to process WS-Management packet. The class name was not found in the selector.")
#define WSMAN_ERROR_NO_RESOURCE_URI ZT("Failed to process WS-Management packet. The resource URI was not found.")
#define WSMAN_ERROR_OUTOFMEMORY ZT("Failed to process WS-Management packet. Out of memory.")
#define WSMAN_ERROR_BAD_SELECTOR ZT("Failed to process WS-Management packet. Character data or the element EndPointReference was expected in the selector but not found.")
#define WSMAN_ERROR_BAD_EPR_IN_SELECTOR ZT("Failed to process WS-Management packet. The element EndPointReference in the selector could not be parsed.")
/* Records a printf-style error on the parser (presumably stores into
 * XML.message and sets status to failed -- confirm in xml.c). */
void XML_Raise(XML* self, _In_z_ const XML_Char* format, ...);

/* Copies the parser's current error message into the caller's buffer. */
void XML_FormatError(_Inout_ XML* self, _Out_writes_z_(size) XML_Char* buffer, size_t size);
#if defined(__cplusplus)
} /* extern "C" */
#endif
#endif /* _omiar_xml_h */
|
{
"pile_set_name": "Github"
}
|
<?php
/**
* Bitrix Framework
* @package bitrix
* @subpackage iblock
*/
namespace Bitrix\Iblock\PropertyIndex;
class Storage
{
protected $iblockId = 0;
protected static $exists = array();
const PRICE = 1;
const DICTIONARY = 2;
const STRING = 3;
const NUMERIC = 4;
const DATETIME = 5;
/**
* @param integer $iblockId Information block identifier.
*/
public function __construct($iblockId)
{
$this->iblockId = intval($iblockId);
}
/**
* Returns information block identifier.
*
* @return integer
*/
/**
* <p>Метод возвращает идентификатор инфоблока. Нестатический метод.</p> <p>Без параметров</p> <a name="example"></a>
*
*
* @return integer
*
* @static
* @link http://dev.1c-bitrix.ru/api_d7/bitrix/iblock/propertyindex/storage/getiblockid.php
* @author Bitrix
*/
public function getIblockId()
{
return $this->iblockId;
}
/**
* Internal method to get database table name for storing property index.
*
* @return string
*/
/**
* <p>Метод возвращает название таблицы базы данных для хранения индекса свойств. Нестатический внутренний метод.</p> <p>Без параметров</p> <a name="example"></a>
*
*
* @return string
*
* @static
* @link http://dev.1c-bitrix.ru/api_d7/bitrix/iblock/propertyindex/storage/gettablename.php
* @author Bitrix
*/
public function getTableName()
{
return "b_iblock_".$this->iblockId."_index";
}
/**
* Checks if property index exists in the database.
* Returns true on success.
*
* @return boolean
*/
/**
* <p>Метод проверяет существование в базе данных индекса для свойства. Нестатический метод.</p> <p>Без параметров</p> <a name="example"></a>
*
*
* @return boolean
*
* @static
* @link http://dev.1c-bitrix.ru/api_d7/bitrix/iblock/propertyindex/storage/isexists.php
* @author Bitrix
*/
public function isExists()
{
if (!array_key_exists($this->iblockId, self::$exists))
{
$connection = \Bitrix\Main\Application::getConnection();
self::$exists[$this->iblockId] = $connection->isTableExists($this->getTableName());
}
return self::$exists[$this->iblockId];
}
/**
* Creates new property values index for information block.
* You have to be sure that index does not exists.
*
* @return void
*/
/**
* <p>Метод создает новый индекс значений свойства информационного блока. Перед вызовом метода необходимо убедиться, что такой индекс еще не существует. Нестатический метод.</p> <p>Без параметров</p> <a name="example"></a>
*
*
* @return void
*
* @static
* @link http://dev.1c-bitrix.ru/api_d7/bitrix/iblock/propertyindex/storage/create.php
* @author Bitrix
*/
public function create()
{
$connection = \Bitrix\Main\Application::getConnection();
$connection->createTable($this->getTableName(), array(
"SECTION_ID" => new \Bitrix\Main\Entity\IntegerField("SECTION_ID", array(
'required' => true,
)),
"ELEMENT_ID" => new \Bitrix\Main\Entity\IntegerField("ELEMENT_ID", array(
'required' => true,
)),
"FACET_ID" => new \Bitrix\Main\Entity\IntegerField("FACET_ID", array(
'required' => true,
)),
"VALUE" => new \Bitrix\Main\Entity\IntegerField("VALUE", array(
'required' => true,
)),
"VALUE_NUM" => new \Bitrix\Main\Entity\FloatField("VALUE_NUM", array(
'required' => true,
)),
"INCLUDE_SUBSECTIONS" => new \Bitrix\Main\Entity\BooleanField("INCLUDE_SUBSECTIONS", array(
'required' => true,
'values' => array(0, 1),
)),
), array("SECTION_ID", "FACET_ID", "VALUE", "VALUE_NUM", "ELEMENT_ID"));
$connection->createIndex($this->getTableName(), 'IX_'.$this->getTableName().'_0', array("SECTION_ID", "FACET_ID", "VALUE_NUM", "VALUE", "ELEMENT_ID"));
$connection->createIndex($this->getTableName(), 'IX_'.$this->getTableName().'_1', array("ELEMENT_ID", "SECTION_ID", "FACET_ID"));
self::$exists[$this->iblockId] = true;
}
/**
* Deletes existing index from the database.
* You have to check that index exists before calling this method.
*
* @return void
*/
/**
* <p>Метод удаляет существующий индекс из базы данных. Перед вызовом метода необходимо убедиться в том, что индекс существует. Нестатический метод.</p> <p>Без параметров</p> <a name="example"></a>
*
*
* @return void
*
* @static
* @link http://dev.1c-bitrix.ru/api_d7/bitrix/iblock/propertyindex/storage/drop.php
* @author Bitrix
*/
public function drop()
{
$connection = \Bitrix\Main\Application::getConnection();
$connection->dropTable($this->getTableName());
self::$exists[$this->iblockId] = false;
}
/**
* Returns maximum stored element identifier.
*
* @return int
*/
/**
* <p>Метод возвращает максимальный идентификатор хранящегося элемента. Нестатический метод.</p> <p>Без параметров</p> <a name="example"></a>
*
*
* @return integer
*
* @static
* @link http://dev.1c-bitrix.ru/api_d7/bitrix/iblock/propertyindex/storage/getlaststoredelementid.php
* @author Bitrix
*/
public function getLastStoredElementId()
{
$connection = \Bitrix\Main\Application::getConnection();
$max = $connection->queryScalar("select max(ELEMENT_ID) ELEMENT_MAX from ".$this->getTableName());
return $max > 0? $max: 0;
}
/**
* Adds new index entry.
*
* @param integer $sectionId Identifier of the element section.
* @param integer $elementId Identifier of the element.
* @param integer $facetId Identifier of the property/price.
* @param integer $value Dictionary value or 0.
* @param float $valueNum Value of an numeric property or price.
* @param boolean $includeSubsections If section has parent or direct element connection.
*
* @return boolean
*/
/**
* <p>Метод добавляет новую запись индекса. Нестатический метод.</p>
*
*
* @param integer $sectionId Идентификатор секции элемента.
*
* @param integer $elementId Идентификатор элемента.
*
* @param integer $facetId Идентификатор свойства/цены.
*
* @param integer $value Значение словаря или 0.
*
* @param float $valueNum Значение числового свойства или цены.
*
* @param boolean $includeSubsections Параметр принимает <i>true</i>, если секция имеет родителя, в
* противном случае указывается <i>false</i>.
*
* @return boolean
*
* @static
* @link http://dev.1c-bitrix.ru/api_d7/bitrix/iblock/propertyindex/storage/addindexentry.php
* @author Bitrix
*/
public function addIndexEntry($sectionId, $elementId, $facetId, $value, $valueNum, $includeSubsections)
{
$connection = \Bitrix\Main\Application::getConnection();
try
{
$connection->query("
INSERT INTO ".$this->getTableName()." (
SECTION_ID
,ELEMENT_ID
,FACET_ID
,VALUE
,VALUE_NUM
,INCLUDE_SUBSECTIONS
) VALUES (
".intval($sectionId)."
,".intval($elementId)."
,".intval($facetId)."
,".intval($value)."
,".doubleval($valueNum)."
,".($includeSubsections > 0? 1: 0)."
)
");
}
catch (\Bitrix\Main\DB\SqlException $e)
{
return false;
}
return true;
}
/**
* Deletes all element entries from the index.
*
* @param integer $elementId Identifier of the element to be deleted.
*
* @return boolean
*/
/**
* <p>Метод удаляет все записи для элемента из индекса. Нестатический метод.</p>
*
*
* @param integer $elementId Идентификатор элемента, записи которого необходимо удалить.
*
* @return boolean
*
* @static
* @link http://dev.1c-bitrix.ru/api_d7/bitrix/iblock/propertyindex/storage/deleteindexelement.php
* @author Bitrix
*/
public function deleteIndexElement($elementId)
{
$connection = \Bitrix\Main\Application::getConnection();
$connection->query("DELETE from ".$this->getTableName()." WHERE ELEMENT_ID = ".intval($elementId));
return true;
}
/**
* Converts iblock property identifier into internal storage facet identifier.
*
* @param integer $propertyId Property identifier.
* @return integer
*/
/**
* <p>Метод преобразует идентификатор свойства инфоблока во внутренний идентификатор фасеты. Метод статический.</p>
*
*
* @param integer $propertyId Идентификатор свойства инфоблока.
*
* @return integer
*
* @static
* @link http://dev.1c-bitrix.ru/api_d7/bitrix/iblock/propertyindex/storage/propertyidtofacetid.php
* @author Bitrix
*/
public static function propertyIdToFacetId($propertyId)
{
return intval($propertyId * 2);
}
/**
* Converts catalog price identifier into internal storage facet identifier.
*
* @param integer $priceId Price identifier.
* @return integer
*/
/**
* <p>Метод преобразует идентификатор цены во внутренний идентификатор фасеты. Метод статический.</p>
*
*
* @param integer $priceId Идентификатор цены.
*
* @return integer
*
* @static
* @link http://dev.1c-bitrix.ru/api_d7/bitrix/iblock/propertyindex/storage/priceidtofacetid.php
* @author Bitrix
*/
public static function priceIdToFacetId($priceId)
{
return intval($priceId * 2 + 1);
}
/**
* Returns true if given identifier is catalog price one.
*
* @param integer $facetId Internal storage facet identifier.
*
* @return boolean
*/
/**
* <p>Метод возвращает <i>true</i>, если заданный идентификатор является идентификатором цены каталога. Метод статический.</p>
*
*
* @param integer $facetId Внутренний идентификатор фасеты.
*
* @return boolean
*
* @static
* @link http://dev.1c-bitrix.ru/api_d7/bitrix/iblock/propertyindex/storage/ispriceid.php
* @author Bitrix
*/
public static function isPriceId($facetId)
{
return ($facetId % 2) != 0;
}
/**
* Returns true if given identifier is iblock property one.
*
* @param integer $facetId Internal storage facet identifier.
*
* @return boolean
*/
/**
* <p>Метод возвращает <i>true</i>, если заданный идентификатор является идентификатором свойства инфоблока. Метод статический.</p>
*
*
* @param integer $facetId Внутренний идентификатор фасеты.
*
* @return boolean
*
* @static
* @link http://dev.1c-bitrix.ru/api_d7/bitrix/iblock/propertyindex/storage/ispropertyid.php
* @author Bitrix
*/
public static function isPropertyId($facetId)
{
return ($facetId % 2) == 0;
}
/**
 * Converts an internal storage facet identifier into an iblock property identifier.
 *
 * Inverse of the even-space mapping (facet = 2 * propertyId): halving the
 * facet identifier recovers the property identifier.
 *
 * @param integer $facetId Internal storage facet identifier.
 *
 * @return integer
 *
 * @static
 * @link http://dev.1c-bitrix.ru/api_d7/bitrix/iblock/propertyindex/storage/facetidtopropertyid.php
 */
public static function facetIdToPropertyId($facetId)
{
	// (int) truncates toward zero, same as intval() on the float quotient.
	return (int)($facetId / 2);
}
/**
 * Converts an internal storage facet identifier into a catalog price identifier.
 *
 * Inverse of the odd-space mapping (facet = 2 * priceId + 1): subtracting one
 * and halving recovers the price identifier.
 *
 * @param integer $facetId Internal storage facet identifier.
 *
 * @return integer
 *
 * @static
 * @link http://dev.1c-bitrix.ru/api_d7/bitrix/iblock/propertyindex/storage/facetidtopriceid.php
 */
public static function facetIdToPriceId($facetId)
{
	// (int) truncates toward zero, same as intval() on the float quotient.
	return (int)(($facetId - 1) / 2);
}
}
|
{
"pile_set_name": "Github"
}
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_PACKET_MATH_SSE_H
#define EIGEN_PACKET_MATH_SSE_H
namespace Eigen {
namespace internal {
#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
#endif
#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS (2*sizeof(void*))
#endif
// SSE packet types: 4 floats, 4 32-bit ints, 2 doubles per 128-bit register.
typedef __m128 Packet4f;
typedef __m128i Packet4i;
typedef __m128d Packet2d;
// Mark the raw intrinsic types as arithmetic so Eigen's traits accept them.
template<> struct is_arithmetic<__m128> { enum { value = true }; };
template<> struct is_arithmetic<__m128i> { enum { value = true }; };
template<> struct is_arithmetic<__m128d> { enum { value = true }; };
// Lane-permutation helpers. p,q,r,s are source lane indices for destination
// lanes 0..3; the expression builds the _MM_SHUFFLE-style immediate inline.
// Float packets are routed through the integer shuffle via bit-casts.
#define vec4f_swizzle1(v,p,q,r,s) \
(_mm_castsi128_ps(_mm_shuffle_epi32( _mm_castps_si128(v), ((s)<<6|(r)<<4|(q)<<2|(p)))))
#define vec4i_swizzle1(v,p,q,r,s) \
(_mm_shuffle_epi32( v, ((s)<<6|(r)<<4|(q)<<2|(p))))
// Double swizzle: each 64-bit lane p/q is expanded to its two 32-bit halves.
#define vec2d_swizzle1(v,p,q) \
(_mm_castsi128_pd(_mm_shuffle_epi32( _mm_castpd_si128(v), ((q*2+1)<<6|(q*2)<<4|(p*2+1)<<2|(p*2)))))
// Two-operand swizzles: lanes 0-1 come from a, lanes 2-3 from b.
#define vec4f_swizzle2(a,b,p,q,r,s) \
(_mm_shuffle_ps( (a), (b), ((s)<<6|(r)<<4|(q)<<2|(p))))
#define vec4i_swizzle2(a,b,p,q,r,s) \
(_mm_castps_si128( (_mm_shuffle_ps( _mm_castsi128_ps(a), _mm_castsi128_ps(b), ((s)<<6|(r)<<4|(q)<<2|(p))))))
// Declare a local packet constant named p4f_/p2d_/p4i_<NAME> broadcast from X.
#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
const Packet4f p4f_##NAME = pset1<Packet4f>(X)
#define _EIGEN_DECLARE_CONST_Packet2d(NAME,X) \
const Packet2d p2d_##NAME = pset1<Packet2d>(X)
#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
const Packet4f p4f_##NAME = _mm_castsi128_ps(pset1<Packet4i>(X))
#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
const Packet4i p4i_##NAME = pset1<Packet4i>(X)
// Advertise which vectorized operations this backend provides per scalar type.
// float: full set including fast-math sin/cos and log/exp/sqrt.
template<> struct packet_traits<float> : default_packet_traits
{
typedef Packet4f type;
enum {
Vectorizable = 1,
AlignedOnScalar = 1,
size=4,
HasDiv = 1,
HasSin = EIGEN_FAST_MATH,
HasCos = EIGEN_FAST_MATH,
HasLog = 1,
HasExp = 1,
HasSqrt = 1
};
};
// double: no vectorized sin/cos/log here, but div/exp/sqrt are available.
template<> struct packet_traits<double> : default_packet_traits
{
typedef Packet2d type;
enum {
Vectorizable = 1,
AlignedOnScalar = 1,
size=2,
HasDiv = 1,
HasExp = 1,
HasSqrt = 1
};
};
// int: only the default operation set (no HasDiv — see pdiv<Packet4i> below).
template<> struct packet_traits<int> : default_packet_traits
{
typedef Packet4i type;
enum {
// FIXME check the Has*
Vectorizable = 1,
AlignedOnScalar = 1,
size=4
};
};
// Reverse mapping: packet type -> scalar type and lane count.
template<> struct unpacket_traits<Packet4f> { typedef float type; enum {size=4}; };
template<> struct unpacket_traits<Packet2d> { typedef double type; enum {size=2}; };
template<> struct unpacket_traits<Packet4i> { typedef int type; enum {size=4}; };
// pset1: broadcast one scalar to every lane.
#if defined(_MSC_VER) && (_MSC_VER==1500)
// Workaround MSVC 9 internal compiler error.
// TODO: It has been detected with win64 builds (amd64), so let's check whether it also happens in 32bits+SSE mode
// TODO: let's check whether there does not exist a better fix, like adding a pset0() function. (it crashed on pset1(0)).
template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from) { return _mm_set_ps(from,from,from,from); }
template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set_pd(from,from); }
template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int& from) { return _mm_set_epi32(from,from,from,from); }
#else
template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int& from) { return _mm_set1_epi32(from); }
template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from) { return _mm_set1_ps(from); }
template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set1_pd(from); }
#endif
// plset: linearly spaced packet {a, a+1, a+2, a+3} (note _mm_set_* takes
// arguments high-lane first, hence the reversed 3,2,1,0 order).
template<> EIGEN_STRONG_INLINE Packet4f plset<float>(const float& a) { return _mm_add_ps(pset1<Packet4f>(a), _mm_set_ps(3,2,1,0)); }
template<> EIGEN_STRONG_INLINE Packet2d plset<double>(const double& a) { return _mm_add_pd(pset1<Packet2d>(a),_mm_set_pd(1,0)); }
template<> EIGEN_STRONG_INLINE Packet4i plset<int>(const int& a) { return _mm_add_epi32(pset1<Packet4i>(a),_mm_set_epi32(3,2,1,0)); }
// Element-wise addition and subtraction.
template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_add_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_add_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_add_epi32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_sub_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_sub_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_sub_epi32(a,b); }
// Float negation: flip the IEEE sign bit of every lane via XOR.
template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a)
{
const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x80000000,0x80000000,0x80000000));
return _mm_xor_ps(a,mask);
}
// Double negation: the sign bit lives in the high 32-bit word of each 64-bit lane.
template<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a)
{
const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0x0,0x80000000,0x0,0x80000000));
return _mm_xor_pd(a,mask);
}
// Integer negation: 0 - a (no dedicated SSE negate instruction).
template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a)
{
return psub(_mm_setr_epi32(0,0,0,0), a);
}
// Conjugation is the identity for real packets.
template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }
// Element-wise multiplication.
template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_mul_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_mul_pd(a,b); }
// 32-bit integer multiply: a single instruction on SSE4.1; on plain SSE2 it is
// emulated with two 32x32->64 multiplies (_mm_mul_epu32 only processes the
// even lanes, so the odd lanes are brought into even position by swizzling)
// and the low 32-bit halves are then interleaved back into lane order.
template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
return _mm_mullo_epi32(a,b);
#else
// this version is slightly faster than 4 scalar products
return vec4i_swizzle1(
vec4i_swizzle2(
_mm_mul_epu32(a,b),
_mm_mul_epu32(vec4i_swizzle1(a,1,0,3,2),
vec4i_swizzle1(b,1,0,3,2)),
0,2,0,2),
0,2,1,3);
#endif
}
// Element-wise division (floating point only; see packet_traits HasDiv).
template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_div_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_div_pd(a,b); }
// Integer division is unsupported: assert in debug builds, return zeros otherwise.
template<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& /*a*/, const Packet4i& /*b*/)
{ eigen_assert(false && "packet integer division are not supported by SSE");
return pset1<Packet4i>(0);
}
// for some weird raisons, it has to be overloaded for packet of integers
template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd(pmul(a,b), c); }
// Element-wise minimum.
template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_min_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_min_pd(a,b); }
// Integer min: native on SSE4.1; on SSE2, select per lane with a compare mask.
template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
return _mm_min_epi32(a,b);
#else
// after some bench, this version *is* faster than a scalar implementation
Packet4i mask = _mm_cmplt_epi32(a,b);
return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
#endif
}
// Element-wise maximum (same mask-select emulation for integers on SSE2).
template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_max_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_max_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
return _mm_max_epi32(a,b);
#else
// after some bench, this version *is* faster than a scalar implementation
Packet4i mask = _mm_cmpgt_epi32(a,b);
return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
#endif
}
// Bitwise logical operations (operate on the raw 128-bit lanes).
template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_and_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_and_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_and_si128(a,b); }
template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_or_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_or_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_or_si128(a,b); }
template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_xor_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_xor_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_xor_si128(a,b); }
// pandnot(a,b) computes (NOT a) AND b per the _mm_andnot_* semantics.
template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_andnot_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_andnot_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_andnot_si128(a,b); }
// Aligned loads: the pointer must be 16-byte aligned.
template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_ps(from); }
template<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_pd(from); }
template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128(reinterpret_cast<const Packet4i*>(from)); }
// Unaligned loads: compiler-specific workarounds below.
#if defined(_MSC_VER)
template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from) {
EIGEN_DEBUG_UNALIGNED_LOAD
#if (_MSC_VER==1600)
// NOTE Some version of MSVC10 generates bad code when using _mm_loadu_ps
// (i.e., it does not generate an unaligned load!!
// TODO On most architectures this version should also be faster than a single _mm_loadu_ps
// so we could also enable it for MSVC08 but first we have to make sure this latter does not generate crap when doing so...
__m128 res = _mm_loadl_pi(_mm_set1_ps(0.0f), (const __m64*)(from));
res = _mm_loadh_pi(res, (const __m64*)(from+2));
return res;
#else
return _mm_loadu_ps(from);
#endif
}
template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_pd(from); }
template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_si128(reinterpret_cast<const Packet4i*>(from)); }
#else
// Fast unaligned loads. Note that here we cannot directly use intrinsics: this would
// require pointer casting to incompatible pointer types and leads to invalid code
// because of the strict aliasing rule. The "dummy" stuff are required to enforce
// a correct instruction dependency.
// TODO: do the same for MSVC (ICC is compatible)
// NOTE: with the code below, MSVC's compiler crashes!
#if defined(__GNUC__) && defined(__i386__)
// bug 195: gcc/i386 emits weird x87 fldl/fstpl instructions for _mm_load_sd
#define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 1
#elif defined(__clang__)
// bug 201: Segfaults in __mm_loadh_pd with clang 2.8
#define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 1
#else
#define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 0
#endif
// The custom path loads the low and high 64-bit halves separately through
// double loads, then bit-casts the register back to the target packet type.
template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from)
{
EIGEN_DEBUG_UNALIGNED_LOAD
#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS
return _mm_loadu_ps(from);
#else
__m128d res;
res = _mm_load_sd((const double*)(from)) ;
res = _mm_loadh_pd(res, (const double*)(from+2)) ;
return _mm_castpd_ps(res);
#endif
}
template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from)
{
EIGEN_DEBUG_UNALIGNED_LOAD
#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS
return _mm_loadu_pd(from);
#else
__m128d res;
res = _mm_load_sd(from) ;
res = _mm_loadh_pd(res,from+1);
return res;
#endif
}
template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)
{
EIGEN_DEBUG_UNALIGNED_LOAD
#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS
return _mm_loadu_si128(reinterpret_cast<const Packet4i*>(from));
#else
__m128d res;
res = _mm_load_sd((const double*)(from)) ;
res = _mm_loadh_pd(res, (const double*)(from+2)) ;
return _mm_castpd_si128(res);
#endif
}
#endif
// ploaddup: load size/2 scalars and duplicate each, yielding {a0,a0,a1,a1}.
// For floats, two floats are fetched as one 64-bit load then lane-duplicated.
template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
{
return vec4f_swizzle1(_mm_castpd_ps(_mm_load_sd(reinterpret_cast<const double*>(from))), 0, 0, 1, 1);
}
// For doubles, size/2 == 1, so this degenerates to a broadcast of from[0].
template<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double* from)
{ return pset1<Packet2d>(from[0]); }
// For ints, fetch two ints with a 64-bit load and duplicate each lane.
template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int* from)
{
Packet4i tmp;
tmp = _mm_loadl_epi64(reinterpret_cast<const Packet4i*>(from));
return vec4i_swizzle1(tmp, 0, 0, 1, 1);
}
// Aligned stores: the destination must be 16-byte aligned.
template<> EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_ps(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_pd(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast<Packet4i*>(to), from); }
// Unaligned store, implemented as two 64-bit half stores for the double case;
// the float and int variants bit-cast to double and reuse it.
template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from) {
EIGEN_DEBUG_UNALIGNED_STORE
_mm_storel_pd((to), from);
_mm_storeh_pd((to+1), from);
}
template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(reinterpret_cast<double*>(to), _mm_castps_pd(from)); }
template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(reinterpret_cast<double*>(to), _mm_castsi128_pd(from)); }
// pstore1: broadcast a scalar into a register, then store the whole packet.
// some compilers might be tempted to perform multiple moves instead of using a vector path.
template<> EIGEN_STRONG_INLINE void pstore1<Packet4f>(float* to, const float& a)
{
Packet4f pa = _mm_set_ss(a);
pstore(to, vec4f_swizzle1(pa,0,0,0,0));
}
// some compilers might be tempted to perform multiple moves instead of using a vector path.
template<> EIGEN_STRONG_INLINE void pstore1<Packet2d>(double* to, const double& a)
{
Packet2d pa = _mm_set_sd(a);
pstore(to, vec2d_swizzle1(pa,0,0));
}
// Hint the hardware prefetcher to pull the cache line holding addr into L1.
template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
// pfirst: extract lane 0 as a scalar. Three compiler-specific variants.
#if defined(_MSC_VER) && defined(_WIN64) && !defined(__INTEL_COMPILER)
// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010
// Direct access of the struct members fixed bug #62.
template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { return a.m128_f32[0]; }
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return a.m128d_f64[0]; }
template<> EIGEN_STRONG_INLINE int pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }
#elif defined(_MSC_VER) && !defined(__INTEL_COMPILER)
// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010
template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { float x = _mm_cvtss_f32(a); return x; }
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { double x = _mm_cvtsd_f64(a); return x; }
template<> EIGEN_STRONG_INLINE int pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }
#else
template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { return _mm_cvtss_f32(a); }
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return _mm_cvtsd_f64(a); }
template<> EIGEN_STRONG_INLINE int pfirst<Packet4i>(const Packet4i& a) { return _mm_cvtsi128_si32(a); }
#endif
// preverse: reverse lane order (0x1B == _MM_SHUFFLE(0,1,2,3)).
template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a)
{ return _mm_shuffle_ps(a,a,0x1B); }
template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a)
{ return _mm_shuffle_pd(a,a,0x1); }
template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a)
{ return _mm_shuffle_epi32(a,0x1B); }
// Float absolute value: clear the IEEE sign bit of each 32-bit lane.
template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a)
{
const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF));
return _mm_and_ps(a,mask);
}
// Double absolute value: the sign bit is the top bit of each 64-bit lane,
// i.e. bit 31 of the second 32-bit word, hence the 0xFFFFFFFF,0x7FFFFFFF pairs.
template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a)
{
const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF));
return _mm_and_pd(a,mask);
}
// Integer absolute value: native on SSSE3; on SSE2, use the classic
// (a XOR (a>>31)) - (a>>31) two's-complement trick.
template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a)
{
#ifdef EIGEN_VECTORIZE_SSSE3
return _mm_abs_epi32(a);
#else
Packet4i aux = _mm_srai_epi32(a,31);
return _mm_sub_epi32(_mm_xor_si128(a,aux),aux);
#endif
}
// Broadcast each lane of vecs[0] into its own packet: after the call,
// vecs[k] holds lane k of the original vecs[0] replicated four times.
// vecs[0] is overwritten last so the other shuffles read the original value.
EIGEN_STRONG_INLINE void punpackp(Packet4f* vecs)
{
vecs[1] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x55));
vecs[2] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xAA));
vecs[3] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xFF));
vecs[0] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x00));
}
// Horizontal sum reductions. predux sums all lanes to a scalar; preduxp
// reduces four packets at once, producing one packet whose lane k is the
// sum of vecs[k]'s lanes. SSE3 provides horizontal adds; SSE2 uses shuffles.
#ifdef EIGEN_VECTORIZE_SSE3
// TODO implement SSE2 versions as well as integer versions
template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
{
return _mm_hadd_ps(_mm_hadd_ps(vecs[0], vecs[1]),_mm_hadd_ps(vecs[2], vecs[3]));
}
template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
{
return _mm_hadd_pd(vecs[0], vecs[1]);
}
// SSSE3 version:
// EIGEN_STRONG_INLINE Packet4i preduxp(const Packet4i* vecs)
// {
// return _mm_hadd_epi32(_mm_hadd_epi32(vecs[0], vecs[1]),_mm_hadd_epi32(vecs[2], vecs[3]));
// }
template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
{
Packet4f tmp0 = _mm_hadd_ps(a,a);
return pfirst(_mm_hadd_ps(tmp0, tmp0));
}
template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a) { return pfirst(_mm_hadd_pd(a, a)); }
// SSSE3 version:
// EIGEN_STRONG_INLINE float predux(const Packet4i& a)
// {
// Packet4i tmp0 = _mm_hadd_epi32(a,a);
// return pfirst(_mm_hadd_epi32(tmp0, tmp0));
// }
#else
// SSE2 versions
// Fold high half onto low half, then add the two remaining lanes.
template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
{
Packet4f tmp = _mm_add_ps(a, _mm_movehl_ps(a,a));
return pfirst(_mm_add_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a)
{
return pfirst(_mm_add_sd(a, _mm_unpackhi_pd(a,a)));
}
// 4x4 transpose-and-add via unpack/move shuffles.
template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
{
Packet4f tmp0, tmp1, tmp2;
tmp0 = _mm_unpacklo_ps(vecs[0], vecs[1]);
tmp1 = _mm_unpackhi_ps(vecs[0], vecs[1]);
tmp2 = _mm_unpackhi_ps(vecs[2], vecs[3]);
tmp0 = _mm_add_ps(tmp0, tmp1);
tmp1 = _mm_unpacklo_ps(vecs[2], vecs[3]);
tmp1 = _mm_add_ps(tmp1, tmp2);
tmp2 = _mm_movehl_ps(tmp1, tmp0);
tmp0 = _mm_movelh_ps(tmp0, tmp1);
return _mm_add_ps(tmp0, tmp2);
}
template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
{
return _mm_add_pd(_mm_unpacklo_pd(vecs[0], vecs[1]), _mm_unpackhi_pd(vecs[0], vecs[1]));
}
#endif // SSE3
// Integer reductions: shared by both paths (no SSE3 horizontal int add used).
template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
{
Packet4i tmp = _mm_add_epi32(a, _mm_unpackhi_epi64(a,a));
return pfirst(tmp) + pfirst(_mm_shuffle_epi32(tmp, 1));
}
template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
{
Packet4i tmp0, tmp1, tmp2;
tmp0 = _mm_unpacklo_epi32(vecs[0], vecs[1]);
tmp1 = _mm_unpackhi_epi32(vecs[0], vecs[1]);
tmp2 = _mm_unpackhi_epi32(vecs[2], vecs[3]);
tmp0 = _mm_add_epi32(tmp0, tmp1);
tmp1 = _mm_unpacklo_epi32(vecs[2], vecs[3]);
tmp1 = _mm_add_epi32(tmp1, tmp2);
tmp2 = _mm_unpacklo_epi64(tmp0, tmp1);
tmp0 = _mm_unpackhi_epi64(tmp0, tmp1);
return _mm_add_epi32(tmp0, tmp2);
}
// Other reduction functions:
// mul — horizontal product of all lanes.
template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
{
Packet4f tmp = _mm_mul_ps(a, _mm_movehl_ps(a,a));
return pfirst(_mm_mul_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a)
{
return pfirst(_mm_mul_sd(a, _mm_unpackhi_pd(a,a)));
}
// Integer product: spill to an aligned scalar buffer and multiply there.
template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
{
// after some experiments, it is seems this is the fastest way to implement it
// for GCC (eg., reusing pmul is very slow !)
// TODO try to call _mm_mul_epu32 directly
EIGEN_ALIGN16 int aux[4];
pstore(aux, a);
return (aux[0] * aux[1]) * (aux[2] * aux[3]);;
}
// min — horizontal minimum of all lanes.
template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
{
Packet4f tmp = _mm_min_ps(a, _mm_movehl_ps(a,a));
return pfirst(_mm_min_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a)
{
return pfirst(_mm_min_sd(a, _mm_unpackhi_pd(a,a)));
}
// Integer min: spill to scalars and do a pairwise tournament.
template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
{
// after some experiments, it is seems this is the fastest way to implement it
// for GCC (eg., it does not like using std::min after the pstore !!)
EIGEN_ALIGN16 int aux[4];
pstore(aux, a);
int aux0 = aux[0]<aux[1] ? aux[0] : aux[1];
int aux2 = aux[2]<aux[3] ? aux[2] : aux[3];
return aux0<aux2 ? aux0 : aux2;
}
// max — horizontal maximum of all lanes.
template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
{
Packet4f tmp = _mm_max_ps(a, _mm_movehl_ps(a,a));
return pfirst(_mm_max_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a)
{
return pfirst(_mm_max_sd(a, _mm_unpackhi_pd(a,a)));
}
// Integer max: same scalar tournament as predux_min.
template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
{
// after some experiments, it is seems this is the fastest way to implement it
// for GCC (eg., it does not like using std::min after the pstore !!)
EIGEN_ALIGN16 int aux[4];
pstore(aux, a);
int aux0 = aux[0]>aux[1] ? aux[0] : aux[1];
int aux2 = aux[2]>aux[3] ? aux[2] : aux[3];
return aux0>aux2 ? aux0 : aux2;
}
#if (defined __GNUC__)
// template <> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c)
// {
// Packet4f res = b;
// asm("mulps %[a], %[b] \n\taddps %[c], %[b]" : [b] "+x" (res) : [a] "x" (a), [c] "x" (c));
// return res;
// }
// EIGEN_STRONG_INLINE Packet4i _mm_alignr_epi8(const Packet4i& a, const Packet4i& b, const int i)
// {
// Packet4i res = a;
// asm("palignr %[i], %[a], %[b] " : [b] "+x" (res) : [a] "x" (a), [i] "i" (i));
// return res;
// }
#endif
// palign_impl: shift `first` left by Offset lanes, filling the vacated lanes
// from `second` — the building block for unaligned packet access in products.
#ifdef EIGEN_VECTORIZE_SSSE3
// SSSE3 versions: a single byte-wise alignr does the whole job.
template<int Offset>
struct palign_impl<Offset,Packet4f>
{
static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)
{
if (Offset!=0)
first = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(second), _mm_castps_si128(first), Offset*4));
}
};
template<int Offset>
struct palign_impl<Offset,Packet4i>
{
static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second)
{
if (Offset!=0)
first = _mm_alignr_epi8(second,first, Offset*4);
}
};
template<int Offset>
struct palign_impl<Offset,Packet2d>
{
static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second)
{
if (Offset==1)
first = _mm_castsi128_pd(_mm_alignr_epi8(_mm_castpd_si128(second), _mm_castpd_si128(first), 8));
}
};
#else
// SSE2 versions: emulate alignr with move/shuffle combinations per Offset.
template<int Offset>
struct palign_impl<Offset,Packet4f>
{
static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)
{
if (Offset==1)
{
first = _mm_move_ss(first,second);
first = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(first),0x39));
}
else if (Offset==2)
{
first = _mm_movehl_ps(first,first);
first = _mm_movelh_ps(first,second);
}
else if (Offset==3)
{
first = _mm_move_ss(first,second);
first = _mm_shuffle_ps(first,second,0x93);
}
}
};
// Integer variant: same shuffles as the float case, applied through bit-casts.
template<int Offset>
struct palign_impl<Offset,Packet4i>
{
static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second)
{
if (Offset==1)
{
first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
first = _mm_shuffle_epi32(first,0x39);
}
else if (Offset==2)
{
first = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(first)));
first = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
}
else if (Offset==3)
{
first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
first = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second),0x93));
}
}
};
// Double variant: only Offset==1 is meaningful for a 2-lane packet.
template<int Offset>
struct palign_impl<Offset,Packet2d>
{
static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second)
{
if (Offset==1)
{
first = _mm_castps_pd(_mm_movehl_ps(_mm_castpd_ps(first),_mm_castpd_ps(first)));
first = _mm_castps_pd(_mm_movelh_ps(_mm_castpd_ps(first),_mm_castpd_ps(second)));
}
}
};
#endif
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_PACKET_MATH_SSE_H
|
{
"pile_set_name": "Github"
}
|
<!DOCTYPE html>
<html>
<head>
<title>FLOOR.PRECISE Function</title>
<meta charset="utf-8" />
<meta name="description" content="" />
<link type="text/css" rel="stylesheet" href="../editor.css" />
</head>
<body>
<div class="mainpart">
<h1>FLOOR.PRECISE Function</h1>
<p>The <b>FLOOR.PRECISE</b> function is one of the math and trigonometry functions. It is used to return a number that is rounded down to the nearest integer or to the nearest multiple of significance. The number is always rounded down regardless of its sign.</p>
<p>The <b>FLOOR.PRECISE</b> function syntax is:</p>
<p style="text-indent: 150px;"><b><em>FLOOR.PRECISE(x [, significance])</em></b></p>
<p><em>where</em></p>
<p style="text-indent: 50px;"><b><em>x</em></b> is the number you wish to round down.</p>
<p style="text-indent: 50px;"><b><em>significance</em></b> is the multiple of significance you wish to round down to. It is an optional parameter. If it is omitted, the default value of 1 is used. If it is set to zero, the function returns 0.</p>
<p>The numeric values can be entered manually or included into the cell you make reference to.</p>
<p>To apply the <b>FLOOR.PRECISE</b> function,</p>
<ol>
<li>select the cell where you wish to display the result,</li>
<li>click the <b>Insert Function</b> <img alt="Insert Function icon" src="../images/insertfunction.png" /> icon situated at the top toolbar,
<br />or right-click within a selected cell and select the <b>Insert Function</b> option from the menu,
<br />or click the <img alt="Function icon" src="../images/function.png" /> icon situated at the formula bar,
</li>
<li>select the <b>Math and trigonometry</b> function group from the list,</li>
<li>click the <b>FLOOR.PRECISE</b> function,</li>
<li>enter the required arguments separating them by comma,</li>
<li>press the <b>Enter</b> button.</li>
</ol>
<p>The result will be displayed in the selected cell.</p>
<p style="text-indent: 150px;"><img alt="FLOOR.PRECISE Function" src="../images/floorprecise.png" /></p>
</div>
</body>
</html>
|
{
"pile_set_name": "Github"
}
|
#
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
#
# Copyright (c) 2010-2013 Oracle and/or its affiliates. All rights reserved.
#
# The contents of this file are subject to the terms of either the GNU
# General Public License Version 2 only ("GPL") or the Common Development
# and Distribution License("CDDL") (collectively, the "License"). You
# may not use this file except in compliance with the License. You can
# obtain a copy of the License at
# https://glassfish.dev.java.net/public/CDDL+GPL_1_1.html
# or packager/legal/LICENSE.txt. See the License for the specific
# language governing permissions and limitations under the License.
#
# When distributing the software, include this License Header Notice in each
# file and include the License file at packager/legal/LICENSE.txt.
#
# GPL Classpath Exception:
# Oracle designates this particular file as subject to the "Classpath"
# exception as provided by Oracle in the GPL Version 2 section of the License
# file that accompanied this code.
#
# Modifications:
# If applicable, add the following below the License Header, with the fields
# enclosed by brackets [] replaced by your own identifying information:
# "Portions Copyright [year] [name of copyright owner]"
#
# Contributor(s):
# If you wish your version of this file to be governed by only the CDDL or
# only the GPL Version 2, indicate your decision by adding "[Contributor]
# elects to include this software in this distribution under the [CDDL or GPL
# Version 2] license." If you don't indicate a single choice of license, a
# recipient has the option to distribute your version of this file under
# either the CDDL, the GPL Version 2 or to extend the choice of license to
# its licensees as provided above. However, if you add GPL Version 2 code
# and therefore, elected the GPL Version 2 license, then the option applies
# only if the new code is made subject to such option by the copyright
# holder.
#
ejb.embedded.exception_exists_container=\uB0B4\uC7A5\uB41C EJBContainer\uB97C \uC0C8\uB85C \uC0DD\uC131\uD560 \uC218 \uC5C6\uC74C: \uC774\uC804\uC5D0 \uC0DD\uC131\uB41C \uCEE8\uD14C\uC774\uB108\uAC00 \uB2EB\uD788\uC9C0 \uC54A\uC558\uC2B5\uB2C8\uB2E4.
ejb.embedded.failed_create_temporary_domain_xml_file=\uC784\uC2DC domain.xml \uD30C\uC77C \uC0DD\uC131\uC744 \uC2E4\uD328\uD588\uC2B5\uB2C8\uB2E4. \uC790\uC138\uD55C \uB0B4\uC6A9\uC740 \uBBF8\uC138 \uB85C\uADF8 \uB808\uBCA8\uC744 \uC0AC\uC6A9\uD558\uC2ED\uC2DC\uC624.
ejb.embedded.exception_creating_temporary_domain_xml_file=\uC784\uC2DC domain.xml \uD30C\uC77C\uC744 \uC0DD\uC131\uD558\uB824\uB294 \uC911 \uC608\uC0C1\uCE58 \uC54A\uC740 \uC608\uC678 \uC0AC\uD56D\uC774 \uBC1C\uC0DD\uD588\uC2B5\uB2C8\uB2E4.
ejb.embedded.no_matching_end_element=[{0}]\uC5D0 \uB300\uD574 \uC77C\uCE58\uD558\uB294 \uC885\uB8CC \uC694\uC18C\uAC00 \uC5C6\uC2B5\uB2C8\uB2E4.
|
{
"pile_set_name": "Github"
}
|
//----------------------------------------------------------------------------
//
// TSDuck - The MPEG Transport Stream Toolkit
// Copyright (c) 2005-2020, Thierry Lelegard
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
// THE POSSIBILITY OF SUCH DAMAGE.
//
//----------------------------------------------------------------------------
//!
//! @file
//! Representation of an AVC sequence parameter set access unit.
//! AVC is Advanced Video Coding, ISO 14496-10, ITU H.264.
//!
//----------------------------------------------------------------------------
#pragma once
#include "tsAbstractAVCAccessUnit.h"
#include "tsAVCVUIParameters.h"
#include "tsMPEG.h"
namespace ts {
//!
//! Representation of an AVC sequence parameter set access unit.
//! @ingroup mpeg
//!
//! AVC is Advanced Video Coding, ISO 14496-10, ITU H.264.
//!
    class TSDUCKDLL AVCSequenceParameterSet: public AbstractAVCAccessUnit
    {
    public:
        //!
        //! Reference to the superclass.
        //!
        typedef AbstractAVCAccessUnit SuperClass;
        //!
        //! Constructor from a binary area.
        //! @param [in] data Address of binary data to analyze.
        //! @param [in] size Size in bytes of binary data to analyze.
        //!
        AVCSequenceParameterSet(const void* data = nullptr, size_t size = 0);
        // Inherited methods (clear all parsed fields / dump them in readable form).
        virtual void clear() override;
        virtual std::ostream& display(std::ostream& strm = std::cout, const UString& margin = UString()) const override;
        //!
        //! Get chroma_format_idc, applying default value (see H.264 7.4.2.1.1).
        //! When extension fields 1 are absent, the default CHROMA_420 is returned.
        //! @return The chroma_format_idc, applying default value.
        //!
        uint8_t chroma() const {return extension1() ? chroma_format_idc : uint8_t(CHROMA_420);}
        //!
        //! Get separate_colour_plane_flag, applying default value (see H.264 7.4.2.1.1).
        //! The flag is only coded when chroma_format_idc == 3; otherwise 0 is returned.
        //! @return The separate_colour_plane_flag, applying default value.
        //!
        uint8_t separateColourPlaneFlag() const {return extension1() && chroma_format_idc == 3 ? separate_colour_plane_flag : 0;}
        //!
        //! The ChromaArrayType variable (see H.264 7.4.2.1.1).
        //! @return The ChromaArrayType variable.
        //!
        uint8_t chromaArrayType() const {return separateColourPlaneFlag() == 0 ? chroma() : 0;}
        //!
        //! The SubWidthC variable (see H.264 6.2).
        //! @return The SubWidthC variable (see H.264 6.2).
        //!
        uint32_t subWidthC() const;
        //!
        //! The SubHeightC variable (see H.264 6.2).
        //! @return The SubHeightC variable (see H.264 6.2).
        //!
        uint32_t subHeightC() const;
        //!
        //! The CropUnitX variable (see H.264 7.4.2.1.1).
        //! @return The CropUnitX variable.
        //!
        uint32_t cropUnitX() const;
        //!
        //! The CropUnitY variable (see H.264 7.4.2.1.1).
        //! @return The CropUnitY variable.
        //!
        uint32_t cropUnitY() const;
        //!
        //! Frame width in pixels.
        //! @return The frame width in pixels.
        //!
        uint32_t frameWidth() const;
        //!
        //! Frame height in pixels.
        //! @return The frame height in pixels.
        //!
        uint32_t frameHeight() const;
        //!
        //! Check validity of extension fields 1.
        //! @return True if extension fields 1 are valid.
        //!
        bool extension1() const;
        // Sequence parameter set fields.
        // See ISO/IEC 14496-10 sections 7.3.2.1 and 7.4.2.1.
        // The commented-out "if (...)" lines mirror the conditional structure of
        // the bitstream syntax: a field is only meaningful when the corresponding
        // condition held while parsing.
        uint8_t profile_idc; //!< profile_idc
        uint8_t constraint_set0_flag; //!< constraint_set0_flag
        uint8_t constraint_set1_flag; //!< constraint_set1_flag
        uint8_t constraint_set2_flag; //!< constraint_set2_flag
        uint8_t constraint_set3_flag; //!< constraint_set3_flag
        uint8_t reserved_zero_4bits; //!< reserved_zero_4bits
        uint8_t level_idc; //!< level_idc
        uint32_t seq_parameter_set_id; //!< seq_parameter_set_id
        // if (extension1()) {
        uint8_t chroma_format_idc; //!< chroma_format_idc
        // if (chroma_format_idc == 3) {
        uint8_t separate_colour_plane_flag; //!< separate_colour_plane_flag
        // }
        uint32_t bit_depth_luma_minus8; //!< bit_depth_luma_minus8
        uint32_t bit_depth_chroma_minus8; //!< bit_depth_chroma_minus8
        uint8_t qpprime_y_zero_transform_bypass_flag; //!< qpprime_y_zero_transform_bypass_flag
        uint8_t seq_scaling_matrix_present_flag; //!< seq_scaling_matrix_present_flag
        // scaling lists not stored in class AVCSequenceParameterSet
        // }
        uint32_t log2_max_frame_num_minus4; //!< log2_max_frame_num_minus4
        uint32_t pic_order_cnt_type; //!< pic_order_cnt_type
        // if (pic_order_cnt_type == 0) {
        uint32_t log2_max_pic_order_cnt_lsb_minus4; //!< log2_max_pic_order_cnt_lsb_minus4
        // }
        // else if (pic_order_cnt_type == 1) {
        uint8_t delta_pic_order_always_zero_flag; //!< delta_pic_order_always_zero_flag
        int32_t offset_for_non_ref_pic; //!< offset_for_non_ref_pic
        int32_t offset_for_top_to_bottom_field; //!< offset_for_top_to_bottom_field
        uint32_t num_ref_frames_in_pic_order_cnt_cycle; //!< num_ref_frames_in_pic_order_cnt_cycle
        std::vector<int32_t> offset_for_ref_frame; //!< offset_for_ref_frame
        // }
        uint32_t num_ref_frames; //!< num_ref_frames
        uint8_t gaps_in_frame_num_value_allowed_flag; //!< gaps_in_frame_num_value_allowed_flag
        uint32_t pic_width_in_mbs_minus1; //!< pic_width_in_mbs_minus1
        uint32_t pic_height_in_map_units_minus1; //!< pic_height_in_map_units_minus1
        uint8_t frame_mbs_only_flag; //!< frame_mbs_only_flag
        // if (!frame_mbs_only_flag) {
        uint8_t mb_adaptive_frame_field_flag; //!< mb_adaptive_frame_field_flag
        // }
        uint8_t direct_8x8_inference_flag; //!< direct_8x8_inference_flag
        uint8_t frame_cropping_flag; //!< frame_cropping_flag
        // if (frame_cropping_flag) {
        uint32_t frame_crop_left_offset; //!< frame_crop_left_offset
        uint32_t frame_crop_right_offset; //!< frame_crop_right_offset
        uint32_t frame_crop_top_offset; //!< frame_crop_top_offset
        uint32_t frame_crop_bottom_offset; //!< frame_crop_bottom_offset
        // }
        uint8_t vui_parameters_present_flag; //!< vui_parameters_present_flag
        // if (vui_parameters_present_flag) {
        AVCVUIParameters vui; //!< vui
        // }
        // Validity of RBSP trailing bits
        bool rbsp_trailing_bits_valid; //!< rbsp_trailing_bits_valid
        size_t rbsp_trailing_bits_count; //!< rbsp_trailing_bits_count
    protected:
        //!
        //! Parse the body of the binary access unit.
        //! @param [in,out] parser The AVC parser.
        //! @return The "valid" flag.
        //!
        virtual bool parseBody(AVCParser& parser) override;
    };
}
|
{
"pile_set_name": "Github"
}
|
#!/bin/sh
# Wrapper that runs the locally built `openssl` binary from the sibling
# apps/ directory, choosing the appropriate invocation per platform.
# Directory containing this script (with trailing slash), derived from $0.
HERE="`echo $0 | sed -e 's|[^/]*$||'`"
OPENSSL="${HERE}../apps/openssl"
# Default OPENSSL_ENGINES to the build tree's engines/ directory unless the
# caller has already set it.
if [ -d "${HERE}../engines" -a "x$OPENSSL_ENGINES" = "x" ]; then
        OPENSSL_ENGINES="${HERE}../engines"; export OPENSSL_ENGINES
fi
if [ -x "${OPENSSL}.exe" ]; then
        # The original reason for this script existence is to work around
        # certain caveats in run-time linker behaviour. On Windows platforms
        # adjusting $PATH used to be sufficient, but with introduction of
        # SafeDllSearchMode in XP/2003 the only way to get it right in
        # *all* possible situations is to copy newly built .DLLs to apps/
        # and test/, which is now done elsewhere... The $PATH is adjusted
        # for backward compatibility (and nostalgic reasons:-).
        if [ "$OSTYPE" != msdosdjgpp ]; then
                PATH="${HERE}..:$PATH"; export PATH
        fi
        exec "${OPENSSL}.exe" "$@"
elif [ -x "${OPENSSL}" -a -x "${HERE}shlib_wrap.sh" ]; then
        # Unix build: run via shlib_wrap.sh so shared libraries resolve.
        exec "${HERE}shlib_wrap.sh" "${OPENSSL}" "$@"
else
        exec "${OPENSSL}" "$@" # hope for the best...
fi
|
{
"pile_set_name": "Github"
}
|
// CompareView.h : header file of the CCompareView class
//
// Copyright (c) 2015 by Andrew W. Phillips
//
// This file is distributed under the MIT license, which basically says
// you can do what you want with it and I take no responsibility for bugs.
// See http://www.opensource.org/licenses/mit-license.php for full details.
//
#ifndef COMPAREVIEW_INCLUDED
#define COMPAREVIEW_INCLUDED 1
#include "ScrView.h"
#include <vector>
// Forward declarations
class CHexEditDoc;
class CChildFrame;
// When comparing files we use this view to display the "compare with" file.
// It depends on the corresponding CHexEditView for a lot of its display
// formatting but there is independent control over:
// - file displayed and hence OnDraw and OnInitialUpdate
// - current position (unless using auto-sync)
// - selection (so the user can select parts of this file)
// - searches
class CCompareView : public CScrView
{
    friend CHexEditView;
protected: // create from serialization only
    CCompareView();
    DECLARE_DYNCREATE(CCompareView)
public:
    // The "owner" hex view whose display settings (charset, layout, etc.)
    // this compare view mirrors.
    CHexEditView * phev_;
// Attributes
public:
    CHexEditDoc * GetDocument() { return (CHexEditDoc*)phev_->m_pDocument; }
    // Current caret position as a file address.
    FILE_ADDRESS GetPos() const { return pos2addr(GetCaret()); }
    // The compare view is never editable.
    BOOL ReadOnly() const { return TRUE; }
    // Display-mode queries, all delegated to the owning hex view's settings.
    BOOL CharMode() const { return phev_->display_.edit_char; }
    BOOL EbcdicMode() const { return phev_->display_.char_set == CHARSET_EBCDIC; }
    BOOL OemMode() const { return phev_->display_.char_set == CHARSET_OEM; }
    BOOL AnsiMode() const { return phev_->display_.char_set == CHARSET_ANSI; }
    BOOL DecAddresses() const { return !phev_->display_.hex_addr; } // Now that user can show both addresses at once this is probably the best return value
// Operations
    //virtual void SetSel(CPointAp, CPointAp, bool base1 = false);
    bool CopyToClipboard();
    virtual BOOL MovePos(UINT nChar, UINT nRepCnt, BOOL, BOOL, BOOL);
    void MoveToAddress(FILE_ADDRESS astart, FILE_ADDRESS aend = -1, int row = 0);
public:
// Overrides
    //virtual void DisplayCaret(int char_width = -1);
    virtual BOOL OnCmdMsg(UINT nID, int nCode, void* pExtra,
                          AFX_CMDHANDLERINFO* pHandlerInfo)
    {
        // If compare view can't handle it try "owner" hex view
        if (CScrView::OnCmdMsg(nID, nCode, pExtra, pHandlerInfo))
            return TRUE;
        else if (phev_ != NULL)
            return phev_->OnCmdMsg(nID, nCode, pExtra, pHandlerInfo);
        else
            return FALSE;
    }
public:
    virtual void OnDraw(CDC* pDC); // overridden to draw this view
    virtual void OnInitialUpdate();
    // virtual void OnPrepareDC(CDC* pDC, CPrintInfo* pInfo = NULL);
protected:
    //virtual BOOL OnPreparePrinting(CPrintInfo* pInfo);
    //virtual void OnBeginPrinting(CDC* pDC, CPrintInfo* pInfo);
    //virtual void OnEndPrinting(CDC* pDC, CPrintInfo* pInfo);
    //virtual void OnUpdate(CView* pSender, LPARAM lHint, CObject* pHint);
    //virtual void OnPrint(CDC* pDC, CPrintInfo* pInfo);
    //virtual void OnEndPrintPreview(CDC* pDC, CPrintInfo* pInfo, POINT point, CPreviewView* pView);
    //virtual void OnActivateView(BOOL bActivate, CView* pActivateView, CView* pDeactiveView);
    //virtual BOOL PreCreateWindow(CREATESTRUCT& cs);
// Implementation
public:
    //virtual void DoInvalidate();
protected:
    virtual void ValidateCaret(CPointAp &pos, BOOL inside=TRUE);
    //virtual void InvalidateRange(CPointAp start, CPointAp end, bool f = false);
    //virtual void DoInvalidateRect(LPCRECT lpRect);
    //virtual void DoInvalidateRgn(CRgn* pRgn);
    virtual void DoScrollWindow(int xx, int yy);
    //virtual void DoUpdateWindow();
    //virtual void DoHScroll(int total, int page, int pos);
    //virtual void DoVScroll(int total, int page, int pos);
    //void DoUpdate();
    // After this view scrolls, optionally keep the owner hex view in sync.
    virtual void AfterScroll(CPointAp newpos)
    {
        if (phev_ != NULL && phev_->display_.auto_scroll_comp)
            phev_->SetScroll(newpos);
    }
protected:
    // Message handlers.
    //afx_msg void OnDestroy();
    afx_msg void OnSize(UINT nType, int cx, int cy);
    afx_msg BOOL OnEraseBkgnd(CDC* pDC);
    afx_msg void OnLButtonUp(UINT nFlags, CPoint point);
    afx_msg void OnContextMenu(CWnd* pWnd, CPoint point);
    afx_msg void OnSetFocus(CWnd* pNewWnd);
    afx_msg void OnKillFocus(CWnd* pNewWnd);
    afx_msg void OnMouseMove(UINT nFlags, CPoint point);
    afx_msg LRESULT OnMouseHover(WPARAM wp, LPARAM lp);
    afx_msg LRESULT OnMouseLeave(WPARAM wp, LPARAM lp);
    afx_msg void OnUpdateDisable(CCmdUI* pCmdUI) { pCmdUI->Enable(FALSE); }
    // Navigation between compare differences (first/prev/next/last).
    afx_msg void OnCompFirst();
    afx_msg void OnUpdateCompFirst(CCmdUI* pCmdUI);
    afx_msg void OnCompPrev();
    afx_msg void OnUpdateCompPrev(CCmdUI* pCmdUI);
    afx_msg void OnCompNext();
    afx_msg void OnUpdateCompNext(CCmdUI* pCmdUI);
    afx_msg void OnCompLast();
    afx_msg void OnUpdateCompLast(CCmdUI* pCmdUI);
    afx_msg void OnEditCopy();
    afx_msg void OnUpdateEditCopy(CCmdUI* pCmdUI);
    afx_msg void OnSelectAll();
    DECLARE_MESSAGE_MAP()
private:
    void calc_addr_width(FILE_ADDRESS); // Also used by recalc_display
    //void draw_bg(CDC* pDC, const CRectAp &doc_rect, bool neg_x, bool neg_y,
    //             int char_height, int char_width, int char_width_w,
    //             COLORREF, FILE_ADDRESS start_addr, FILE_ADDRESS end_addr,
    //             int draw_height = -1);
    void draw_bg(CDC* pDC, const CRectAp &doc_rect, bool neg_x, bool neg_y,
                 int line_height, int char_width, int char_width_w,
                 COLORREF clr, FILE_ADDRESS start_addr, FILE_ADDRESS end_addr,
                 bool merge = true, int draw_height = -1);
    void draw_deletions(CDC* pDC, const vector<FILE_ADDRESS> & addr, const vector<FILE_ADDRESS> & len,
                        FILE_ADDRESS first_virt, FILE_ADDRESS last_virt,
                        const CRectAp &doc_rect, bool neg_x, bool neg_y,
                        int line_height, int char_width, int char_width_w,
                        COLORREF colour);
    void draw_backgrounds(CDC* pDC,
                          const vector<FILE_ADDRESS> & addr, const vector<FILE_ADDRESS> & len,
                          FILE_ADDRESS first_virt, FILE_ADDRESS last_virt,
                          const CRectAp &doc_rect, bool neg_x, bool neg_y,
                          int line_height, int char_width, int char_width_w,
                          COLORREF colour, bool merge = true, int draw_height = -1);
    CPointAp addr2pos(FILE_ADDRESS address, int row = 0) const; // Convert byte address in doc to display position
    int hex_pos(int column, int width=0) const // get X coord of hex display column
    {
        if (width == 0) width = phev_->text_width_;
        return (addr_width_ + column*3 + column/phev_->group_by_)*width;
    }
    int char_pos(int column, int widthd=0, int widthw=0) const // get X coord of ASCII/EBCDIC display column
    {
        if (widthd == 0) widthd = phev_->text_width_;
        if (widthw == 0) widthw = phev_->text_width_w_;
        if (phev_->display_.vert_display)
            return addr_width_*widthd +
                   (column + column/phev_->group_by_)*widthw;
        else if (phev_->display_.hex_area)
            return (addr_width_ + phev_->rowsize_*3)*widthd +
                   ((phev_->rowsize_-1)/phev_->group_by_)*widthd +
                   column*widthw;
        else
            return addr_width_*widthd +
                   column*widthw;
    }
    int pos_hex(int, int inside = FALSE) const; // Closest hex display col given X
    int pos_char(int, int inside = FALSE) const; // Closest char area col given X
    FILE_ADDRESS pos2addr(CPointAp pos, BOOL inside = TRUE) const; // Convert a display position to closest address
    int pos2row(CPointAp pos); // Find vert_display row (0, 1, or 2) of display position
    // Return the current selection as a pair of file addresses.
    BOOL GetSelAddr(FILE_ADDRESS &start_addr, FILE_ADDRESS &end_addr)
    {
        ASSERT(phev_->line_height_ > 0);
        CPointAp start, end;
        BOOL retval = GetSel(start, end);
        start_addr = pos2addr(start);
        end_addr = pos2addr(end);
        return retval;
    }
    // Functions for selection tip (sel_tip_)
    // void show_selection_tip();
    void invalidate_addr_range(FILE_ADDRESS, FILE_ADDRESS); // Invalidate hex/aerial display for address range
    void invalidate_hex_addr_range(FILE_ADDRESS start_addr, FILE_ADDRESS end_addr); // Invalidate hex view only
    void recalc_display();
    int addr_width_; // How much room in display does address area take?
    int hex_width_, dec_width_, num_width_; // Components of addr_width_
    void begin_change(); // Store current state etc
    void end_change(); // Fix display etc
    // State saved by begin_change() and restored/used by end_change().
    BOOL previous_caret_displayed_;
    FILE_ADDRESS previous_start_addr_, previous_end_addr_; // selection
    BOOL previous_end_base_;
    int previous_row_; // row (0-2) if vert_display
};
#endif // COMPAREVIEW_INCLUDED
/////////////////////////////////////////////////////////////////////////////
|
{
"pile_set_name": "Github"
}
|
{
"activePlaceCount": 0,
"birth": {
"place": {
"name": "Lausanne, Schweiz",
"placeName": "Lausanne",
"placeType": "inhabited_place"
},
"time": {
"startYear": 1846
}
},
"birthYear": 1846,
"date": "1846\u20131933",
"death": {
"place": {
"name": "\u00c9ire",
"placeName": "\u00c9ire",
"placeType": "nation"
},
"time": {
"startYear": 1933
}
},
"fc": "Elizabeth Butler (Lady Butler)",
"gender": "Female",
"id": 71,
"mda": "Butler, Elizabeth",
"movements": [],
"startLetter": "B",
"totalWorks": 1,
"url": "http://www.tate.org.uk/art/artists/elizabeth-butler-lady-butler-71"
}
|
{
"pile_set_name": "Github"
}
|
[
{
"op": "add",
"path": "/ResourceTypes/AWS::CloudFormation::WaitCondition/Properties/Timeout/Value",
"value": {
"ValueType": "AWS::CloudFormation::WaitCondition.Timeout"
}
}
]
|
{
"pile_set_name": "Github"
}
|
// Copyright 2013-2015 Bowery, Inc.
package prompt
import (
"os"
"syscall"
"unsafe"
)
// Flags to control the terminals mode.
// These mirror the Windows ENABLE_* console input mode bits passed to
// SetConsoleMode; they are cleared to put the console into raw input mode.
const (
	echoInputFlag      = 0x0004 // echo typed characters
	insertModeFlag     = 0x0020 // insert (vs overwrite) mode
	lineInputFlag      = 0x0002 // line-buffered reads
	mouseInputFlag     = 0x0010 // report mouse events
	processedInputFlag = 0x0001 // system processing of control keys
	windowInputFlag    = 0x0008 // report window resize events
)

// Error number returned for an invalid handle.
const errnoInvalidHandle = 0x6

// Lazily loaded kernel32.dll procedures used to query and set console state.
var (
	kernel                     = syscall.NewLazyDLL("kernel32.dll")
	getConsoleScreenBufferInfo = kernel.NewProc("GetConsoleScreenBufferInfo")
	setConsoleMode             = kernel.NewProc("SetConsoleMode")
)
// consoleScreenBufferInfo contains various fields for the terminal.
// Field order matches the Win32 CONSOLE_SCREEN_BUFFER_INFO struct so the
// value can be filled in directly by GetConsoleScreenBufferInfo.
type consoleScreenBufferInfo struct {
	size              coord     // buffer size in character cells
	cursorPosition    coord     // cursor location in the buffer
	attributes        uint16    // character attributes
	window            smallRect // visible window rectangle within the buffer
	maximumWindowSize coord     // largest possible window size
}

// coord contains coords for positioning.
type coord struct {
	x int16
	y int16
}

// smallRect contains positions for the window edges.
type smallRect struct {
	left   int16
	top    int16
	right  int16
	bottom int16
}
// terminalSize retrieves the cols/rows for the terminal connected to out.
func terminalSize(out *os.File) (int, int, error) {
	info := new(consoleScreenBufferInfo)
	status, _, callErr := getConsoleScreenBufferInfo.Call(out.Fd(), uintptr(unsafe.Pointer(info)))
	if status == 0 {
		return 0, 0, callErr
	}
	// The window rectangle is inclusive on both edges, so the span is
	// (right-left)+1 columns and (bottom-top)+1 rows.
	cols := info.window.right - info.window.left + 1
	rows := info.window.bottom - info.window.top + 1
	return int(cols), int(rows), nil
}
// isNotTerminal checks if an error is related to the input not being a terminal.
func isNotTerminal(err error) bool {
	if errno, ok := err.(syscall.Errno); ok {
		return errno == errnoInvalidHandle
	}
	return false
}
// terminal contains the private fields for a Windows terminal.
type terminal struct {
	supportsEditing bool    // true once the console mode was successfully read
	fd              uintptr // handle of the input file
	origMode        uint32  // console mode to restore on Close
}
// newTerminal creates a terminal and sets it to raw input mode.
func newTerminal(in *os.File) (*terminal, error) {
	term := &terminal{fd: in.Fd()}

	// If the console mode cannot be read, the input is not an editable
	// console; hand back a terminal without raw-mode support.
	if err := syscall.GetConsoleMode(syscall.Handle(term.fd), &term.origMode); err != nil {
		return term, nil
	}
	term.supportsEditing = true

	// Strip the echo/line-buffering/processing flags to get raw key input.
	rawMode := term.origMode
	rawMode &^= (echoInputFlag | insertModeFlag | lineInputFlag | mouseInputFlag |
		processedInputFlag | windowInputFlag)

	if status, _, err := setConsoleMode.Call(term.fd, uintptr(rawMode)); status == 0 {
		return nil, err
	}
	return term, nil
}
// Close disables the terminals raw input.
func (term *terminal) Close() error {
	if !term.supportsEditing {
		// Raw mode was never enabled, so there is nothing to restore.
		return nil
	}
	if status, _, err := setConsoleMode.Call(term.fd, uintptr(term.origMode)); status == 0 {
		return err
	}
	return nil
}
|
{
"pile_set_name": "Github"
}
|
# Copyright 2018/2019 The RLgraph authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import, division, print_function
from rlgraph import get_backend
from rlgraph.components.component import Component
from rlgraph.utils.decorators import rlgraph_api
from rlgraph.utils.ops import DataOpDict, DataOpTuple, FLATTEN_SCOPE_PREFIX
class ContainerMerger(Component):
    """
    Merges incoming items into one FlattenedDataOp: either a DataOpDict keyed
    by the names given at construction time, or a DataOpTuple.
    """
    def __init__(self, *input_names_or_num_items, **kwargs):
        """
        Args:
            *input_names_or_num_items (Union[str,int]): List of the names of the different inputs in the
                order they will be passed into the `merge` API-method in the returned merged Dict.
                Or the number of items in the Tuple to be merged.
                Example:
                input_names_or_num_items = ["A", "B"]
                - merge(Dict(c=1, d=2), Tuple(3, 4))
                - returned value: Dict(A=Dict(c=1, d=2), B=Tuple(3, 4))
                input_names_or_num_items = 3: 3 items will be merged into a Tuple.

        Keyword Args:
            merge_tuples_into_one (bool): Whether to merge incoming DataOpTuples into one single DataOpTuple.
                If True: tupleA + tupleB -> (tupleA[0] + tupleA[1] + tupleA[...] + tupleB[0] + tupleB[1] ...).
                If False: tupleA + tupleB -> (tupleA + tupleB).
            is_tuple (bool): Whether we should merge a tuple.
        """
        self.merge_tuples_into_one = kwargs.pop("merge_tuples_into_one", False)
        # `is_tuple` defaults to `merge_tuples_into_one`: asking for tuple
        # flattening implies tuple output.
        self.is_tuple = kwargs.pop("is_tuple", self.merge_tuples_into_one)
        super(ContainerMerger, self).__init__(scope=kwargs.pop("scope", "container-merger"), **kwargs)
        self.dict_keys = None
        # A single int argument means: merge that many items into a DataOpTuple.
        if len(input_names_or_num_items) == 1 and isinstance(input_names_or_num_items[0], int):
            self.is_tuple = True
        else:
            # Otherwise every argument must be a string key for the output
            # DataOpDict.
            assert all(isinstance(i, str) for i in input_names_or_num_items), \
                "ERROR: Not all input names of DictMerger Component '{}' are strings.".format(self.global_scope)
            self.dict_keys = input_names_or_num_items

    def check_input_spaces(self, input_spaces, action_space=None):
        """
        Collects the Spaces of all connected inputs ("inputs[0]", "inputs[1]", ...)
        and, when merging into a Dict, asserts that their number matches the
        number of `dict_keys` given at construction time.
        """
        spaces = []
        idx = 0
        # Gather the Space of each incoming input socket until none is left.
        while True:
            key = "inputs[{}]".format(idx)
            if key not in input_spaces:
                break
            spaces.append(input_spaces[key])
            idx += 1
        # If Tuple -> Incoming inputs could be of any number; nothing to check.
        if self.dict_keys:
            len_ = len(self.dict_keys)
            assert len(spaces) == len_,\
                "ERROR: Number of incoming Spaces ({}) does not match number of given `dict_keys` ({}) in" \
                "ContainerMerger Component '{}'!".format(len(spaces), len_, self.global_scope)

    @rlgraph_api
    def _graph_fn_merge(self, *inputs):
        """
        Merges the inputs into a single DataOpDict OR DataOpTuple with the flat keys given in `self.dict_keys`.

        Args:
            *inputs (FlattenedDataOp): The input items to be merged into a ContainerDataOp.

        Returns:
            ContainerDataOp: The DataOpDict or DataOpTuple as a merger of all *inputs.
        """
        if self.is_tuple is True:
            ret = []
            for op in inputs:
                # Merge single items inside a DataOpTuple into resulting tuple.
                if self.merge_tuples_into_one and isinstance(op, DataOpTuple):
                    ret.extend(list(op))
                # Strict by-input merging.
                else:
                    ret.append(op)
            return DataOpTuple(ret)
        else:
            ret = DataOpDict()
            for i, op in enumerate(inputs):
                # With the pytorch backend in define-by-run mode, keys carry
                # the flatten-scope prefix; otherwise the plain key is used.
                if get_backend() == "pytorch" and self.execution_mode == "define_by_run":
                    ret[FLATTEN_SCOPE_PREFIX + self.dict_keys[i]] = op
                else:
                    ret[self.dict_keys[i]] = op
            return ret
|
{
"pile_set_name": "Github"
}
|
// ==========================================================================
// SeqAn - The Library for Sequence Analysis
// ==========================================================================
// Copyright (c) 2006-2018, Knut Reinert, FU Berlin
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of Knut Reinert or the FU Berlin nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
//
// ==========================================================================
// Author: Enrico Siragusa <enrico.siragusa@fu-berlin.de>
// ==========================================================================
// Approximate string matching via backtracking on two substring indices.
// ==========================================================================
#ifndef SEQAN_FIND_BACKTRACKING_MULTIPLE_H_
#define SEQAN_FIND_BACKTRACKING_MULTIPLE_H_
//#define SEQAN_DEBUG
namespace seqan {
// ============================================================================
// Forwards
// ============================================================================
// Forward declaration of the backtracking specification tag.
template <typename TDistance, typename TSpec>
struct Backtracking;

// Forward declarations of the backtracking stage tags (defined below).
struct StageInitial_;
struct StageUpper_;
struct StageDiagonal_;
struct StageLower_;
struct StageFinal_;
struct StageExact_;
// ============================================================================
// Metafunctions
// ============================================================================
// ----------------------------------------------------------------------------
// Metafunction TextIterator_
// ----------------------------------------------------------------------------
// Iterator type used to walk the text index: a top-down suffix-tree iterator.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TDistance, typename TSpec>
struct TextIterator_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> >
{
    typedef typename Iterator<Index<TText, TTextIndexSpec>, TopDown<> >::Type Type;
};

// ----------------------------------------------------------------------------
// Metafunction PatternIterator_
// ----------------------------------------------------------------------------

// Iterator type used to walk the pattern index: a top-down suffix-tree iterator.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TDistance, typename TSpec>
struct PatternIterator_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> >
{
    typedef typename Iterator<Index<TPattern, TPatternIndexSpec>, TopDown<> >::Type Type;
};

// ----------------------------------------------------------------------------
// Metafunction VertexScore_
// ----------------------------------------------------------------------------

// Score information stored per traversal vertex.
template <typename TBacktracking>
struct VertexScore_ {};

// Hamming distance: a single mismatch counter per vertex.
template <typename TSpec>
struct VertexScore_<Backtracking<HammingDistance, TSpec> >
{
    typedef unsigned char Type;
};

// Edit distance: one DP-matrix column per vertex, viewed as a segment.
template <typename TSpec>
struct VertexScore_<Backtracking<EditDistance, TSpec> >
{
    typedef String<unsigned char> TString;
    typedef Segment<TString, InfixSegment> Type;
    typedef Segment<const TString, InfixSegment> ConstType;
};

// ----------------------------------------------------------------------------
// Metafunction VertexScoreStack_
// ----------------------------------------------------------------------------

// Stack of per-vertex scores maintained during backtracking.
template <typename TBacktracking>
struct VertexScoreStack_ {};

template <typename TSpec>
struct VertexScoreStack_<Backtracking<HammingDistance, TSpec> >
{
    typedef String<typename VertexScore_<Backtracking<HammingDistance, TSpec> >::Type> Type;
};

// Edit distance keeps the DP columns in one concat-direct StringSet.
template <typename TSpec>
struct VertexScoreStack_<Backtracking<EditDistance, TSpec> >
{
    typedef StringSet<typename VertexScore_<Backtracking<EditDistance, TSpec> >::TString, Owner<ConcatDirect<> > > Type;
};
// ----------------------------------------------------------------------------
// Metafunction NextStage_
// ----------------------------------------------------------------------------
// Compile-time state machine: maps the current backtracking stage to the next
// one, depending on the distance model. Defaults to Nothing (no next stage).
template <typename TBacktracking, typename TStage>
struct NextStage_
{
    typedef Nothing Type;
};

// HammingDistance: StageInitial|StageExact -> StageFinal
template <typename TSpec>
struct NextStage_<Backtracking<HammingDistance, TSpec>, StageInitial_>
{
    typedef StageFinal_ Type;
};

template <typename TSpec>
struct NextStage_<Backtracking<HammingDistance, TSpec>, StageExact_>
{
    typedef StageFinal_ Type;
};

// EditDistance: StageInitial -> StageUpper -> StageDiagonal -> StageLower -> StageFinal
template <typename TSpec>
struct NextStage_<Backtracking<EditDistance, TSpec>, StageInitial_>
{
    typedef StageUpper_ Type;
};

template <typename TSpec>
struct NextStage_<Backtracking<EditDistance, TSpec>, StageUpper_>
{
    typedef StageDiagonal_ Type;
};

template <typename TSpec>
struct NextStage_<Backtracking<EditDistance, TSpec>, StageDiagonal_>
{
    typedef StageLower_ Type;
};

template <typename TSpec>
struct NextStage_<Backtracking<EditDistance, TSpec>, StageLower_>
{
    typedef StageFinal_ Type;
};

// ============================================================================
// Tags, Classes, Enums
// ============================================================================

// ----------------------------------------------------------------------------
// Tags for backtracking stages
// ----------------------------------------------------------------------------

// Empty tag types selecting the _updateVertexScore() overload for each stage.
struct StageInitial_ {};
struct StageUpper_ {};
struct StageDiagonal_ {};
struct StageLower_ {};
struct StageFinal_ {};
struct StageExact_ {};
// ----------------------------------------------------------------------------
// Class Finder_
// ----------------------------------------------------------------------------
// Finder state for approximate matching of one index against another via
// backtracking: parallel stacks of text/pattern iterators and vertex scores,
// plus the maximal score (error count) still accepted.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec,
          typename TDistance, typename TSpec>
struct Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> >
{
    typedef Index<TText, TTextIndexSpec> TTextIndex;
    typedef Index<TPattern, TPatternIndexSpec> TPatternIndex;
    typedef Backtracking<TDistance, TSpec> TBacktracking;
    typedef typename TextIterator_<TTextIndex, TPatternIndex, TBacktracking>::Type TTextIterator;
    typedef typename PatternIterator_<TTextIndex, TPatternIndex, TBacktracking>::Type TPatternIterator;
    typedef String<TTextIterator> TTextStack;
    typedef String<TPatternIterator> TPatternStack;
    typedef typename VertexScoreStack_<TBacktracking>::Type TVertexScoreStack;
    typedef typename Score_<TBacktracking>::Type TScore;

    TTextStack textStack;          // text iterators along the current path
    TPatternStack patternStack;    // pattern iterators along the current path
    TVertexScoreStack scoreStack;  // score per vertex on the current path
    TScore maxScore;               // maximal allowed score (errors)

    Finder_() :
        textStack(),
        patternStack(),
        scoreStack(),
        maxScore(0)
    {}
};
// ============================================================================
// Functions
// ============================================================================
// ----------------------------------------------------------------------------
// Function _min3()
// ----------------------------------------------------------------------------
// Returns the smallest of the three given values.
template <typename TValue>
inline TValue
_min3(TValue a, TValue b, TValue c)
{
    TValue smallest = (b < a) ? b : a;
    return (c < smallest) ? c : smallest;
}
// ----------------------------------------------------------------------------
// Function _updateVertexScore()
// ----------------------------------------------------------------------------
// Stage-initial DP column update: no text character has been consumed yet,
// so only the last cell of the column changes (one more insertion).
template <typename TVertexScore>
inline void
_updateVertexScore(TVertexScore current,
                   TVertexScore const previous,
                   StageInitial_ const & /* tag */)
{
    // Update last cell.
    // C[i,0] = C[i-1,0] + 1 [Upper]
    back(current) = back(previous) + 1;
}
// Computes one new column `current` of the banded edit-distance DP matrix
// from the previous column, given the consumed text character and an iterator
// into the pattern. The TStage tag selects how the first/last cells are
// treated: StageUpper_ columns start at the matrix border, StageLower_
// columns end at it, and the diagonal stage uses the general recurrence for
// both ends.
template <typename TVertexScore, typename TTextValue, typename TPatternIterator, typename TStage>
inline void
_updateVertexScore(TVertexScore current,
                   TVertexScore const previous,
                   TTextValue textChar,
                   TPatternIterator patternIt,
                   TStage const & /* tag */)
{
    typedef typename Iterator<TVertexScore, Standard>::Type TVertexScoreIterator;
    typedef typename Iterator<TVertexScore const, Standard>::Type TVertexScoreConstIterator;
    typedef typename Value<TVertexScore>::Type TScore;

    TVertexScoreIterator currentIt = begin(current, Standard());
    TVertexScoreIterator columnEnd = end(current, Standard());
    TVertexScoreConstIterator previousIt = begin(previous, Standard());

    // Update first cell.
    SEQAN_IF_CONSTEXPR (IsSameType<TStage, StageUpper_>::VALUE)
    {
        // First row of the matrix: only insertions are possible.
        // C[0,j] = C[0,j-1] + 1 [Left]
        value(currentIt) = value(previousIt) + 1;
    }
    else
    {
        TScore score = ordEqual(textChar, value(patternIt)) ? 0 : 1;
        // C[i,j] = min { C[i-1,j-1] + d(t,p), C[i-1,j] + 1 } [Diagonal, Left]
        value(currentIt) = _min(value(previousIt) + score, value(previousIt + 1) + 1);
        ++previousIt;
        ++patternIt;
    }

    // Update central cells with the full three-way recurrence.
    for (++currentIt; currentIt != columnEnd - 1; ++currentIt, ++previousIt, ++patternIt)
    {
        TScore score = ordEqual(textChar, value(patternIt)) ? 0 : 1;
        // C[i,j] = min { C[i-1,j-1] + d(t,p), C[i-1,j] + 1, C[i,j-1] + 1 } [Diagonal, Left, Upper]
        value(currentIt) = _min3(value(previousIt) + score, value(previousIt + 1) + 1, value(currentIt - 1) + 1);
    }

    // Update last cell.
    SEQAN_IF_CONSTEXPR (IsSameType<TStage, StageLower_>::VALUE)
    {
        TScore score = ordEqual(textChar, value(patternIt)) ? 0 : 1;
        // C[i,j] = min { C[i-1,j-1] + d(t,p), C[i-1,j] + 1, C[i,j-1] + 1 } [Diagonal, Left, Upper]
        value(currentIt) = _min3(value(previousIt) + score, value(previousIt + 1) + 1, value(currentIt - 1) + 1);
        ++previousIt;
    }
    else
    {
        TScore score = ordEqual(textChar, value(patternIt)) ? 0 : 1;
        // Last row: the [Left] move does not exist here.
        // C[i,j] = min { C[i-1,j-1] + d(t,p), C[i,j-1] + 1 } [Diagonal, Upper]
        value(currentIt) = _min(value(previousIt) + score, value(currentIt - 1) + 1);
    }

    // Assert end of columns.
    SEQAN_ASSERT_EQ(currentIt + 1, end(current, Standard()));
    SEQAN_ASSERT_EQ(previousIt + 1, end(previous, Standard()));
}
// Final stage: the column has shrunk to a single cell, updated from the
// diagonal and left predecessors of the previous (two-cell) column.
template <typename TVertexScore, typename TTextValue, typename TPatternValue>
inline void
_updateVertexScore(TVertexScore current,
TVertexScore const previous,
TTextValue textChar,
TPatternValue patternChar,
StageFinal_ const & /* tag */)
{
typedef typename Iterator<TVertexScore, Standard>::Type TVertexScoreIterator;
typedef typename Iterator<TVertexScore const, Standard>::Type TVertexScoreConstIterator;
typedef typename Value<TVertexScore>::Type TScore;
TVertexScoreIterator currentIt = begin(current, Standard());
TVertexScoreConstIterator previousIt = begin(previous, Standard());
// Update last cell.
TScore score = ordEqual(textChar, patternChar) ? 0 : 1;
// C[i,j] = min { C[i-1,j-1] + d(t,p), C[i-1,j] + 1 } [Diagonal, Left]
value(currentIt) = _min(value(previousIt) + score, value(previousIt + 1) + 1);
// Assert end of columns.
SEQAN_ASSERT_EQ(currentIt + 1, end(current, Standard()));
SEQAN_ASSERT_EQ(previousIt + 2, end(previous, Standard()));
}
// ----------------------------------------------------------------------------
// Function copyBackAndResize() [StringSet]
// ----------------------------------------------------------------------------
// Duplicates the last string of the set and resizes the copy by delta
// symbols (delta may be negative to shrink it).
// NOTE(review): manipulates stringSet.limits and stringSet.concat
// directly, which presumes a concat-direct StringSet layout — confirm.
template <typename TString, typename TSSetSpec, typename TDelta>
inline void
copyBackAndResize(StringSet<TString, TSSetSpec> & stringSet, TDelta delta)
{
// Copy last element to the back.
appendValue(stringSet, back(stringSet));
// Update limits.
back(stringSet.limits) += delta;
// Resize concat.
resize(stringSet.concat, length(stringSet.concat) + delta);
}
// ----------------------------------------------------------------------------
// Function clear()
// ----------------------------------------------------------------------------
// Empties the finder's backtracking state: the score stack and both
// iterator stacks.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TDistance, typename TSpec>
inline void
clear(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> > & finder)
{
    // The three stacks are independent, so the order is irrelevant.
    clear(finder.scoreStack);
    clear(finder.textStack);
    clear(finder.patternStack);
}
// ----------------------------------------------------------------------------
// Function _initState()
// ----------------------------------------------------------------------------
// Seeds the finder with the given text and pattern iterators as the
// bottom of the stacks and pushes the initial (zero) score state.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TDistance, typename TSpec, typename TTextIterator, typename TPatternIterator>
inline void
_initState(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> > & finder,
TTextIterator const & textIt, TPatternIterator const & patternIt)
{
// Init iterators.
appendValue(finder.textStack, textIt);
appendValue(finder.patternStack, patternIt);
_initScore(finder);
#ifdef SEQAN_DEBUG
_printState(finder, StageInitial_());
#endif
}
// ----------------------------------------------------------------------------
// Function _initScore()
// ----------------------------------------------------------------------------
// Generic overload: pushes a default-constructed (zero) score as the
// initial score state.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TDistance, typename TSpec>
inline void
_initScore(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> > & finder)
{
typedef Backtracking<TDistance, TSpec> TBacktracking;
typedef typename Score_<TBacktracking>::Type TScore;
// Push zero.
appendValue(finder.scoreStack, TScore());
}
// EditDistance overload: the score stack holds DP columns; push an
// initial column containing a single zero cell by extending the
// concat-direct members directly.
// NOTE(review): append(limits, 1) presumably records the new limit so
// the first column spans one cell — confirm against StringSet limits
// conventions (limits are cumulative end positions).
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline void
_initScore(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<EditDistance, TSpec> > & finder)
{
typedef Backtracking<EditDistance, TSpec> TBacktracking;
typedef typename Score_<TBacktracking>::Type TScore;
// Push a column with one zero cell.
append(finder.scoreStack.limits, 1);
appendValue(finder.scoreStack.concat, TScore());
}
// ----------------------------------------------------------------------------
// Function _pushState()
// ----------------------------------------------------------------------------
// Pushes a new state (iterators + score) and tries to descend one level.
// Returns true and updates the score on success; otherwise rolls back
// the push and returns false. Push/pop pairing is order-critical.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TDistance, typename TSpec, typename TStage>
inline bool
_pushState(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> > & finder,
TStage const & /* tag */)
{
_pushIterators(finder, TStage());
_pushScore(finder, TStage());
if (_moveIteratorsDown(finder, TStage()))
{
_updateScore(finder, TStage());
#ifdef SEQAN_DEBUG
_printPush(finder, TStage());
#endif
return true;
}
// Descent failed: undo the push so the stacks stay balanced.
_popIterators(finder, TStage());
_popScore(finder, TStage());
return false;
}
// ----------------------------------------------------------------------------
// Function _pushIterators()
// ----------------------------------------------------------------------------
// TODO(esiragusa): Specialize _pushIterators() for StageInitial_ and StageFinal_ of EditDistance
// Duplicates the top text and pattern iterators so the descent can be
// undone by popping.
// TODO(esiragusa): Specialize _pushIterators() for StageInitial_ and StageFinal_ of EditDistance
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TDistance, typename TSpec, typename TStage>
inline void
_pushIterators(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> > & finder,
               TStage const & /* tag */)
{
    // The two stacks are independent; copy each top element onto itself.
    appendValue(finder.textStack, back(finder.textStack));
    appendValue(finder.patternStack, back(finder.patternStack));
}
// ----------------------------------------------------------------------------
// Function _pushScore()
// ----------------------------------------------------------------------------
// Generic overload: duplicates the top score so _updateScore() can
// overwrite it in place.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TDistance, typename TSpec, typename TStage>
inline void
_pushScore(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> > & finder,
TStage const & /* tag */)
{
// Copy the last score on top of the stack.
appendValue(finder.scoreStack, back(finder.scoreStack));
}
// HammingDistance/exact stage: the score is frozen during exact search,
// so there is nothing to push.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline void
_pushScore(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<HammingDistance, TSpec> > & /* finder */,
StageExact_ const & /* tag */)
{
// Do nothing.
}
// EditDistance/initial stage: the DP column grows by one cell per level.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline void
_pushScore(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<EditDistance, TSpec> > & finder,
StageInitial_ const & /* tag */)
{
// Copy the last column on top of the stack and add one cell.
copyBackAndResize(finder.scoreStack, 1);
}
// EditDistance/upper stage: same growth as the initial stage.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline void
_pushScore(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<EditDistance, TSpec> > & finder,
StageUpper_ const & /* tag */)
{
_pushScore(finder, StageInitial_());
}
// EditDistance/lower stage: the DP column shrinks by one cell per level.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline void
_pushScore(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<EditDistance, TSpec> > & finder,
StageLower_ const & /* tag */)
{
// Copy the last column on top of the stack and remove one cell.
copyBackAndResize(finder.scoreStack, -1);
}
// EditDistance/final stage: same shrinkage as the lower stage.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline void
_pushScore(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<EditDistance, TSpec> > & finder,
StageFinal_ const & /* tag */)
{
_pushScore(finder, StageLower_());
}
// ----------------------------------------------------------------------------
// Function _moveIteratorsDown()
// ----------------------------------------------------------------------------
// Generic overload: descends one level in both trees. Short-circuit
// order matters: the pattern iterator is only moved when the text
// descent succeeded.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TDistance, typename TSpec, typename TStage>
inline bool
_moveIteratorsDown(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> > & finder,
TStage const & /* tag */)
{
// Go down in text and pattern.
return goDown(back(finder.textStack)) && goDown(back(finder.patternStack));
}
// HammingDistance/exact stage: descend in the pattern and follow the new
// pattern edge label in the text. When that fails, fall back to moving
// right in the pattern (which retries the text search per sibling).
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline bool
_moveIteratorsDown(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<HammingDistance, TSpec> > & finder,
StageExact_ const & /* tag */)
{
typedef Index<TText, TTextIndexSpec> TTextIndex;
typedef Index<TPattern, TPatternIndexSpec> TPatternIndex;
typedef Backtracking<HammingDistance, TSpec> TBacktracking;
typedef typename TextIterator_<TTextIndex, TPatternIndex, TBacktracking>::Type TTextIterator;
typedef typename PatternIterator_<TTextIndex, TPatternIndex, TBacktracking>::Type TPatternIterator;
TTextIterator & textIt = back(finder.textStack);
TPatternIterator & patternIt = back(finder.patternStack);
// Go down in pattern and search pattern label in text.
if (goDown(patternIt) && goDown(textIt, parentEdgeLabel(patternIt)))
return true;
// Otherwise go right in pattern.
return _moveIteratorsRight(finder, StageExact_());
}
// EditDistance/initial stage: only the pattern is descended (the text is
// not consumed while the column is still growing).
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline bool
_moveIteratorsDown(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<EditDistance, TSpec> > & finder,
StageInitial_ const & /* tag */)
{
// Go down in pattern.
return goDown(back(finder.patternStack));
}
// EditDistance/lower stage: only the text is descended (the pattern is
// exhausted while the column shrinks).
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline bool
_moveIteratorsDown(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<EditDistance, TSpec> > & finder,
StageLower_ const & /* tag */)
{
// Go down in text.
return goDown(back(finder.textStack));
}
// EditDistance/final stage: same movement as the lower stage.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline bool
_moveIteratorsDown(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<EditDistance, TSpec> > & finder,
StageFinal_ const & /* tag */)
{
return _moveIteratorsDown(finder, StageLower_());
}
// ----------------------------------------------------------------------------
// Function _nextState()
// ----------------------------------------------------------------------------
// Advances to the next sibling state at the current depth and refreshes
// the score. Returns false when there is no sibling left.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TDistance, typename TSpec, typename TStage>
inline bool
_nextState(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> > & finder,
TStage const & /* tag */)
{
if (_moveIteratorsRight(finder, TStage()))
{
_updateScore(finder, TStage());
#ifdef SEQAN_DEBUG
_printState(finder, TStage());
#endif
return true;
}
return false;
}
// ----------------------------------------------------------------------------
// Function _moveIteratorsRight()
// ----------------------------------------------------------------------------
// Generic overload: first tries the next pattern sibling; when the
// pattern row is exhausted, moves to the next text sibling and rewinds
// the pattern to the leftmost child of its parent.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TDistance, typename TSpec, typename TStage>
inline bool
_moveIteratorsRight(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> > & finder,
TStage const & /* tag */)
{
// Try to go right in the pattern.
if (goRight(back(finder.patternStack)))
{
return true;
}
// Try to go right in the text.
else if (goRight(back(finder.textStack)))
{
// Move to the leftmost pattern.
// back(finder.patternStack) = finder.patternLeftmost;
// Rewind by popping the current pattern iterator, re-copying its
// parent, and descending to the first child again.
eraseBack(finder.patternStack);
appendValue(finder.patternStack, back(finder.patternStack));
goDown(back(finder.patternStack));
return true;
}
return false;
}
// HammingDistance/exact stage: for each pattern sibling, restore the
// text iterator to the level below the top (the pre-descent position)
// and re-search the pattern edge label in the text.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline bool
_moveIteratorsRight(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<HammingDistance, TSpec> > & finder,
StageExact_ const & /* tag */)
{
typedef Index<TText, TTextIndexSpec> TTextIndex;
typedef Index<TPattern, TPatternIndexSpec> TPatternIndex;
typedef Backtracking<HammingDistance, TSpec> TBacktracking;
typedef typename TextIterator_<TTextIndex, TPatternIndex, TBacktracking>::Type TTextIterator;
typedef typename PatternIterator_<TTextIndex, TPatternIndex, TBacktracking>::Type TPatternIterator;
// TODO(esiragusa): Implement goRight(it, pattern).
// TTextIterator & textIt = back(finder.textStack);
// TPatternIterator & patternIt = back(finder.patternStack);
// return goRight(patternIt) && goRight(textIt, parentEdgeLabel(patternIt));
TPatternIterator & patternIt = back(finder.patternStack);
// Try to go right in the pattern.
while (goRight(patternIt))
{
// Move up in the text.
back(finder.textStack) = finder.textStack[length(finder.textStack) - 2];
// Search pattern label in text.
TTextIterator & textIt = back(finder.textStack);
if (goDown(textIt, parentEdgeLabel(patternIt)))
return true;
}
return false;
}
// EditDistance/initial stage: only the pattern iterator moves.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline bool
_moveIteratorsRight(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<EditDistance, TSpec> > & finder,
StageInitial_ const & /* tag */)
{
// Try to go right in the pattern.
return goRight(back(finder.patternStack));
}
// EditDistance/lower stage: only the text iterator moves.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline bool
_moveIteratorsRight(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<EditDistance, TSpec> > & finder,
StageLower_ const & /* tag */)
{
// Try to go right in the text.
return goRight(back(finder.textStack));
}
// ----------------------------------------------------------------------------
// Function _popState()
// ----------------------------------------------------------------------------
// Discards the top state (iterators and score), mirroring _pushState().
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TDistance, typename TSpec, typename TStage>
inline void
_popState(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> > & finder,
TStage const & /* tag */)
{
_popIterators(finder, TStage());
_popScore(finder, TStage());
#ifdef SEQAN_DEBUG
_printPop(finder, TStage());
#endif
}
// ----------------------------------------------------------------------------
// Function _popIterators()
// ----------------------------------------------------------------------------
// TODO(esiragusa): Specialize _popIterators() for StageInitial_ and StageFinal_ of EditDistance
// Removes the top text and pattern iterators, undoing _pushIterators().
// TODO(esiragusa): Specialize _popIterators() for StageInitial_ and StageFinal_ of EditDistance
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TDistance, typename TSpec, typename TStage>
inline void
_popIterators(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> > & finder,
              TStage const & /* tag */)
{
    // The two stacks are independent; pop each one.
    eraseBack(finder.patternStack);
    eraseBack(finder.textStack);
}
// ----------------------------------------------------------------------------
// Function _popScore()
// ----------------------------------------------------------------------------
// Generic overload: removes the top score, undoing _pushScore().
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TDistance, typename TSpec, typename TStage>
inline void
_popScore(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> > & finder,
TStage const & /* tag */)
{
eraseBack(finder.scoreStack);
}
// HammingDistance/exact stage: nothing was pushed, so nothing is popped
// (mirrors the no-op _pushScore() overload).
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline void
_popScore(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<HammingDistance, TSpec> > & /* finder */,
StageExact_ const & /* tag */)
{
// Do nothing.
}
// ----------------------------------------------------------------------------
// Function _updateScore()
// ----------------------------------------------------------------------------
// Default overload: no score bookkeeping for unspecialized
// distance/stage combinations.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TDistance, typename TSpec, typename TStage>
inline void
_updateScore(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> > & /* finder */,
TStage const & /* tag */)
{}
// HammingDistance: the new top score is the score below it plus the
// mismatch cost of the current parent edge labels.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec, typename TStage>
inline void
_updateScore(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<HammingDistance, TSpec> > & finder,
TStage const & /* tag */)
{
typedef Backtracking<HammingDistance, TSpec> TBacktracking;
typedef typename Score_<TBacktracking>::Type TScore;
// Compute score of text and pattern.
TScore score = ordEqual(parentEdgeLabel(back(finder.textStack)), parentEdgeLabel(back(finder.patternStack))) ? 0 : 1;
// Add score to previous score.
back(finder.scoreStack) = value(finder.scoreStack, length(finder.scoreStack) - 2) + score;
}
// HammingDistance/exact stage: the score is frozen during exact search.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline void
_updateScore(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<HammingDistance, TSpec> > & /* finder */,
StageExact_ const & /* tag */)
{
// Do nothing.
}
// EditDistance/initial stage: extend the top DP column from the column
// below it (no symbols compared yet).
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline void
_updateScore(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<EditDistance, TSpec> > & finder,
StageInitial_ const & /* tag */)
{
_updateVertexScore(back(finder.scoreStack), value(finder.scoreStack, length(finder.scoreStack) - 2), StageInitial_());
}
// EditDistance/upper stage: recompute the top column against the whole
// pattern prefix read so far (iteration starts at the first symbol).
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline void
_updateScore(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<EditDistance, TSpec> > & finder,
StageUpper_ const & /* tag */)
{
typedef Index<TPattern, TPatternIndexSpec> TPatternIndex;
typedef typename Fibre<TPatternIndex, FibreText>::Type const TPatternFibreText;
typedef typename InfixOnValue<TPatternFibreText>::Type TPatternRepr;
typedef typename Iterator<TPatternRepr, Standard>::Type TPatternReprIterator;
// Get pattern read so far.
TPatternRepr pattern = representative(back(finder.patternStack));
TPatternReprIterator patternIt = begin(pattern, Standard());
_updateVertexScore(back(finder.scoreStack),
value(finder.scoreStack, length(finder.scoreStack) - 2),
parentEdgeLabel(back(finder.textStack)),
patternIt,
StageUpper_());
}
// EditDistance/diagonal stage: the banded column covers the last
// 2k + 1 pattern symbols (k == finder.maxScore), so iteration starts
// that far from the end of the pattern read so far.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline void
_updateScore(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<EditDistance, TSpec> > & finder,
StageDiagonal_ const & /* tag */)
{
typedef Index<TPattern, TPatternIndexSpec> TPatternIndex;
typedef typename Fibre<TPatternIndex, FibreText>::Type const TPatternFibreText;
typedef typename InfixOnValue<TPatternFibreText>::Type TPatternRepr;
typedef typename Iterator<TPatternRepr, Standard>::Type TPatternReprIterator;
// Get last 2k + 1 pattern symbols.
TPatternRepr pattern = representative(back(finder.patternStack));
TPatternReprIterator patternIt = end(pattern, Standard()) - (2 * finder.maxScore + 1);
_updateVertexScore(back(finder.scoreStack),
value(finder.scoreStack, length(finder.scoreStack) - 2),
parentEdgeLabel(back(finder.textStack)),
patternIt,
StageDiagonal_());
}
// EditDistance/lower stage: the shrinking column covers as many trailing
// pattern symbols as it has cells, so iteration starts that far from
// the end of the pattern read so far.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline void
_updateScore(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<EditDistance, TSpec> > & finder,
StageLower_ const & /* tag */)
{
typedef Index<TPattern, TPatternIndexSpec> TPatternIndex;
typedef typename Fibre<TPatternIndex, FibreText>::Type const TPatternFibreText;
typedef typename InfixOnValue<TPatternFibreText>::Type TPatternRepr;
typedef typename Iterator<TPatternRepr, Standard>::Type TPatternReprIterator;
// Get last pattern symbols.
TPatternRepr pattern = representative(back(finder.patternStack));
TPatternReprIterator patternIt = end(pattern, Standard()) - (length(back(finder.scoreStack)));
_updateVertexScore(back(finder.scoreStack),
value(finder.scoreStack, length(finder.scoreStack) - 2),
parentEdgeLabel(back(finder.textStack)),
patternIt,
StageLower_());
}
// EditDistance/final stage: a single cell remains; compare the current
// text and pattern parent edge labels directly.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline void
_updateScore(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<EditDistance, TSpec> > & finder,
StageFinal_ const & /* tag */)
{
_updateVertexScore(back(finder.scoreStack),
value(finder.scoreStack, length(finder.scoreStack) - 2),
parentEdgeLabel(back(finder.textStack)),
parentEdgeLabel(back(finder.patternStack)),
StageFinal_());
}
// ----------------------------------------------------------------------------
// Function _setScoreThreshold()
// ----------------------------------------------------------------------------
// Sets the maximum number of errors (k) the search tolerates.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TDistance, typename TSpec, typename TMaxScore>
inline void
_setScoreThreshold(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> > & finder,
TMaxScore maxScore)
{
finder.maxScore = maxScore;
}
// ----------------------------------------------------------------------------
// Function _getMinScore()
// ----------------------------------------------------------------------------
// HammingDistance: the single running score is also the minimum.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline typename Score_<Backtracking<HammingDistance, TSpec> >::Type
_getMinScore(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<HammingDistance, TSpec> > const & finder)
{
return _getScore(finder);
}
// EditDistance: the minimum cell of the current DP column is the best
// score still reachable from this state (used for pruning).
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline typename Score_<Backtracking<EditDistance, TSpec> >::Type
_getMinScore(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<EditDistance, TSpec> > const & finder)
{
typedef Backtracking<EditDistance, TSpec> TBacktracking;
typedef typename VertexScore_<TBacktracking>::ConstType TVertexScore;
TVertexScore column = back(finder.scoreStack);
// Return the min value in column.
return value(std::min_element(begin(column, Standard()), end(column, Standard())));
}
// ----------------------------------------------------------------------------
// Function _getScore()
// ----------------------------------------------------------------------------
// HammingDistance: the current score is the top of the score stack.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline typename Score_<Backtracking<HammingDistance, TSpec> >::Type
_getScore(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<HammingDistance, TSpec> > const & finder)
{
// Return the last value.
return back(finder.scoreStack);
}
// EditDistance: the current score is the last cell of the top DP column.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline typename Score_<Backtracking<EditDistance, TSpec> >::Type
_getScore(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<EditDistance, TSpec> > const & finder)
{
// Return the value of last cell in column.
return back(back(finder.scoreStack));
}
// ----------------------------------------------------------------------------
// Function _textIterator()
// ----------------------------------------------------------------------------
// Returns a mutable reference to the current (top-of-stack) text iterator.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TDistance, typename TSpec>
inline
typename TextIterator_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> >::Type &
_textIterator(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> > & finder)
{
return back(finder.textStack);
}
// Const overload: read-only access to the current text iterator.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TDistance, typename TSpec>
inline
typename TextIterator_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> >::Type const &
_textIterator(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> > const & finder)
{
return back(finder.textStack);
}
// ----------------------------------------------------------------------------
// Function _patternIterator()
// ----------------------------------------------------------------------------
// Returns a mutable reference to the current (top-of-stack) pattern iterator.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TDistance, typename TSpec>
inline
typename PatternIterator_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> >::Type &
_patternIterator(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> > & finder)
{
return back(finder.patternStack);
}
// Const overload: read-only access to the current pattern iterator.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TDistance, typename TSpec>
inline
typename PatternIterator_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> >::Type const &
_patternIterator(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> > const & finder)
{
return back(finder.patternStack);
}
// ----------------------------------------------------------------------------
// Function _inTerminalState()
// ----------------------------------------------------------------------------
// Default overload: only specialized final/lower stages can report a hit.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TDistance, typename TSpec, typename TStage>
inline bool
_inTerminalState(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> > const & /* finder */,
TStage const & /* tag */)
{
// The current state is not terminal by default.
return false;
}
// HammingDistance/final stage: terminal when the accumulated mismatch
// count does not exceed the threshold.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline bool
_inTerminalState(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<HammingDistance, TSpec> > const & finder,
StageFinal_ const & /* tag */)
{
// Is the score within the max score?
return _getScore(finder) <= finder.maxScore;
}
// EditDistance/lower stage: terminal when the current DP score is
// within the threshold.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline bool
_inTerminalState(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<EditDistance, TSpec> > const & finder,
StageLower_ const & /* tag */)
{
// Is the score within the max score?
return _getScore(finder) <= finder.maxScore;
}
// EditDistance/final stage: terminal when the current DP score is
// within the threshold.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline bool
_inTerminalState(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<EditDistance, TSpec> > const & finder,
StageFinal_ const & /* tag */)
{
// Is the score within the max score?
return _getScore(finder) <= finder.maxScore;
}
// ----------------------------------------------------------------------------
// Function _inActiveState()
// ----------------------------------------------------------------------------
// A state stays active (worth exploring) while the best score still
// reachable from it does not exceed the threshold.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TDistance, typename TSpec, typename TStage>
inline bool
_inActiveState(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> > const & finder,
TStage const & /* tag */)
{
// Is the minimum score within the max score?
return _getMinScore(finder) <= finder.maxScore;
}
// HammingDistance/exact stage: the score cannot grow during exact
// search, so every visited state is active.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline bool
_inActiveState(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<HammingDistance, TSpec> > const & /* finder */,
StageExact_ const & /* tag */)
{
// Exact search only walks through active states.
return true;
}
// ----------------------------------------------------------------------------
// Function _moveToStageExact()
// ----------------------------------------------------------------------------
// Default overload: distances without an exact-search speedup never
// switch to the exact stage.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TDistance, typename TSpec, typename TStage>
inline bool
_moveToStageExact(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> > const & /* finder */,
TStage const & /* tag */)
{
// By default there is no exact stage.
return false;
}
// HammingDistance/initial stage: once all k errors are spent, only an
// exact match of the remainder can succeed, so switch to exact search.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline bool
_moveToStageExact(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<HammingDistance, TSpec> > const & finder,
StageInitial_ const & /* tag */)
{
// Was the maximum score attained?
return _getMinScore(finder) == finder.maxScore;
}
//template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec, typename TStage>
//inline bool
//_moveToStageExact(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<EditDistance, TSpec> > const & finder,
// TStage const & /* tag */)
//{
// // TODO(esiragusa): Implement exact search speedup for EditDistance.
// return _getMinScore(finder) == finder.maxScore;
//}
// ----------------------------------------------------------------------------
// Function _moveToNextStage()
// ----------------------------------------------------------------------------
// Default overload: no stage follows the current one.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TDistance, typename TSpec, typename TStage>
inline bool
_moveToNextStage(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> > const & /* finder */,
                 TStage const & /* tag */)
{
    return false;
}
// HammingDistance / initial stage: advance as soon as the whole pattern
// prefix on the stack has been consumed.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline bool
_moveToNextStage(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<HammingDistance, TSpec> > const & finder,
                 StageInitial_ const & /* tag */)
{
    bool const patternConsumed = isRightTerminal(back(finder.patternStack));
    return patternConsumed;
}
// HammingDistance / exact stage: same exit condition as the initial stage —
// advance once the pattern is fully consumed.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline bool
_moveToNextStage(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<HammingDistance, TSpec> > const & finder,
                 StageExact_ const & /* tag */)
{
    bool const patternConsumed = isRightTerminal(back(finder.patternStack));
    return patternConsumed;
}
// EditDistance / initial stage: move to the upper stage after k = maxScore
// pattern symbols have been consumed (or earlier, if the pattern ends first).
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline bool
_moveToNextStage(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<EditDistance, TSpec> > const & finder,
                 StageInitial_ const & /* tag */)
{
    if (repLength(back(finder.patternStack)) >= finder.maxScore)
        return true;
    return isRightTerminal(back(finder.patternStack));
}
// EditDistance / upper stage: move to the diagonal stage once the DP band
// has grown to its full width of 2k + 1 cells (or the pattern ended).
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline bool
_moveToNextStage(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<EditDistance, TSpec> > const & finder,
                 StageUpper_ const & /* tag */)
{
    if (length(back(finder.scoreStack)) >= 2u * finder.maxScore + 1u)
        return true;
    return isRightTerminal(back(finder.patternStack));
}
// EditDistance / diagonal stage: move to the lower stage after the whole
// pattern has been consumed.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline bool
_moveToNextStage(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<EditDistance, TSpec> > const & finder,
                 StageDiagonal_ const & /* tag */)
{
    bool const patternConsumed = isRightTerminal(back(finder.patternStack));
    return patternConsumed;
}
// EditDistance / lower stage: move to the final stage when the shrinking
// DP column has at most one cell left to compute.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline bool
_moveToNextStage(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<EditDistance, TSpec> > const & finder,
                 StageLower_ const & /* tag */)
{
    bool const oneCellLeft = (length(back(finder.scoreStack)) <= 2);
    return oneCellLeft;
}
// ----------------------------------------------------------------------------
// Function _stayInCurrentStage()
// ----------------------------------------------------------------------------
// Default overload: remain in the current stage.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TDistance, typename TSpec, typename TStage>
inline bool
_stayInCurrentStage(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> > const & /* finder */,
                    TStage const & /* tag */)
{
    return true;
}
// EditDistance / initial stage: stay here while fewer than maxScore pattern
// symbols have been consumed (complement of the stage-exit condition).
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline bool
_stayInCurrentStage(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<EditDistance, TSpec> > const & finder,
                    StageInitial_ const & /* tag */)
{
    bool const belowBudget = (repLength(back(finder.patternStack)) < finder.maxScore);
    return belowBudget;
}
// EditDistance / upper stage: stay here while the DP band is still narrower
// than its full width of 2k + 1 cells.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline bool
_stayInCurrentStage(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<EditDistance, TSpec> > const & finder,
                    StageUpper_ const & /* tag */)
{
    bool const bandNotFull = (length(back(finder.scoreStack)) < 2u * finder.maxScore + 1u);
    return bandNotFull;
}
// EditDistance / lower stage: stay here while more than one DP cell remains.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline bool
_stayInCurrentStage(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<EditDistance, TSpec> > const & finder,
                    StageLower_ const & /* tag */)
{
    bool const moreCellsLeft = (length(back(finder.scoreStack)) > 2);
    return moreCellsLeft;
}
// ----------------------------------------------------------------------------
// Functions _print*()
// ----------------------------------------------------------------------------
// NOTE(esiragusa): Debug functions.
// Debug helper: logs a recursive _find() entry together with the text and
// pattern prefixes consumed so far.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TDistance, typename TSpec, typename TStage>
inline void
_printCall(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> > const & finder,
           TStage const & /* tag */)
{
    std::cout << "call: ";
    _printFindSignature(finder, TStage());
    std::cout << "past text: " << representative(back(finder.textStack)) << std::endl
              << "past pattern: " << representative(back(finder.patternStack)) << std::endl;
}
// Debug helper: logs a recursive _find() exit.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TDistance, typename TSpec, typename TStage>
inline void
_printReturn(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> > const & finder,
             TStage const & /* tag */)
{
    std::cout << "return: ";
    _printFindSignature(finder, TStage());
}
// Debug helper: logs a state push followed by the new state's contents.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TDistance, typename TSpec, typename TStage>
inline void
_printPush(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> > const & finder,
           TStage const & /* tag */)
{
    std::cout << "push: ";
    _printFindSignature(finder, TStage());
    _printState(finder, TStage());
}
// Debug helper: logs a state pop followed by the restored state's contents.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TDistance, typename TSpec, typename TStage>
inline void
_printPop(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> > const & finder,
          TStage const & /* tag */)
{
    std::cout << "pop: ";
    _printFindSignature(finder, TStage());
    _printState(finder, TStage());
}
// Debug helper, fallback overload: prints generic placeholders when no
// <distance, stage> specialization below applies.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TDistance, typename TSpec, typename TStage>
inline void
_printFindSignature(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> > const & /* finder */,
                    TStage const & /* tag */)
{
    std::cout << "<TDistance, TStage>" << std::endl;
}
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline void
_printFindSignature(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<HammingDistance, TSpec> > const & /* finder */,
StageInitial_ const & /* tag */)
{
std::cout << "<HammingDistance, StageInitial>" << std::endl;
}
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline void
_printFindSignature(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<HammingDistance, TSpec> > const & /* finder */,
StageFinal_ const & /* tag */)
{
std::cout << "<HammingDistance, StageFinal>" << std::endl;
}
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline void
_printFindSignature(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<HammingDistance, TSpec> > const & /* finder */,
StageExact_ const & /* tag */)
{
std::cout << "<HammingDistance, StageExact>" << std::endl;
}
// Debug helpers: name the EditDistance stage specializations when tracing.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline void
_printFindSignature(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<EditDistance, TSpec> > const & /* finder */,
                    StageInitial_ const & /* tag */)
{
    std::cout << "<EditDistance, StageInitial>" << std::endl;
}

template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline void
_printFindSignature(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<EditDistance, TSpec> > const & /* finder */,
                    StageUpper_ const & /* tag */)
{
    std::cout << "<EditDistance, StageUpper>" << std::endl;
}

template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline void
_printFindSignature(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<EditDistance, TSpec> > const & /* finder */,
                    StageDiagonal_ const & /* tag */)
{
    std::cout << "<EditDistance, StageDiagonal>" << std::endl;
}

template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline void
_printFindSignature(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<EditDistance, TSpec> > const & /* finder */,
                    StageLower_ const & /* tag */)
{
    std::cout << "<EditDistance, StageLower>" << std::endl;
}

template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec>
inline void
_printFindSignature(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<EditDistance, TSpec> > const & /* finder */,
                    StageFinal_ const & /* tag */)
{
    std::cout << "<EditDistance, StageFinal>" << std::endl;
}
// Debug helper (HammingDistance): dumps the edge labels of the current
// text/pattern states and the accumulated error count.
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec, typename TStage>
inline void
_printState(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<HammingDistance, TSpec> > const & finder,
            TStage const & /* tag */)
{
    std::cout << "text: " << parentEdgeLabel(back(finder.textStack)) << std::endl
              << "pattern: " << parentEdgeLabel(back(finder.patternStack)) << std::endl
              << "errors: " << static_cast<unsigned>(_getScore(finder)) << std::endl;
}
// Debug helper (EditDistance): dumps the edge labels of the current
// text/pattern states and the current DP score column, rendered as
// "|v0|v1|...|" (each value followed by a '|').
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TSpec, typename TStage>
inline void
_printState(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<EditDistance, TSpec> > const & finder,
TStage const & /* tag */)
{
std::cout << "text: " << parentEdgeLabel(back(finder.textStack)) << std::endl;
std::cout << "pattern: " << parentEdgeLabel(back(finder.patternStack)) << std::endl;
std::cout << "column: " << "|";
// Stream every score cell of the top column; ostream_iterator appends the
// '|' separator after each value.
std::copy(begin(back(finder.scoreStack), Standard()),
end(back(finder.scoreStack), Standard()),
std::ostream_iterator<int>(std::cout, "|"));
std::cout << std::endl;
}
// ----------------------------------------------------------------------------
// Function _find()
// ----------------------------------------------------------------------------
// Core backtracking recursion: simultaneously descends the text and pattern
// index tries, dispatching on the current search stage via the TStage tag.
// Matches are reported through delegate(finder) (inversion of control).
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TDistance, typename TSpec, typename TStage, typename TDelegate>
inline void
_find(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> > & finder,
TDelegate & delegate,
TStage const & /* tag */)
{
typedef Backtracking<TDistance, TSpec> TBacktracking;
// Stage that follows TStage in this distance metric's pipeline.
typedef typename NextStage_<TBacktracking, TStage>::Type TNextStage;
#ifdef SEQAN_DEBUG
_printCall(finder, TStage());
#endif
// If the error budget is exhausted, only exact extensions can still match:
// short-circuit into the exact-search stage.
if (_moveToStageExact(finder, TStage()))
{
_find(finder, delegate, StageExact_());
}
else
{
// First recurse into the next stage if its entry condition holds ...
if (_moveToNextStage(finder, TStage()))
{
_find(finder, delegate, TNextStage());
}
// ... then keep exploring the current stage while it still applies.
if (_stayInCurrentStage(finder, TStage()))
{
if (_inTerminalState(finder, TStage()))
{
// Inversion of control.
delegate(finder);
}
else if (_inActiveState(finder, TStage()))
{
// Depth-first exploration: push a child state, recurse, iterate
// over its siblings, then restore the parent state.
if (_pushState(finder, TStage()))
{
do
{
_find(finder, delegate, TStage());
}
while (_nextState(finder, TStage()));
_popState(finder, TStage());
}
}
}
}
#ifdef SEQAN_DEBUG
_printReturn(finder, TStage());
#endif
}
// ----------------------------------------------------------------------------
// Function _find()
// ----------------------------------------------------------------------------
// Entry point: searches the text index for all occurrences of the patterns
// in the pattern index with at most maxScore errors under TDistance, and
// reports every hit through delegate(finder).
template <typename TText, typename TTextIndexSpec, typename TPattern, typename TPatternIndexSpec, typename TDistance, typename TSpec, typename TValue, typename TDelegate>
inline void
_find(Finder_<Index<TText, TTextIndexSpec>, Index<TPattern, TPatternIndexSpec>, Backtracking<TDistance, TSpec> > & finder,
      Index<TText, TTextIndexSpec> & text,
      Index<TPattern, TPatternIndexSpec> & pattern,
      TValue maxScore,
      TDelegate & delegate)
{
    typedef Index<TText, TTextIndexSpec>                                              TTextIndex;
    typedef Index<TPattern, TPatternIndexSpec>                                        TPatternIndex;
    typedef Backtracking<TDistance, TSpec>                                            TBacktracking;
    typedef typename TextIterator_<TTextIndex, TPatternIndex, TBacktracking>::Type    TTextIterator;
    typedef typename PatternIterator_<TTextIndex, TPatternIndex, TBacktracking>::Type TPatternIterator;

    // Root iterators over both index tries.
    TTextIterator textRoot(text);
    TPatternIterator patternRoot(pattern);

    // Configure the error budget, seed the state stacks with the roots,
    // run the recursion from the initial stage, then unwind the root state.
    _setScoreThreshold(finder, maxScore);
    _initState(finder, textRoot, patternRoot);
    _find(finder, delegate, StageInitial_());
    _popState(finder, StageInitial_());
}
}
#endif // #ifndef SEQAN_FIND_BACKTRACKING_MULTIPLE_H_
|
{
"pile_set_name": "Github"
}
|
# created by tools/tclZIC.tcl - do not edit
# Timezone transition table for America/Lima.
# Each row appears to be {start-seconds-since-epoch  UTC-offset-seconds  DST-flag  abbreviation}
# per tclZIC output — TODO confirm against the Tcl tzdata format docs.
set TZData(:America/Lima) {
    {-9223372036854775808 -18492 0 LMT}
    {-2524503108 -18516 0 LMT}
    {-1938538284 -14400 0 -05}
    {-1002052800 -18000 0 -05}
    {-986756400 -14400 1 -05}
    {-971035200 -18000 0 -05}
    {-955306800 -14400 1 -05}
    {-939585600 -18000 0 -05}
    {512712000 -18000 0 -05}
    {544248000 -18000 0 -05}
    {638942400 -18000 0 -05}
    {765172800 -18000 0 -05}
}
|
{
"pile_set_name": "Github"
}
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="ko">
<head>
<!-- Generated by javadoc (1.8.0_191) on Tue Mar 03 19:55:19 KST 2020 -->
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>GsoHeaderProperty (HWP Library 1.0.1 API)</title>
<meta name="date" content="2020-03-03">
<link rel="stylesheet" type="text/css" href="../../../../../../../../stylesheet.css" title="Style">
<script type="text/javascript" src="../../../../../../../../script.js"></script>
</head>
<body>
<script type="text/javascript"><!--
try {
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="GsoHeaderProperty (HWP Library 1.0.1 API)";
}
}
catch(err) {
}
//-->
var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10};
var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
var altColor = "altColor";
var rowColor = "rowColor";
var tableTab = "tableTab";
var activeTableTab = "activeTableTab";
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar.top">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.top" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.top.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../../../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li class="navBarCell1Rev">Class</li>
<li><a href="class-use/GsoHeaderProperty.html">Use</a></li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev Class</li>
<li><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/HeightCriterion.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso"><span class="typeNameLink">Next Class</span></a></li>
</ul>
<ul class="navList">
<li><a href="../../../../../../../../index.html?kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/GsoHeaderProperty.html" target="_top">Frames</a></li>
<li><a href="GsoHeaderProperty.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<div>
<ul class="subNavList">
<li>Summary: </li>
<li>Nested | </li>
<li>Field | </li>
<li><a href="#constructor.summary">Constr</a> | </li>
<li><a href="#method.summary">Method</a></li>
</ul>
<ul class="subNavList">
<li>Detail: </li>
<li>Field | </li>
<li><a href="#constructor.detail">Constr</a> | </li>
<li><a href="#method.detail">Method</a></li>
</ul>
</div>
<a name="skip.navbar.top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<!-- ======== START OF CLASS DATA ======== -->
<div class="header">
<div class="subTitle">kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso</div>
<h2 title="Class GsoHeaderProperty" class="title">Class GsoHeaderProperty</h2>
</div>
<div class="contentContainer">
<ul class="inheritance">
<li><a href="http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">java.lang.Object</a></li>
<li>
<ul class="inheritance">
<li>kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso.GsoHeaderProperty</li>
</ul>
</li>
</ul>
<div class="description">
<ul class="blockList">
<li class="blockList">
<hr>
<br>
<pre>public class <span class="typeNameLabel">GsoHeaderProperty</span>
extends <a href="http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a></pre>
<div class="block">그리기 객체 컨트롤의 속성을 나타내는 객체</div>
<dl>
<dt><span class="simpleTagLabel">Author:</span></dt>
<dd>neolord</dd>
</dl>
</li>
</ul>
</div>
<div class="summary">
<ul class="blockList">
<li class="blockList">
<!-- ======== CONSTRUCTOR SUMMARY ======== -->
<ul class="blockList">
<li class="blockList"><a name="constructor.summary">
<!-- -->
</a>
<h3>Constructor Summary</h3>
<table class="memberSummary" border="0" cellpadding="3" cellspacing="0" summary="Constructor Summary table, listing constructors, and an explanation">
<caption><span>Constructors</span><span class="tabEnd"> </span></caption>
<tr>
<th class="colOne" scope="col">Constructor and Description</th>
</tr>
<tr class="altColor">
<td class="colOne"><code><span class="memberNameLink"><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/GsoHeaderProperty.html#GsoHeaderProperty--">GsoHeaderProperty</a></span>()</code>
<div class="block">생성자</div>
</td>
</tr>
</table>
</li>
</ul>
<!-- ========== METHOD SUMMARY =========== -->
<ul class="blockList">
<li class="blockList"><a name="method.summary">
<!-- -->
</a>
<h3>Method Summary</h3>
<table class="memberSummary" border="0" cellpadding="3" cellspacing="0" summary="Method Summary table, listing methods, and an explanation">
<caption><span id="t0" class="activeTableTab"><span>All Methods</span><span class="tabEnd"> </span></span><span id="t2" class="tableTab"><span><a href="javascript:show(2);">Instance Methods</a></span><span class="tabEnd"> </span></span><span id="t4" class="tableTab"><span><a href="javascript:show(8);">Concrete Methods</a></span><span class="tabEnd"> </span></span></caption>
<tr>
<th class="colFirst" scope="col">Modifier and Type</th>
<th class="colLast" scope="col">Method and Description</th>
</tr>
<tr id="i0" class="altColor">
<td class="colFirst"><code><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/HeightCriterion.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso">HeightCriterion</a></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/GsoHeaderProperty.html#getHeightCriterion--">getHeightCriterion</a></span>()</code>
<div class="block">오브젝트 높이의 기준을 반환한다 (18~19 bit)</div>
</td>
</tr>
<tr id="i1" class="rowColor">
<td class="colFirst"><code><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/RelativeArrange.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso">RelativeArrange</a></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/GsoHeaderProperty.html#getHorzRelativeArrange--">getHorzRelativeArrange</a></span>()</code>
<div class="block">HorzRelTo에 대한 상대적인 배열방식을 반환한다. (10~12 bit)</div>
</td>
</tr>
<tr id="i2" class="altColor">
<td class="colFirst"><code><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/HorzRelTo.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso">HorzRelTo</a></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/GsoHeaderProperty.html#getHorzRelTo--">getHorzRelTo</a></span>()</code>
<div class="block">가로 위치의 기준을 반환한다. (8~9 bit)</div>
</td>
</tr>
<tr id="i3" class="rowColor">
<td class="colFirst"><code><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/ObjectNumberSort.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso">ObjectNumberSort</a></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/GsoHeaderProperty.html#getObjectNumberSort--">getObjectNumberSort</a></span>()</code>
<div class="block">개체가 속하는 번호 범주를 반환한다. (26~28 bit)</div>
</td>
</tr>
<tr id="i4" class="altColor">
<td class="colFirst"><code><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/TextFlowMethod.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso">TextFlowMethod</a></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/GsoHeaderProperty.html#getTextFlowMethod--">getTextFlowMethod</a></span>()</code>
<div class="block">오브젝트 주위를 텍스트가 어떻게 흘러갈지 지정하는 옵션을 반환한다. (21~23 bit)</div>
</td>
</tr>
<tr id="i5" class="rowColor">
<td class="colFirst"><code><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/TextHorzArrange.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso">TextHorzArrange</a></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/GsoHeaderProperty.html#getTextHorzArrange--">getTextHorzArrange</a></span>()</code>
<div class="block">오브젝트의 좌/우 어느 쪽에 글을 배치할지 지정하는 옵션을 반환한다. (24~25 bit)</div>
</td>
</tr>
<tr id="i6" class="altColor">
<td class="colFirst"><code>long</code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/GsoHeaderProperty.html#getValue--">getValue</a></span>()</code>
<div class="block">파일에 저장되는 정수값을 반환한다.</div>
</td>
</tr>
<tr id="i7" class="rowColor">
<td class="colFirst"><code><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/RelativeArrange.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso">RelativeArrange</a></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/GsoHeaderProperty.html#getVertRelativeArrange--">getVertRelativeArrange</a></span>()</code>
<div class="block">세로 위치의 기준에 대한 상대적인 배열방식을 반환한다. (5~7 bit)</div>
</td>
</tr>
<tr id="i8" class="altColor">
<td class="colFirst"><code><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/VertRelTo.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso">VertRelTo</a></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/GsoHeaderProperty.html#getVertRelTo--">getVertRelTo</a></span>()</code>
<div class="block">세로 위치의 기준을 반환한다. (3~4 bit)</div>
</td>
</tr>
<tr id="i9" class="rowColor">
<td class="colFirst"><code><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/WidthCriterion.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso">WidthCriterion</a></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/GsoHeaderProperty.html#getWidthCriterion--">getWidthCriterion</a></span>()</code>
<div class="block">오브젝트 폭의 기준을 반환한다. (15~17 bit)</div>
</td>
</tr>
<tr id="i10" class="altColor">
<td class="colFirst"><code>boolean</code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/GsoHeaderProperty.html#isAllowOverlap--">isAllowOverlap</a></span>()</code>
<div class="block">다른 오브젝트와 겹치는 것을 허용할지 여부를 반환한다. (14 bit)</div>
</td>
</tr>
<tr id="i11" class="rowColor">
<td class="colFirst"><code>boolean</code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/GsoHeaderProperty.html#isApplyLineSpace--">isApplyLineSpace</a></span>()</code>
<div class="block">줄 간격에 영향을 줄지 여부를 반환한다. (2 bit)</div>
</td>
</tr>
<tr id="i12" class="altColor">
<td class="colFirst"><code>boolean</code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/GsoHeaderProperty.html#isLikeWord--">isLikeWord</a></span>()</code>
<div class="block">글자처럼 취급 여부를 반환한다. (0 bit)</div>
</td>
</tr>
<tr id="i13" class="rowColor">
<td class="colFirst"><code>boolean</code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/GsoHeaderProperty.html#isProtectSize--">isProtectSize</a></span>()</code>
<div class="block">VertRelTo이 para일 때 크기 보호 여부를 반환한다. (20 bit)</div>
</td>
</tr>
<tr id="i14" class="altColor">
<td class="colFirst"><code>boolean</code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/GsoHeaderProperty.html#isVertRelToParaLimit--">isVertRelToParaLimit</a></span>()</code>
<div class="block">VertRelTo이 ‘para’일 때 오브젝트의 세로 위치를 본문 영역으로 제한할지 여부를 반환한다. (13 bit)</div>
</td>
</tr>
<tr id="i15" class="rowColor">
<td class="colFirst"><code>void</code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/GsoHeaderProperty.html#setAllowOverlap-boolean-">setAllowOverlap</a></span>(boolean allowOverlap)</code>
<div class="block">다른 오브젝트와 겹치는 것을 허용할지 여부를 설정한다. (14 bit)</div>
</td>
</tr>
<tr id="i16" class="altColor">
<td class="colFirst"><code>void</code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/GsoHeaderProperty.html#setApplyLineSpace-boolean-">setApplyLineSpace</a></span>(boolean applyLineSpace)</code>
<div class="block">줄 간격에 영향을 줄지 여부를 설정한다. (2 bit)</div>
</td>
</tr>
<tr id="i17" class="rowColor">
<td class="colFirst"><code>void</code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/GsoHeaderProperty.html#setHeightCriterion-kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso.HeightCriterion-">setHeightCriterion</a></span>(<a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/HeightCriterion.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso">HeightCriterion</a> heightCriterion)</code>
<div class="block">오브젝트 높이의 기준을 설정한다. (18~19 bit)</div>
</td>
</tr>
<tr id="i18" class="altColor">
<td class="colFirst"><code>void</code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/GsoHeaderProperty.html#setHorzRelativeArrange-kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso.RelativeArrange-">setHorzRelativeArrange</a></span>(<a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/RelativeArrange.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso">RelativeArrange</a> horzRelativeArrange)</code>
<div class="block">HorzRelTo에 대한 상대적인 배열방식을 설정한다. (10~12 bit)</div>
</td>
</tr>
<tr id="i19" class="rowColor">
<td class="colFirst"><code>void</code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/GsoHeaderProperty.html#setHorzRelTo-kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso.HorzRelTo-">setHorzRelTo</a></span>(<a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/HorzRelTo.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso">HorzRelTo</a> horzRelTo)</code>
<div class="block">가로 위치의 기준을 설정한다. (8~9 bit)</div>
</td>
</tr>
<tr id="i20" class="altColor">
<td class="colFirst"><code>void</code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/GsoHeaderProperty.html#setLikeWord-boolean-">setLikeWord</a></span>(boolean likeWord)</code>
<div class="block">글자처럼 취급 여부를 설정한다. (0 bit)</div>
</td>
</tr>
<tr id="i21" class="rowColor">
<td class="colFirst"><code>void</code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/GsoHeaderProperty.html#setObjectNumberSort-kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso.ObjectNumberSort-">setObjectNumberSort</a></span>(<a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/ObjectNumberSort.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso">ObjectNumberSort</a> objectNumberSort)</code>
<div class="block">개체가 속하는 번호 범주를 설정한다. (26~28 bit)</div>
</td>
</tr>
<tr id="i22" class="altColor">
<td class="colFirst"><code>void</code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/GsoHeaderProperty.html#setProtectSize-boolean-">setProtectSize</a></span>(boolean protectSize)</code>
<div class="block">VertRelTo이 para일 때 크기 보호 여부를 설정한다.</div>
</td>
</tr>
<tr id="i23" class="rowColor">
<td class="colFirst"><code>void</code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/GsoHeaderProperty.html#setTextFlowMethod-kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso.TextFlowMethod-">setTextFlowMethod</a></span>(<a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/TextFlowMethod.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso">TextFlowMethod</a> textFlowMethod)</code>
<div class="block">오브젝트 주위를 텍스트가 어떻게 흘러갈지 지정하는 옵션을 설정한다. (21~23 bit)</div>
</td>
</tr>
<tr id="i24" class="altColor">
<td class="colFirst"><code>void</code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/GsoHeaderProperty.html#setTextHorzArrange-kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso.TextHorzArrange-">setTextHorzArrange</a></span>(<a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/TextHorzArrange.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso">TextHorzArrange</a> textHorzArrange)</code>
<div class="block">오브젝트의 좌/우 어느 쪽에 글을 배치할지 지정하는 옵션을 설정한다. (24~25 bit)</div>
</td>
</tr>
<tr id="i25" class="rowColor">
<td class="colFirst"><code>void</code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/GsoHeaderProperty.html#setValue-long-">setValue</a></span>(long value)</code>
<div class="block">파일에 저장되는 정수값을 설정한다.</div>
</td>
</tr>
<tr id="i26" class="altColor">
<td class="colFirst"><code>void</code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/GsoHeaderProperty.html#setVertRelativeArrange-kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso.RelativeArrange-">setVertRelativeArrange</a></span>(<a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/RelativeArrange.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso">RelativeArrange</a> vertRelativeArrange)</code>
<div class="block">세로 위치의 기준에 대한 상대적인 배열방식를 설정한다. (5~7 bit)</div>
</td>
</tr>
<tr id="i27" class="rowColor">
<td class="colFirst"><code>void</code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/GsoHeaderProperty.html#setVertRelTo-kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso.VertRelTo-">setVertRelTo</a></span>(<a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/VertRelTo.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso">VertRelTo</a> vertRelTo)</code>
<div class="block">세로 위치의 기준을 설정한다. (3~4 bit)</div>
</td>
</tr>
<tr id="i28" class="altColor">
<td class="colFirst"><code>void</code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/GsoHeaderProperty.html#setVertRelToParaLimit-boolean-">setVertRelToParaLimit</a></span>(boolean vertRelToParaLimit)</code>
<div class="block">VertRelTo이 ‘para’일 때 오브젝트의 세로 위치를 본문 영역으로 제한할지 여부를 설정한다.</div>
</td>
</tr>
<tr id="i29" class="rowColor">
<td class="colFirst"><code>void</code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/GsoHeaderProperty.html#setWidthCriterion-kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso.WidthCriterion-">setWidthCriterion</a></span>(<a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/WidthCriterion.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso">WidthCriterion</a> widthCriterion)</code>
<div class="block">오브젝트 폭의 기준을 설정한다. (15~17 bit)</div>
</td>
</tr>
</table>
<ul class="blockList">
<li class="blockList"><a name="methods.inherited.from.class.java.lang.Object">
<!-- -->
</a>
<h3>Methods inherited from class java.lang.<a href="http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a></h3>
<code><a href="http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#clone--" title="class or interface in java.lang">clone</a>, <a href="http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-" title="class or interface in java.lang">equals</a>, <a href="http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#finalize--" title="class or interface in java.lang">finalize</a>, <a href="http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#getClass--" title="class or interface in java.lang">getClass</a>, <a href="http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#hashCode--" title="class or interface in java.lang">hashCode</a>, <a href="http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#notify--" title="class or interface in java.lang">notify</a>, <a href="http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#notifyAll--" title="class or interface in java.lang">notifyAll</a>, <a href="http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#toString--" title="class or interface in java.lang">toString</a>, <a href="http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#wait--" title="class or interface in java.lang">wait</a>, <a href="http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#wait-long-" title="class or interface in java.lang">wait</a>, <a href="http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#wait-long-int-" title="class or interface in java.lang">wait</a></code></li>
</ul>
</li>
</ul>
</li>
</ul>
</div>
<div class="details">
<ul class="blockList">
<li class="blockList">
<!-- ========= CONSTRUCTOR DETAIL ======== -->
<ul class="blockList">
<li class="blockList"><a name="constructor.detail">
<!-- -->
</a>
<h3>Constructor Detail</h3>
<a name="GsoHeaderProperty--">
<!-- -->
</a>
<ul class="blockListLast">
<li class="blockList">
<h4>GsoHeaderProperty</h4>
<pre>public GsoHeaderProperty()</pre>
<div class="block">생성자</div>
</li>
</ul>
</li>
</ul>
<!-- ============ METHOD DETAIL ========== -->
<ul class="blockList">
<li class="blockList"><a name="method.detail">
<!-- -->
</a>
<h3>Method Detail</h3>
<a name="getValue--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>getValue</h4>
<pre>public long getValue()</pre>
<div class="block">파일에 저장되는 정수값을 반환한다.</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>파일에 저장되는 정수값</dd>
</dl>
</li>
</ul>
<a name="setValue-long-">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>setValue</h4>
<pre>public void setValue(long value)</pre>
<div class="block">파일에 저장되는 정수값을 설정한다.</div>
<dl>
<dt><span class="paramLabel">Parameters:</span></dt>
<dd><code>value</code> - 파일에 저장되는 정수값</dd>
</dl>
</li>
</ul>
<a name="isLikeWord--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>isLikeWord</h4>
<pre>public boolean isLikeWord()</pre>
<div class="block">글자처럼 취급 여부를 반환한다. (0 bit)</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>글자처럼 취급 여부</dd>
</dl>
</li>
</ul>
<a name="setLikeWord-boolean-">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>setLikeWord</h4>
<pre>public void setLikeWord(boolean likeWord)</pre>
<div class="block">글자처럼 취급 여부를 설정한다. (0 bit)</div>
<dl>
<dt><span class="paramLabel">Parameters:</span></dt>
<dd><code>likeWord</code> - 글자처럼 취급 여부</dd>
</dl>
</li>
</ul>
<a name="isApplyLineSpace--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>isApplyLineSpace</h4>
<pre>public boolean isApplyLineSpace()</pre>
<div class="block">줄 간격에 영향을 줄지 여부를 반환한다. (2 bit)</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>줄 간격에 영향을 줄지 여부</dd>
</dl>
</li>
</ul>
<a name="setApplyLineSpace-boolean-">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>setApplyLineSpace</h4>
<pre>public void setApplyLineSpace(boolean applyLineSpace)</pre>
<div class="block">줄 간격에 영향을 줄지 여부를 설정한다. (2 bit)</div>
<dl>
<dt><span class="paramLabel">Parameters:</span></dt>
<dd><code>applyLineSpace</code> - 줄 간격에 영향을 줄지 여부</dd>
</dl>
</li>
</ul>
<a name="getVertRelTo--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>getVertRelTo</h4>
<pre>public <a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/VertRelTo.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso">VertRelTo</a> getVertRelTo()</pre>
<div class="block">세로 위치의 기준을 반환한다. (3~4 bit)</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>세로 위치의 기준</dd>
</dl>
</li>
</ul>
<a name="setVertRelTo-kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso.VertRelTo-">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>setVertRelTo</h4>
<pre>public void setVertRelTo(<a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/VertRelTo.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso">VertRelTo</a> vertRelTo)</pre>
<div class="block">세로 위치의 기준을 설정한다. (3~4 bit)</div>
<dl>
<dt><span class="paramLabel">Parameters:</span></dt>
<dd><code>vertRelTo</code> - 세로 위치의 기준</dd>
</dl>
</li>
</ul>
<a name="getVertRelativeArrange--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>getVertRelativeArrange</h4>
<pre>public <a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/RelativeArrange.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso">RelativeArrange</a> getVertRelativeArrange()</pre>
<div class="block">세로 위치의 기준에 대한 상대적인 배열방식을 반환한다. (5~7 bit)</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>세로 위치의 기준에 대한 상대적인 배열방식</dd>
</dl>
</li>
</ul>
<a name="setVertRelativeArrange-kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso.RelativeArrange-">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>setVertRelativeArrange</h4>
<pre>public void setVertRelativeArrange(<a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/RelativeArrange.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso">RelativeArrange</a> vertRelativeArrange)</pre>
<div class="block">세로 위치의 기준에 대한 상대적인 배열방식를 설정한다. (5~7 bit)</div>
<dl>
<dt><span class="paramLabel">Parameters:</span></dt>
<dd><code>vertRelativeArrange</code> - 세로 위치의 기준에 대한 상대적인 배열방식</dd>
</dl>
</li>
</ul>
<a name="getHorzRelTo--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>getHorzRelTo</h4>
<pre>public <a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/HorzRelTo.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso">HorzRelTo</a> getHorzRelTo()</pre>
<div class="block">가로 위치의 기준을 반환한다. (8~9 bit)</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>가로 위치의 기준</dd>
</dl>
</li>
</ul>
<a name="setHorzRelTo-kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso.HorzRelTo-">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>setHorzRelTo</h4>
<pre>public void setHorzRelTo(<a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/HorzRelTo.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso">HorzRelTo</a> horzRelTo)</pre>
<div class="block">가로 위치의 기준을 설정한다. (8~9 bit)</div>
<dl>
<dt><span class="paramLabel">Parameters:</span></dt>
<dd><code>horzRelTo</code> - 가로 위치의 기준</dd>
</dl>
</li>
</ul>
<a name="getHorzRelativeArrange--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>getHorzRelativeArrange</h4>
<pre>public <a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/RelativeArrange.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso">RelativeArrange</a> getHorzRelativeArrange()</pre>
<div class="block">HorzRelTo에 대한 상대적인 배열방식을 반환한다. (10~12 bit)</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>HorzRelTo에 대한 상대적인 배열방식</dd>
</dl>
</li>
</ul>
<a name="setHorzRelativeArrange-kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso.RelativeArrange-">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>setHorzRelativeArrange</h4>
<pre>public void setHorzRelativeArrange(<a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/RelativeArrange.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso">RelativeArrange</a> horzRelativeArrange)</pre>
<div class="block">HorzRelTo에 대한 상대적인 배열방식을 설정한다. (10~12 bit)</div>
<dl>
<dt><span class="paramLabel">Parameters:</span></dt>
<dd><code>horzRelativeArrange</code> - HorzRelTo에 대한 상대적인 배열방식</dd>
</dl>
</li>
</ul>
<a name="isVertRelToParaLimit--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>isVertRelToParaLimit</h4>
<pre>public boolean isVertRelToParaLimit()</pre>
<div class="block">VertRelTo이 ‘para’일 때 오브젝트의 세로 위치를 본문 영역으로 제한할지 여부를 반환한다. (13 bit)</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>VertRelTo이 ‘para’일 때 오브젝트의 세로 위치를 본문 영역으로 제한할지 여부</dd>
</dl>
</li>
</ul>
<a name="setVertRelToParaLimit-boolean-">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>setVertRelToParaLimit</h4>
<pre>public void setVertRelToParaLimit(boolean vertRelToParaLimit)</pre>
<div class="block">VertRelTo이 ‘para’일 때 오브젝트의 세로 위치를 본문 영역으로 제한할지 여부를 설정한다.</div>
<dl>
<dt><span class="paramLabel">Parameters:</span></dt>
<dd><code>vertRelToParaLimit</code> - VertRelTo이 ‘para’일 때 오브젝트의 세로 위치를 본문 영역으로 제한할지 여부</dd>
</dl>
</li>
</ul>
<a name="isAllowOverlap--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>isAllowOverlap</h4>
<pre>public boolean isAllowOverlap()</pre>
<div class="block">다른 오브젝트와 겹치는 것을 허용할지 여부를 반환한다. (14 bit)</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>다른 오브젝트와 겹치는 것을 허용할지 여부</dd>
</dl>
</li>
</ul>
<a name="setAllowOverlap-boolean-">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>setAllowOverlap</h4>
<pre>public void setAllowOverlap(boolean allowOverlap)</pre>
<div class="block">다른 오브젝트와 겹치는 것을 허용할지 여부를 설정한다. (14 bit)</div>
<dl>
<dt><span class="paramLabel">Parameters:</span></dt>
<dd><code>allowOverlap</code> - 다른 오브젝트와 겹치는 것을 허용할지 여부</dd>
</dl>
</li>
</ul>
<a name="getWidthCriterion--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>getWidthCriterion</h4>
<pre>public <a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/WidthCriterion.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso">WidthCriterion</a> getWidthCriterion()</pre>
<div class="block">오브젝트 폭의 기준을 반환한다. (15~17 bit)</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>오브젝트 폭의 기준</dd>
</dl>
</li>
</ul>
<a name="setWidthCriterion-kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso.WidthCriterion-">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>setWidthCriterion</h4>
<pre>public void setWidthCriterion(<a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/WidthCriterion.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso">WidthCriterion</a> widthCriterion)</pre>
<div class="block">오브젝트 폭의 기준을 설정한다. (15~17 bit)</div>
<dl>
<dt><span class="paramLabel">Parameters:</span></dt>
<dd><code>widthCriterion</code> - 오브젝트 폭의 기준</dd>
</dl>
</li>
</ul>
<a name="getHeightCriterion--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>getHeightCriterion</h4>
<pre>public <a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/HeightCriterion.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso">HeightCriterion</a> getHeightCriterion()</pre>
<div class="block">오브젝트 높이의 기준을 반환한다 (18~19 bit)</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>오브젝트 높이의 기준</dd>
</dl>
</li>
</ul>
<a name="setHeightCriterion-kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso.HeightCriterion-">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>setHeightCriterion</h4>
<pre>public void setHeightCriterion(<a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/HeightCriterion.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso">HeightCriterion</a> heightCriterion)</pre>
<div class="block">오브젝트 높이의 기준을 설정한다. (18~19 bit)</div>
<dl>
<dt><span class="paramLabel">Parameters:</span></dt>
<dd><code>heightCriterion</code> - 오브젝트 높이의 기준</dd>
</dl>
</li>
</ul>
<a name="isProtectSize--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>isProtectSize</h4>
<pre>public boolean isProtectSize()</pre>
<div class="block">VertRelTo이 para일 때 크기 보호 여부를 반환한다. (20 bit)</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>VertRelTo이 para일 때 크기 보호 여부</dd>
</dl>
</li>
</ul>
<a name="setProtectSize-boolean-">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>setProtectSize</h4>
<pre>public void setProtectSize(boolean protectSize)</pre>
<div class="block">VertRelTo이 para일 때 크기 보호 여부를 설정한다.</div>
<dl>
<dt><span class="paramLabel">Parameters:</span></dt>
<dd><code>protectSize</code> - VertRelTo이 para일 때 크기 보호 여부</dd>
</dl>
</li>
</ul>
<a name="getTextFlowMethod--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>getTextFlowMethod</h4>
<pre>public <a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/TextFlowMethod.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso">TextFlowMethod</a> getTextFlowMethod()</pre>
<div class="block">오브젝트 주위를 텍스트가 어떻게 흘러갈지 지정하는 옵션을 반환한다. (21~23 bit)</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>오브젝트 주위를 텍스트가 어떻게 흘러갈지 지정하는 옵션</dd>
</dl>
</li>
</ul>
<a name="setTextFlowMethod-kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso.TextFlowMethod-">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>setTextFlowMethod</h4>
<pre>public void setTextFlowMethod(<a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/TextFlowMethod.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso">TextFlowMethod</a> textFlowMethod)</pre>
<div class="block">오브젝트 주위를 텍스트가 어떻게 흘러갈지 지정하는 옵션을 설정한다. (21~23 bit)</div>
<dl>
<dt><span class="paramLabel">Parameters:</span></dt>
<dd><code>textFlowMethod</code> - 오브젝트 주위를 텍스트가 어떻게 흘러갈지 지정하는 옵션</dd>
</dl>
</li>
</ul>
<a name="getTextHorzArrange--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>getTextHorzArrange</h4>
<pre>public <a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/TextHorzArrange.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso">TextHorzArrange</a> getTextHorzArrange()</pre>
<div class="block">오브젝트의 좌/우 어느 쪽에 글을 배치할지 지정하는 옵션을 반환한다. (24~25 bit)</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>오브젝트의 좌/우 어느 쪽에 글을 배치할지 지정하는 옵션</dd>
</dl>
</li>
</ul>
<a name="setTextHorzArrange-kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso.TextHorzArrange-">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>setTextHorzArrange</h4>
<pre>public void setTextHorzArrange(<a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/TextHorzArrange.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso">TextHorzArrange</a> textHorzArrange)</pre>
<div class="block">오브젝트의 좌/우 어느 쪽에 글을 배치할지 지정하는 옵션을 설정한다. (24~25 bit)</div>
<dl>
<dt><span class="paramLabel">Parameters:</span></dt>
<dd><code>textHorzArrange</code> - 오브젝트의 좌/우 어느 쪽에 글을 배치할지 지정하는 옵션</dd>
</dl>
</li>
</ul>
<a name="getObjectNumberSort--">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>getObjectNumberSort</h4>
<pre>public <a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/ObjectNumberSort.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso">ObjectNumberSort</a> getObjectNumberSort()</pre>
<div class="block">개체가 속하는 번호 범주를 반환한다. (26~28 bit)</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>개체가 속하는 번호 범주</dd>
</dl>
</li>
</ul>
<a name="setObjectNumberSort-kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso.ObjectNumberSort-">
<!-- -->
</a>
<ul class="blockListLast">
<li class="blockList">
<h4>setObjectNumberSort</h4>
<pre>public void setObjectNumberSort(<a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/ObjectNumberSort.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso">ObjectNumberSort</a> objectNumberSort)</pre>
<div class="block">개체가 속하는 번호 범주를 설정한다. (26~28 bit)</div>
<dl>
<dt><span class="paramLabel">Parameters:</span></dt>
<dd><code>objectNumberSort</code> - 개체가 속하는 번호 범주</dd>
</dl>
</li>
</ul>
</li>
</ul>
</li>
</ul>
</div>
</div>
<!-- ========= END OF CLASS DATA ========= -->
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar.bottom">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.bottom" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.bottom.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../../../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li class="navBarCell1Rev">Class</li>
<li><a href="class-use/GsoHeaderProperty.html">Use</a></li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev Class</li>
<li><a href="../../../../../../../../kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/HeightCriterion.html" title="enum in kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.gso"><span class="typeNameLink">Next Class</span></a></li>
</ul>
<ul class="navList">
<li><a href="../../../../../../../../index.html?kr/dogfoot/hwplib/object/bodytext/control/ctrlheader/gso/GsoHeaderProperty.html" target="_top">Frames</a></li>
<li><a href="GsoHeaderProperty.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<div>
<ul class="subNavList">
<li>Summary: </li>
<li>Nested | </li>
<li>Field | </li>
<li><a href="#constructor.summary">Constr</a> | </li>
<li><a href="#method.summary">Method</a></li>
</ul>
<ul class="subNavList">
<li>Detail: </li>
<li>Field | </li>
<li><a href="#constructor.detail">Constr</a> | </li>
<li><a href="#method.detail">Method</a></li>
</ul>
</div>
<a name="skip.navbar.bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<p class="legalCopy"><small>Copyright © 2020. All rights reserved.</small></p>
</body>
</html>
|
{
"pile_set_name": "Github"
}
|
/*
* linux/mm/filemap.c
*
* Copyright (C) 1994-1999 Linus Torvalds
*/
/*
* This file handles the generic file mmap semantics used by
* most "normal" filesystems (but you don't /have/ to use this:
* the NFS filesystem used to do this differently, for example)
*/
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include <linux/shmem_fs.h>
#include <linux/rmap.h>
#include "internal.h"
#define CREATE_TRACE_POINTS
#include <trace/events/filemap.h>
/*
* FIXME: remove all knowledge of the buffer layer from the core VM
*/
#include <linux/buffer_head.h> /* for try_to_free_buffers */
#include <asm/mman.h>
/*
* Shared mappings implemented 30.11.1994. It's not fully working yet,
* though.
*
* Shared mappings now work. 15.8.1995 Bruno.
*
* finished 'unifying' the page and buffer cache and SMP-threaded the
* page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
*
* SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
*/
/*
* Lock ordering:
*
* ->i_mmap_rwsem (truncate_pagecache)
* ->private_lock (__free_pte->__set_page_dirty_buffers)
* ->swap_lock (exclusive_swap_page, others)
* ->i_pages lock
*
* ->i_mutex
* ->i_mmap_rwsem (truncate->unmap_mapping_range)
*
* ->mmap_sem
* ->i_mmap_rwsem
* ->page_table_lock or pte_lock (various, mainly in memory.c)
* ->i_pages lock (arch-dependent flush_dcache_mmap_lock)
*
* ->mmap_sem
* ->lock_page (access_process_vm)
*
* ->i_mutex (generic_perform_write)
* ->mmap_sem (fault_in_pages_readable->do_page_fault)
*
* bdi->wb.list_lock
* sb_lock (fs/fs-writeback.c)
* ->i_pages lock (__sync_single_inode)
*
* ->i_mmap_rwsem
* ->anon_vma.lock (vma_adjust)
*
* ->anon_vma.lock
* ->page_table_lock or pte_lock (anon_vma_prepare and various)
*
* ->page_table_lock or pte_lock
* ->swap_lock (try_to_unmap_one)
* ->private_lock (try_to_unmap_one)
* ->i_pages lock (try_to_unmap_one)
* ->zone_lru_lock(zone) (follow_page->mark_page_accessed)
* ->zone_lru_lock(zone) (check_pte_range->isolate_lru_page)
* ->private_lock (page_remove_rmap->set_page_dirty)
* ->i_pages lock (page_remove_rmap->set_page_dirty)
* bdi.wb->list_lock (page_remove_rmap->set_page_dirty)
* ->inode->i_lock (page_remove_rmap->set_page_dirty)
* ->memcg->move_lock (page_remove_rmap->lock_page_memcg)
* bdi.wb->list_lock (zap_pte_range->set_page_dirty)
* ->inode->i_lock (zap_pte_range->set_page_dirty)
* ->private_lock (zap_pte_range->__set_page_dirty_buffers)
*
* ->i_mmap_rwsem
* ->tasklist_lock (memory_failure, collect_procs_ao)
*/
/*
 * page_cache_tree_insert - add @page to @mapping's radix tree at page->index.
 *
 * Creates the slot if necessary.  A slot holding an exceptional (shadow)
 * entry is taken over: the shadow is reported back through @shadowp (when
 * non-NULL) and mapping->nrexceptional is decremented.  A slot already
 * occupied by a real page makes the insert fail with -EEXIST.
 *
 * Returns 0 on success or a negative errno from __radix_tree_create().
 * NOTE(review): the protected slot dereference below implies callers hold
 * the i_pages lock — confirm at the call sites.
 */
static int page_cache_tree_insert(struct address_space *mapping,
struct page *page, void **shadowp)
{
struct radix_tree_node *node;
void **slot;
int error;
/* Ensure a slot exists for page->index (may allocate tree nodes). */
error = __radix_tree_create(&mapping->i_pages, page->index, 0,
&node, &slot);
if (error)
return error;
if (*slot) {
void *p;
/* Dereference under the i_pages lock; may be a shadow entry. */
p = radix_tree_deref_slot_protected(slot,
&mapping->i_pages.xa_lock);
if (!radix_tree_exceptional_entry(p))
return -EEXIST;
/* Replacing a shadow entry: one fewer exceptional entry. */
mapping->nrexceptional--;
if (shadowp)
*shadowp = p;
}
/* Install the page; keeps workingset node accounting up to date. */
__radix_tree_replace(&mapping->i_pages, node, slot, page,
workingset_lookup_update(mapping));
mapping->nrpages++;
return 0;
}
/*
 * page_cache_tree_delete - remove @page from @mapping's radix tree,
 * replacing its slot(s) with @shadow (which may be NULL).
 *
 * A transparent huge page occupies hpage_nr_pages() consecutive slots,
 * all of which are cleared; hugetlb pages use a single entry.  The page
 * must be locked (asserted below); the i_pages lock protects the tree
 * manipulation.
 */
static void page_cache_tree_delete(struct address_space *mapping,
struct page *page, void *shadow)
{
int i, nr;
/* hugetlb pages are represented by one entry in the radix tree */
nr = PageHuge(page) ? 1 : hpage_nr_pages(page);
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(PageTail(page), page);
/* Shadow entries are only stored for single-slot pages. */
VM_BUG_ON_PAGE(nr != 1 && shadow, page);
for (i = 0; i < nr; i++) {
struct radix_tree_node *node;
void **slot;
__radix_tree_lookup(&mapping->i_pages, page->index + i,
&node, &slot);
VM_BUG_ON_PAGE(!node && nr != 1, page);
/* Drop any tags before vacating the slot. */
radix_tree_clear_tags(&mapping->i_pages, node, slot);
__radix_tree_replace(&mapping->i_pages, node, slot, shadow,
workingset_lookup_update(mapping));
}
page->mapping = NULL;
/* Leave page->index set: truncation lookup relies upon it */
if (shadow) {
mapping->nrexceptional += nr;
/*
 * Make sure the nrexceptional update is committed before
 * the nrpages update so that final truncate racing
 * with reclaim does not see both counters 0 at the
 * same time and miss a shadow entry.
 */
smp_wmb();
}
mapping->nrpages -= nr;
}
/*
 * unaccount_page_cache_page - roll back accounting as @page leaves
 * @mapping's page cache.
 *
 * Pushes the page into (or invalidates it from) cleancache, sanity-checks
 * that it is no longer mapped, and decrements the NR_FILE_PAGES /
 * NR_SHMEM / NR_SHMEM_THPS node counters.  Hugetlb pages are not
 * accounted in the page cache and are skipped.
 */
static void unaccount_page_cache_page(struct address_space *mapping,
struct page *page)
{
int nr;
/*
 * if we're uptodate, flush out into the cleancache, otherwise
 * invalidate any existing cleancache entries. We can't leave
 * stale data around in the cleancache once our page is gone
 */
if (PageUptodate(page) && PageMappedToDisk(page))
cleancache_put_page(page);
else
cleancache_invalidate_page(mapping, page);
VM_BUG_ON_PAGE(PageTail(page), page);
VM_BUG_ON_PAGE(page_mapped(page), page);
/*
 * Without CONFIG_DEBUG_VM the VM_BUG_ON above compiles away; in that
 * case report a still-mapped page here instead, taint the kernel, and
 * try to un-leak the page when the address space is being torn down.
 */
if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(page_mapped(page))) {
int mapcount;
pr_alert("BUG: Bad page cache in process %s  pfn:%05lx\n",
current->comm, page_to_pfn(page));
dump_page(page, "still mapped when deleted");
dump_stack();
add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
mapcount = page_mapcount(page);
if (mapping_exiting(mapping) &&
page_count(page) >= mapcount + 2) {
/*
 * All vmas have already been torn down, so it's
 * a good bet that actually the page is unmapped,
 * and we'd prefer not to leak it: if we're wrong,
 * some other bad page check should catch it later.
 */
page_mapcount_reset(page);
page_ref_sub(page, mapcount);
}
}
/* hugetlb pages do not participate in page cache accounting. */
if (PageHuge(page))
return;
nr = hpage_nr_pages(page);
__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
if (PageSwapBacked(page)) {
__mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr);
if (PageTransHuge(page))
__dec_node_page_state(page, NR_SHMEM_THPS);
} else {
/* Only swap-backed THPs are expected in the cache here. */
VM_BUG_ON_PAGE(PageTransHuge(page), page);
}
/*
 * At this point page must be either written or cleaned by
 * truncate. Dirty page here signals a bug and loss of
 * unwritten data.
 *
 * This fixes dirty accounting after removing the page entirely
 * but leaves PageDirty set: it has no effect for truncated
 * page and anyway will be cleared before returning page into
 * buddy allocator.
 */
if (WARN_ON_ONCE(PageDirty(page)))
account_page_cleaned(page, mapping, inode_to_wb(mapping->host));
}
/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe. The caller must hold the i_pages lock.
 */
void __delete_from_page_cache(struct page *page, void *shadow)
{
struct address_space *mapping = page->mapping;
trace_mm_filemap_delete_from_page_cache(page);
/* Fix up statistics first, then unlink from the radix tree. */
unaccount_page_cache_page(mapping, page);
page_cache_tree_delete(mapping, page, shadow);
}
/*
 * page_cache_free_page - drop the page cache's reference(s) on @page.
 *
 * Invokes the address space's ->freepage callback (if any), then releases
 * the cache references: HPAGE_PMD_NR at once for a transparent huge page,
 * a single put_page() otherwise.
 */
static void page_cache_free_page(struct address_space *mapping,
struct page *page)
{
void (*freepage)(struct page *);
/* Let the filesystem clean up before the reference is dropped. */
freepage = mapping->a_ops->freepage;
if (freepage)
freepage(page);
if (PageTransHuge(page) && !PageHuge(page)) {
/* A THP held one cache reference per subpage. */
page_ref_sub(page, HPAGE_PMD_NR);
VM_BUG_ON_PAGE(page_count(page) <= 0, page);
} else {
put_page(page);
}
}
/**
 * delete_from_page_cache - delete page from page cache
 * @page: the page which the kernel is trying to remove from page cache
 *
 * This must be called only on pages that have been verified to be in the page
 * cache and locked. It will never put the page into the free list, the caller
 * has a reference on the page.
 */
void delete_from_page_cache(struct page *page)
{
struct address_space *mapping = page_mapping(page);
unsigned long flags;
BUG_ON(!PageLocked(page));
/* Unlink under the irq-safe i_pages lock, then free outside it. */
xa_lock_irqsave(&mapping->i_pages, flags);
__delete_from_page_cache(page, NULL);
xa_unlock_irqrestore(&mapping->i_pages, flags);
page_cache_free_page(mapping, page);
}
EXPORT_SYMBOL(delete_from_page_cache);
/*
 * page_cache_tree_delete_batch - delete several pages from page cache
 * @mapping: the mapping to which pages belong
 * @pvec: pagevec with pages to delete
 *
 * The function walks over mapping->i_pages and removes pages passed in @pvec
 * from the mapping. The function expects @pvec to be sorted by page index.
 * It tolerates holes in @pvec (mapping entries at those indices are not
 * modified). The function expects only THP head pages to be present in the
 * @pvec and takes care to delete all corresponding tail pages from the
 * mapping as well.
 *
 * The function expects the i_pages lock to be held.
 */
static void
page_cache_tree_delete_batch(struct address_space *mapping,
			     struct pagevec *pvec)
{
	struct radix_tree_iter iter;
	void **slot;
	int total_pages = 0;
	/* i indexes into pvec; tail_pages counts THP tails still to clear. */
	int i = 0, tail_pages = 0;
	struct page *page;
	pgoff_t start;

	start = pvec->pages[0]->index;
	radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
		/* Done once all pvec pages and their tails are handled. */
		if (i >= pagevec_count(pvec) && !tail_pages)
			break;
		page = radix_tree_deref_slot_protected(slot,
						       &mapping->i_pages.xa_lock);
		/* Shadow/swap entries in the range are left untouched. */
		if (radix_tree_exceptional_entry(page))
			continue;
		if (!tail_pages) {
			/*
			 * Some page got inserted in our range? Skip it. We
			 * have our pages locked so they are protected from
			 * being removed.
			 */
			if (page != pvec->pages[i])
				continue;
			WARN_ON_ONCE(!PageLocked(page));
			/* A THP head is followed by HPAGE_PMD_NR - 1 tails. */
			if (PageTransHuge(page) && !PageHuge(page))
				tail_pages = HPAGE_PMD_NR - 1;
			page->mapping = NULL;
			/*
			 * Leave page->index set: truncation lookup relies
			 * upon it
			 */
			i++;
		} else {
			tail_pages--;
		}
		radix_tree_clear_tags(&mapping->i_pages, iter.node, slot);
		__radix_tree_replace(&mapping->i_pages, iter.node, slot, NULL,
				workingset_lookup_update(mapping));
		total_pages++;
	}
	mapping->nrpages -= total_pages;
}
/*
 * delete_from_page_cache_batch - like delete_from_page_cache(), but for a
 * whole pagevec, taking the i_pages lock only once for the entire batch.
 */
void delete_from_page_cache_batch(struct address_space *mapping,
				  struct pagevec *pvec)
{
	int i;
	unsigned long flags;

	if (!pagevec_count(pvec))
		return;

	xa_lock_irqsave(&mapping->i_pages, flags);
	for (i = 0; i < pagevec_count(pvec); i++) {
		trace_mm_filemap_delete_from_page_cache(pvec->pages[i]);
		unaccount_page_cache_page(mapping, pvec->pages[i]);
	}
	page_cache_tree_delete_batch(mapping, pvec);
	xa_unlock_irqrestore(&mapping->i_pages, flags);

	/* Drop the cache references outside the lock. */
	for (i = 0; i < pagevec_count(pvec); i++)
		page_cache_free_page(mapping, pvec->pages[i]);
}
/*
 * filemap_check_errors - report and clear any pending writeback error.
 *
 * The unlocked test_bit() ahead of each test_and_clear_bit() avoids
 * dirtying the flags cacheline in the common no-error case. When both
 * bits are set, -EIO takes precedence over -ENOSPC.
 */
int filemap_check_errors(struct address_space *mapping)
{
	int ret = 0;

	/* Check for outstanding write errors */
	if (test_bit(AS_ENOSPC, &mapping->flags) &&
	    test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_bit(AS_EIO, &mapping->flags) &&
	    test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}
EXPORT_SYMBOL(filemap_check_errors);
/*
 * Report any pending writeback error without consuming it. When both
 * error bits are set, -EIO wins over -ENOSPC, mirroring
 * filemap_check_errors().
 */
static int filemap_check_and_keep_errors(struct address_space *mapping)
{
	int err = 0;

	if (test_bit(AS_ENOSPC, &mapping->flags))
		err = -ENOSPC;
	if (test_bit(AS_EIO, &mapping->flags))
		err = -EIO;
	return err;
}
/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping: address space structure to write
 * @start: offset in bytes where the range starts
 * @end: offset in bytes where the range ends (inclusive)
 * @sync_mode: enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback. The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
			       loff_t end, int sync_mode)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = LONG_MAX,	/* no page-count limit */
		.range_start = start,
		.range_end = end,
	};

	/* Nothing to do for mappings that cannot hold dirty pages. */
	if (!mapping_cap_writeback_dirty(mapping))
		return 0;

	/* Associate the wbc with the inode for cgroup writeback accounting. */
	wbc_attach_fdatawrite_inode(&wbc, mapping->host);
	ret = do_writepages(mapping, &wbc);
	wbc_detach_inode(&wbc);
	return ret;
}
/* Write back the entire mapping with the given sync mode. */
static inline int __filemap_fdatawrite(struct address_space *mapping,
				       int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}
/**
 * filemap_fdatawrite - start data-integrity writeback on a whole mapping
 * @mapping: address space structure to write
 */
int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);
/**
 * filemap_fdatawrite_range - start data-integrity writeback on a byte range
 * @mapping: address space structure to write
 * @start: offset in bytes where the range starts
 * @end: offset in bytes where the range ends (inclusive)
 */
int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
			     loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);
/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping: target address_space
 *
 * This is a mostly non-blocking flush. Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 */
int filemap_flush(struct address_space *mapping)
{
	/* WB_SYNC_NONE: busy pages may be skipped rather than waited on. */
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);
/**
 * filemap_range_has_page - check if a page exists in range.
 * @mapping: address space within which to check
 * @start_byte: offset in bytes where the range starts
 * @end_byte: offset in bytes where the range ends (inclusive)
 *
 * Find at least one page in the range supplied, usually used to check if
 * direct writing in this range will trigger a writeback.
 */
bool filemap_range_has_page(struct address_space *mapping,
			    loff_t start_byte, loff_t end_byte)
{
	pgoff_t first = start_byte >> PAGE_SHIFT;
	pgoff_t last = end_byte >> PAGE_SHIFT;
	struct page *page;

	/* An inverted byte range or an empty mapping cannot match. */
	if (end_byte < start_byte || mapping->nrpages == 0)
		return false;

	/* Ask for at most one page in [first, last]. */
	if (find_get_pages_range(mapping, &first, last, 1, &page) == 0)
		return false;

	/* Only existence mattered; drop the reference we just took. */
	put_page(page);
	return true;
}
EXPORT_SYMBOL(filemap_range_has_page);
/*
 * Wait for writeback to finish on all pages in [start_byte, end_byte]
 * without touching the mapping's error state; callers collect errors
 * through filemap_check_errors() or the errseq_t machinery themselves.
 */
static void __filemap_fdatawait_range(struct address_space *mapping,
				      loff_t start_byte, loff_t end_byte)
{
	pgoff_t index = start_byte >> PAGE_SHIFT;
	pgoff_t end = end_byte >> PAGE_SHIFT;
	struct pagevec pvec;
	int nr_pages;

	if (end_byte < start_byte)
		return;

	pagevec_init(&pvec);
	while (index <= end) {
		unsigned i;

		/* Gather the next batch of pages tagged as under writeback. */
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
				end, PAGECACHE_TAG_WRITEBACK);
		if (!nr_pages)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			wait_on_page_writeback(page);
			ClearPageError(page);
		}
		pagevec_release(&pvec);
		/* The walk can be long; give the scheduler a chance. */
		cond_resched();
	}
}
/**
 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping: address space structure to wait for
 * @start_byte: offset in bytes where the range starts
 * @end_byte: offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space
 * in the given range and wait for all of them. Check error status of
 * the address space and return it.
 *
 * Since the error status of the address space is cleared by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 */
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
			    loff_t end_byte)
{
	__filemap_fdatawait_range(mapping, start_byte, end_byte);
	/* Consumes AS_EIO/AS_ENOSPC — see filemap_check_errors(). */
	return filemap_check_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_range);
/**
 * filemap_fdatawait_range_keep_errors - wait for writeback to complete
 * @mapping: address space structure to wait for
 * @start_byte: offset in bytes where the range starts
 * @end_byte: offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space in the
 * given range and wait for all of them. Unlike filemap_fdatawait_range(),
 * this function does not clear error status of the address space.
 *
 * Use this function if callers don't handle errors themselves. Expected
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 * fsfreeze(8)
 */
int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
		loff_t start_byte, loff_t end_byte)
{
	__filemap_fdatawait_range(mapping, start_byte, end_byte);
	/* Report, but do not clear, any pending error bits. */
	return filemap_check_and_keep_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_range_keep_errors);
/**
 * file_fdatawait_range - wait for writeback to complete
 * @file: file pointing to address space structure to wait for
 * @start_byte: offset in bytes where the range starts
 * @end_byte: offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the address space that file
 * refers to, in the given range and wait for all of them. Check error
 * status of the address space vs. the file->f_wb_err cursor and return it.
 *
 * Since the error status of the file is advanced by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 */
int file_fdatawait_range(struct file *file, loff_t start_byte, loff_t end_byte)
{
	struct address_space *mapping = file->f_mapping;

	__filemap_fdatawait_range(mapping, start_byte, end_byte);
	/* Per-file error cursor, not the mapping-wide AS_* bits. */
	return file_check_and_advance_wb_err(file);
}
EXPORT_SYMBOL(file_fdatawait_range);
/**
 * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them. Unlike filemap_fdatawait(), this function
 * does not clear error status of the address space.
 *
 * Use this function if callers don't handle errors themselves. Expected
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 * fsfreeze(8)
 */
int filemap_fdatawait_keep_errors(struct address_space *mapping)
{
	/* Whole-file wait: byte range 0..LLONG_MAX covers everything. */
	__filemap_fdatawait_range(mapping, 0, LLONG_MAX);
	return filemap_check_and_keep_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_keep_errors);
/*
 * Does this mapping have anything that writeback / wait could act on?
 * DAX mappings track state via exceptional entries rather than pages.
 */
static bool mapping_needs_writeback(struct address_space *mapping)
{
	if (dax_mapping(mapping))
		return mapping->nrexceptional != 0;
	return mapping->nrpages != 0;
}
/*
 * filemap_write_and_wait - write back the whole mapping and wait on it,
 * returning the first error encountered (write error wins over wait error).
 */
int filemap_write_and_wait(struct address_space *mapping)
{
	int err = 0;

	if (mapping_needs_writeback(mapping)) {
		err = filemap_fdatawrite(mapping);
		/*
		 * Even if the above returned error, the pages may be
		 * written partially (e.g. -ENOSPC), so we wait for it.
		 * But the -EIO is special case, it may indicate the worst
		 * thing (e.g. bug) happened, so we avoid waiting for it.
		 */
		if (err != -EIO) {
			int err2 = filemap_fdatawait(mapping);

			if (!err)
				err = err2;
		} else {
			/* Clear any previously stored errors */
			filemap_check_errors(mapping);
		}
	} else {
		/* Nothing to write; still surface any stored error. */
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait);
/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping: the address_space for the pages
 * @lstart: offset in bytes where the range starts
 * @lend: offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that @lend is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 */
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0;

	if (mapping_needs_writeback(mapping)) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO) {
			int err2 = filemap_fdatawait_range(mapping,
							   lstart, lend);

			/* Write error (if any) takes precedence. */
			if (!err)
				err = err2;
		} else {
			/* Clear any previously stored errors */
			filemap_check_errors(mapping);
		}
	} else {
		/* Nothing to write; still surface any stored error. */
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);
/*
 * Record a writeback error in the mapping's errseq_t so later fsync
 * callers (via file_check_and_advance_wb_err()) can observe it.
 */
void __filemap_set_wb_err(struct address_space *mapping, int err)
{
	errseq_t eseq = errseq_set(&mapping->wb_err, err);

	trace_filemap_set_wb_err(mapping, eseq);
}
EXPORT_SYMBOL(__filemap_set_wb_err);
/**
 * file_check_and_advance_wb_err - report any wb error that was previously
 * reported and advance wb_err to the current one
 * @file: struct file on which the error is being reported
 *
 * When userland calls fsync (or something like nfsd does the equivalent), we
 * want to report any writeback errors that occurred since the last fsync (or
 * since the file was opened if there haven't been any).
 *
 * Grab the wb_err from the mapping. If it matches what we have in the file,
 * then just quickly return 0. The file is all caught up.
 *
 * If it doesn't match, then take the mapping value, set the "seen" flag in
 * it and try to swap it into place. If it works, or another task beat us
 * to it with the new value, then update the f_wb_err and return the error
 * portion. The error at this point must be reported via proper channels
 * (a'la fsync, or NFS COMMIT operation, etc.).
 *
 * While we handle mapping->wb_err with atomic operations, the f_wb_err
 * value is protected by the f_lock since we must ensure that it reflects
 * the latest value swapped in for this file descriptor.
 */
int file_check_and_advance_wb_err(struct file *file)
{
	int err = 0;
	errseq_t old = READ_ONCE(file->f_wb_err);
	struct address_space *mapping = file->f_mapping;

	/* Locklessly handle the common case where nothing has changed */
	if (errseq_check(&mapping->wb_err, old)) {
		/* Something changed, must use slow path */
		spin_lock(&file->f_lock);
		/* Re-read the cursor under the lock; it may have advanced. */
		old = file->f_wb_err;
		err = errseq_check_and_advance(&mapping->wb_err,
						&file->f_wb_err);
		trace_file_check_and_advance_wb_err(file, old);
		spin_unlock(&file->f_lock);
	}

	/*
	 * We're mostly using this function as a drop in replacement for
	 * filemap_check_errors. Clear AS_EIO/AS_ENOSPC to emulate the effect
	 * that the legacy code would have had on these flags.
	 */
	clear_bit(AS_EIO, &mapping->flags);
	clear_bit(AS_ENOSPC, &mapping->flags);
	return err;
}
EXPORT_SYMBOL(file_check_and_advance_wb_err);
/**
 * file_write_and_wait_range - write out & wait on a file range
 * @file: file pointing to address_space with pages
 * @lstart: offset in bytes where the range starts
 * @lend: offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that @lend is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 *
 * After writing out and waiting on the data, we check and advance the
 * f_wb_err cursor to the latest value, and return any errors detected there.
 */
int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend)
{
	int err = 0, err2;
	struct address_space *mapping = file->f_mapping;

	if (mapping_needs_writeback(mapping)) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO)
			__filemap_fdatawait_range(mapping, lstart, lend);
	}
	/* Always advance the per-file error cursor, even with no writeback. */
	err2 = file_check_and_advance_wb_err(file);
	if (!err)
		err = err2;
	return err;
}
EXPORT_SYMBOL(file_write_and_wait_range);
/**
 * replace_page_cache_page - replace a pagecache page with a new one
 * @old: page to be replaced
 * @new: page to replace with
 * @gfp_mask: allocation mode
 *
 * This function replaces a page in the pagecache with a new one. On
 * success it acquires the pagecache reference for the new page and
 * drops it for the old page. Both the old and new pages must be
 * locked. This function does not add the new page to the LRU, the
 * caller must do that.
 *
 * The remove + add is atomic. The only way this function can fail is
 * memory allocation failure.
 */
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
{
	int error;

	VM_BUG_ON_PAGE(!PageLocked(old), old);
	VM_BUG_ON_PAGE(!PageLocked(new), new);
	VM_BUG_ON_PAGE(new->mapping, new);

	/* Preload so the insert below cannot fail under the lock. */
	error = radix_tree_preload(gfp_mask & GFP_RECLAIM_MASK);
	if (!error) {
		struct address_space *mapping = old->mapping;
		void (*freepage)(struct page *);
		unsigned long flags;
		pgoff_t offset = old->index;

		freepage = mapping->a_ops->freepage;
		get_page(new);
		new->mapping = mapping;
		new->index = offset;

		/* Remove old and insert new under one lock hold => atomic. */
		xa_lock_irqsave(&mapping->i_pages, flags);
		__delete_from_page_cache(old, NULL);
		error = page_cache_tree_insert(mapping, new, NULL);
		BUG_ON(error);

		/*
		 * hugetlb pages do not participate in page cache accounting.
		 */
		if (!PageHuge(new))
			__inc_node_page_state(new, NR_FILE_PAGES);
		if (PageSwapBacked(new))
			__inc_node_page_state(new, NR_SHMEM);
		xa_unlock_irqrestore(&mapping->i_pages, flags);
		mem_cgroup_migrate(old, new);
		radix_tree_preload_end();
		if (freepage)
			freepage(old);
		put_page(old);
	}
	return error;
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);
/*
 * Common implementation for adding a locked page to the page cache,
 * charging it to the current task's memcg (except hugetlb pages) and
 * optionally returning a replaced shadow entry through @shadowp.
 */
static int __add_to_page_cache_locked(struct page *page,
				      struct address_space *mapping,
				      pgoff_t offset, gfp_t gfp_mask,
				      void **shadowp)
{
	int huge = PageHuge(page);
	struct mem_cgroup *memcg;
	int error;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapBacked(page), page);

	if (!huge) {
		error = mem_cgroup_try_charge(page, current->mm,
					      gfp_mask, &memcg, false);
		if (error)
			return error;
	}

	error = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK);
	if (error) {
		if (!huge)
			mem_cgroup_cancel_charge(page, memcg, false);
		return error;
	}

	get_page(page);
	page->mapping = mapping;
	page->index = offset;

	xa_lock_irq(&mapping->i_pages);
	error = page_cache_tree_insert(mapping, page, shadowp);
	radix_tree_preload_end();
	if (unlikely(error))
		goto err_insert;

	/* hugetlb pages do not participate in page cache accounting. */
	if (!huge)
		__inc_node_page_state(page, NR_FILE_PAGES);
	xa_unlock_irq(&mapping->i_pages);
	/* Commit the memcg charge only after the insert has succeeded. */
	if (!huge)
		mem_cgroup_commit_charge(page, memcg, false, false);
	trace_mm_filemap_add_to_page_cache(page);
	return 0;
err_insert:
	page->mapping = NULL;
	/* Leave page->index set: truncation relies upon it */
	xa_unlock_irq(&mapping->i_pages);
	if (!huge)
		mem_cgroup_cancel_charge(page, memcg, false);
	put_page(page);
	return error;
}
/**
 * add_to_page_cache_locked - add a locked page to the pagecache
 * @page: page to add
 * @mapping: the page's address_space
 * @offset: page index
 * @gfp_mask: page allocation mode
 *
 * This function is used to add a page to the pagecache. It must be locked.
 * This function does not add the page to the LRU. The caller must do that.
 */
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
			     pgoff_t offset, gfp_t gfp_mask)
{
	/* No shadow entry wanted — pass NULL for shadowp. */
	return __add_to_page_cache_locked(page, mapping, offset,
					  gfp_mask, NULL);
}
EXPORT_SYMBOL(add_to_page_cache_locked);
/*
 * add_to_page_cache_lru - lock @page, insert it into the page cache and
 * put it on the LRU, activating it if its shadow entry indicates a
 * recent workingset refault.
 */
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
			  pgoff_t offset, gfp_t gfp_mask)
{
	void *shadow = NULL;
	int ret;

	__SetPageLocked(page);
	ret = __add_to_page_cache_locked(page, mapping, offset,
					 gfp_mask, &shadow);
	if (unlikely(ret))
		__ClearPageLocked(page);
	else {
		/*
		 * The page might have been evicted from cache only
		 * recently, in which case it should be activated like
		 * any other repeatedly accessed page.
		 * The exception is pages getting rewritten; evicting other
		 * data from the working set, only to cache data that will
		 * get overwritten with something else, is a waste of memory.
		 */
		if (!(gfp_mask & __GFP_WRITE) &&
		    shadow && workingset_refault(shadow)) {
			SetPageActive(page);
			workingset_activation(page);
		} else
			ClearPageActive(page);
		lru_cache_add(page);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
#ifdef CONFIG_NUMA
/*
 * Allocate a page-cache page. With cpuset memory spreading enabled, the
 * allocation is spread across the cpuset's allowed nodes; the
 * read_mems_allowed_begin()/retry() seqcount pair retries the allocation
 * if the allowed-mems set changed concurrently.
 */
struct page *__page_cache_alloc(gfp_t gfp)
{
	int n;
	struct page *page;

	if (cpuset_do_page_mem_spread()) {
		unsigned int cpuset_mems_cookie;

		do {
			cpuset_mems_cookie = read_mems_allowed_begin();
			n = cpuset_mem_spread_node();
			page = __alloc_pages_node(n, gfp, 0);
		} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));

		return page;
	}
	return alloc_pages(gfp, 0);
}
EXPORT_SYMBOL(__page_cache_alloc);
#endif
/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. By using a hash table of
 * waitqueues where the bucket discipline is to maintain all
 * waiters on the same queue and wake all when any of the pages
 * become available, and for the woken contexts to check to be
 * sure the appropriate page became available, this saves space
 * at a cost of "thundering herd" phenomena during rare hash
 * collisions.
 */
#define PAGE_WAIT_TABLE_BITS 8
#define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS)

/* 256 hashed waitqueue heads shared by all pages in the system. */
static wait_queue_head_t page_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned;

/* Map a page to its (possibly shared) waitqueue bucket. */
static wait_queue_head_t *page_waitqueue(struct page *page)
{
	return &page_wait_table[hash_ptr(page, PAGE_WAIT_TABLE_BITS)];
}
/* Boot-time setup: hashed page waitqueues plus writeback infrastructure. */
void __init pagecache_init(void)
{
	int i;

	for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++)
		init_waitqueue_head(&page_wait_table[i]);

	page_writeback_init();
}
/* This has the same layout as wait_bit_key - see fs/cachefiles/rdwr.c */
struct wait_page_key {
	struct page *page;	/* page whose bit is being woken */
	int bit_nr;		/* which page flag bit (e.g. PG_locked) */
	int page_match;		/* set by wakers when a waiter matched @page */
};

/* Per-waiter entry queued on the hashed page waitqueue. */
struct wait_page_queue {
	struct page *page;	/* page this waiter is blocked on */
	int bit_nr;		/* bit this waiter is blocked on */
	wait_queue_entry_t wait;	/* embedded waitqueue entry */
};
/*
 * Wake callback for hashed page waitqueues. Because buckets are shared,
 * each waiter must check that the wakeup is for its own page and bit.
 * Returns 0 to skip a non-matching waiter, -1 to stop the wakeup walk
 * (an exclusive waiter will take the still-set bit), or the result of
 * autoremove_wake_function() for a genuine wakeup.
 */
static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)
{
	struct wait_page_key *key = arg;
	struct wait_page_queue *wait_page
		= container_of(wait, struct wait_page_queue, wait);

	if (wait_page->page != key->page)
		return 0;
	/* Tell the waker at least one waiter was for this page. */
	key->page_match = 1;

	if (wait_page->bit_nr != key->bit_nr)
		return 0;

	/* Stop walking if it's locked */
	if (test_bit(key->bit_nr, &key->page->flags))
		return -1;

	return autoremove_wake_function(wait, mode, sync, key);
}
/*
 * Wake all waiters for @bit_nr on @page queued on the hashed waitqueue,
 * using a bookmark entry so the queue lock can be dropped periodically
 * during long wakeup walks.
 */
static void wake_up_page_bit(struct page *page, int bit_nr)
{
	wait_queue_head_t *q = page_waitqueue(page);
	struct wait_page_key key;
	unsigned long flags;
	wait_queue_entry_t bookmark;

	key.page = page;
	key.bit_nr = bit_nr;
	key.page_match = 0;

	/* A zeroed bookmark marks our resume position in the queue. */
	bookmark.flags = 0;
	bookmark.private = NULL;
	bookmark.func = NULL;
	INIT_LIST_HEAD(&bookmark.entry);

	spin_lock_irqsave(&q->lock, flags);
	__wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);

	while (bookmark.flags & WQ_FLAG_BOOKMARK) {
		/*
		 * Take a breather from holding the lock,
		 * allow pages that finish wake up asynchronously
		 * to acquire the lock and remove themselves
		 * from wait queue
		 */
		spin_unlock_irqrestore(&q->lock, flags);
		cpu_relax();
		spin_lock_irqsave(&q->lock, flags);
		__wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);
	}

	/*
	 * It is possible for other pages to have collided on the waitqueue
	 * hash, so in that case check for a page match. That prevents a long-
	 * term waiter
	 *
	 * It is still possible to miss a case here, when we woke page waiters
	 * and removed them from the waitqueue, but there are still other
	 * page waiters.
	 */
	if (!waitqueue_active(q) || !key.page_match) {
		ClearPageWaiters(page);
		/*
		 * It's possible to miss clearing Waiters here, when we woke
		 * our page waiters, but the hashed waitqueue has waiters for
		 * other pages on it.
		 *
		 * That's okay, it's a rare case. The next waker will clear it.
		 */
	}
	spin_unlock_irqrestore(&q->lock, flags);
}
/*
 * Wake waiters on @bit, but only if PG_waiters says somebody is queued
 * on the hashed waitqueue — avoids the hash lookup in the common case.
 */
static void wake_up_page(struct page *page, int bit)
{
	if (PageWaiters(page))
		wake_up_page_bit(page, bit);
}
/*
 * Core wait loop for page flag bits. Queues an on-stack wait entry on
 * the hashed waitqueue, then sleeps until the bit clears. With @lock
 * the waiter is exclusive and atomically grabs the bit (used for
 * PG_locked); otherwise it merely waits for the bit to clear.
 * Returns 0, or -EINTR if @state allows signals and one arrived.
 */
static inline int wait_on_page_bit_common(wait_queue_head_t *q,
		struct page *page, int bit_nr, int state, bool lock)
{
	struct wait_page_queue wait_page;
	wait_queue_entry_t *wait = &wait_page.wait;
	int ret = 0;

	init_wait(wait);
	wait->flags = lock ? WQ_FLAG_EXCLUSIVE : 0;
	wait->func = wake_page_function;
	wait_page.page = page;
	wait_page.bit_nr = bit_nr;

	for (;;) {
		spin_lock_irq(&q->lock);

		/* Re-queue only if a previous wakeup removed us. */
		if (likely(list_empty(&wait->entry))) {
			__add_wait_queue_entry_tail(q, wait);
			SetPageWaiters(page);
		}

		set_current_state(state);

		spin_unlock_irq(&q->lock);

		if (likely(test_bit(bit_nr, &page->flags))) {
			io_schedule();
		}

		if (lock) {
			/* Exclusive: try to take the bit ourselves. */
			if (!test_and_set_bit_lock(bit_nr, &page->flags))
				break;
		} else {
			if (!test_bit(bit_nr, &page->flags))
				break;
		}

		if (unlikely(signal_pending_state(state, current))) {
			ret = -EINTR;
			break;
		}
	}

	finish_wait(q, wait);

	/*
	 * A signal could leave PageWaiters set. Clearing it here if
	 * !waitqueue_active would be possible (by open-coding finish_wait),
	 * but still fail to catch it in the case of wait hash collision. We
	 * already can fail to clear wait hash collision cases, so don't
	 * bother with signals either.
	 */

	return ret;
}
/* Uninterruptibly wait for @bit_nr on @page to clear. */
void wait_on_page_bit(struct page *page, int bit_nr)
{
	wait_queue_head_t *q = page_waitqueue(page);

	wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, false);
}
EXPORT_SYMBOL(wait_on_page_bit);
/* As wait_on_page_bit(), but fatal signals abort the wait with -EINTR. */
int wait_on_page_bit_killable(struct page *page, int bit_nr)
{
	wait_queue_head_t *q = page_waitqueue(page);

	return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, false);
}
EXPORT_SYMBOL(wait_on_page_bit_killable);
/**
 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
 * @page: Page defining the wait queue of interest
 * @waiter: Waiter to add to the queue
 *
 * Add an arbitrary @waiter to the wait queue for the nominated @page.
 */
void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter)
{
	wait_queue_head_t *q = page_waitqueue(page);
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue_entry_tail(q, waiter);
	/* Make sure wakers know there is a waiter on this page. */
	SetPageWaiters(page);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(add_page_wait_queue);
#ifndef clear_bit_unlock_is_negative_byte

/*
 * PG_waiters is the high bit in the same byte as PG_lock.
 *
 * On x86 (and on many other architectures), we can clear PG_lock and
 * test the sign bit at the same time. But if the architecture does
 * not support that special operation, we just do this all by hand
 * instead.
 *
 * The read of PG_waiters has to be after (or concurrently with) PG_locked
 * being cleared, but a memory barrier should be unnecessary since it is
 * in the same byte as PG_locked.
 */
static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem)
{
	clear_bit_unlock(nr, mem);
	/* smp_mb__after_atomic(); */
	return test_bit(PG_waiters, mem);
}

#endif
/**
 * unlock_page - unlock a locked page
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * Note that this depends on PG_waiters being the sign bit in the byte
 * that contains PG_locked - thus the BUILD_BUG_ON(). That allows us to
 * clear the PG_locked bit and test PG_waiters at the same time fairly
 * portably (architectures that do LL/SC can test any bit, while x86 can
 * test the sign bit).
 */
void unlock_page(struct page *page)
{
	BUILD_BUG_ON(PG_waiters != 7);
	/* Tail pages share the head's lock bit. */
	page = compound_head(page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	/* One atomic op clears PG_locked and tells us if waiters exist. */
	if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags))
		wake_up_page_bit(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);
/**
 * end_page_writeback - end writeback against a page
 * @page: the page
 */
void end_page_writeback(struct page *page)
{
	/*
	 * TestClearPageReclaim could be used here but it is an atomic
	 * operation and overkill in this particular case. Failing to
	 * shuffle a page marked for immediate reclaim is too mild to
	 * justify taking an atomic operation penalty at the end of
	 * every page writeback.
	 */
	if (PageReclaim(page)) {
		ClearPageReclaim(page);
		rotate_reclaimable_page(page);
	}

	if (!test_clear_page_writeback(page))
		BUG();

	/* Order the flag clear before waking PG_writeback waiters. */
	smp_mb__after_atomic();
	wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);
/*
 * After completing I/O on a page, call this routine to update the page
 * flags appropriately
 */
void page_endio(struct page *page, bool is_write, int err)
{
	if (is_write) {
		/* Writeback completion: record any error, end writeback. */
		if (err) {
			struct address_space *mapping = page_mapping(page);

			SetPageError(page);
			if (mapping)
				mapping_set_error(mapping, err);
		}
		end_page_writeback(page);
		return;
	}

	/* Read completion: mark up-to-date on success, then unlock. */
	if (err) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		SetPageUptodate(page);
	}
	unlock_page(page);
}
EXPORT_SYMBOL_GPL(page_endio);
/**
 * __lock_page - get a lock on the page, assuming we need to sleep to get it
 * @__page: the page to lock
 */
void __lock_page(struct page *__page)
{
	/* The lock bit lives on the compound head. */
	struct page *page = compound_head(__page);
	wait_queue_head_t *q = page_waitqueue(page);

	wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, true);
}
EXPORT_SYMBOL(__lock_page);
/* As __lock_page(), but fatal signals abort the wait with -EINTR. */
int __lock_page_killable(struct page *__page)
{
	struct page *page = compound_head(__page);
	wait_queue_head_t *q = page_waitqueue(page);

	return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE, true);
}
EXPORT_SYMBOL_GPL(__lock_page_killable);
/*
 * Return values:
 * 1 - page is locked; mmap_sem is still held.
 * 0 - page is not locked.
 *     mmap_sem has been released (up_read()), unless flags had both
 *     FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
 *     which case mmap_sem is still held.
 *
 * If neither ALLOW_RETRY nor KILLABLE are set, will always return 1
 * with the page locked and the mmap_sem unperturbed.
 */
int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
			 unsigned int flags)
{
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		/*
		 * CAUTION! In this case, mmap_sem is not released
		 * even though return 0.
		 */
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			return 0;

		/* Drop mmap_sem before sleeping; caller will retry the fault. */
		up_read(&mm->mmap_sem);
		if (flags & FAULT_FLAG_KILLABLE)
			wait_on_page_locked_killable(page);
		else
			wait_on_page_locked(page);
		return 0;
	} else {
		if (flags & FAULT_FLAG_KILLABLE) {
			int ret;

			ret = __lock_page_killable(page);
			if (ret) {
				/* Interrupted by a fatal signal. */
				up_read(&mm->mmap_sem);
				return 0;
			}
		} else
			__lock_page(page);
		return 1;
	}
}
/**
 * page_cache_next_hole - find the next hole (not-present entry)
 * @mapping: mapping
 * @index: index
 * @max_scan: maximum range to search
 *
 * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the
 * lowest indexed hole.
 *
 * Returns: the index of the hole if found, otherwise returns an index
 * outside of the set specified (in which case 'return - index >=
 * max_scan' will be true). In rare cases of index wrap-around, 0 will
 * be returned.
 *
 * page_cache_next_hole may be called under rcu_read_lock. However,
 * like radix_tree_gang_lookup, this will not atomically search a
 * snapshot of the tree at a single point in time. For example, if a
 * hole is created at index 5, then subsequently a hole is created at
 * index 10, page_cache_next_hole covering both indexes may return 10
 * if called under rcu_read_lock.
 */
pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	unsigned long scanned;

	for (scanned = 0; scanned < max_scan; scanned++) {
		struct page *entry;

		entry = radix_tree_lookup(&mapping->i_pages, index);
		/* An empty slot or a shadow/swap entry counts as a hole. */
		if (!entry || radix_tree_exceptional_entry(entry))
			break;
		/* Stop at index wrap-around, returning 0. */
		if (++index == 0)
			break;
	}

	return index;
}
EXPORT_SYMBOL(page_cache_next_hole);
/**
 * page_cache_prev_hole - find the prev hole (not-present entry)
 * @mapping: mapping
 * @index: index
 * @max_scan: maximum range to search
 *
 * Search backwards in the range [max(index-max_scan+1, 0), index] for
 * the first hole.
 *
 * Returns: the index of the hole if found, otherwise returns an index
 * outside of the set specified (in which case 'index - return >=
 * max_scan' will be true). In rare cases of wrap-around, ULONG_MAX
 * will be returned.
 *
 * page_cache_prev_hole may be called under rcu_read_lock. However,
 * like radix_tree_gang_lookup, this will not atomically search a
 * snapshot of the tree at a single point in time. For example, if a
 * hole is created at index 10, then subsequently a hole is created at
 * index 5, page_cache_prev_hole covering both indexes may return 5 if
 * called under rcu_read_lock.
 */
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	unsigned long scanned;

	for (scanned = 0; scanned < max_scan; scanned++) {
		struct page *entry;

		entry = radix_tree_lookup(&mapping->i_pages, index);
		/* An empty slot or a shadow/swap entry counts as a hole. */
		if (!entry || radix_tree_exceptional_entry(entry))
			break;
		/* Stop at index wrap-around, returning ULONG_MAX. */
		if (--index == ULONG_MAX)
			break;
	}

	return index;
}
EXPORT_SYMBOL(page_cache_prev_hole);
/**
 * find_get_entry - find and get a page cache entry
 * @mapping: the address_space to search
 * @offset: the page cache index
 *
 * Looks up the page cache slot at @mapping & @offset. If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * If the slot holds a shadow entry of a previously evicted page, or a
 * swap entry from shmem/tmpfs, it is returned.
 *
 * Otherwise, %NULL is returned.
 */
struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
{
	void **pagep;
	struct page *head, *page;

	rcu_read_lock();
repeat:
	page = NULL;
	pagep = radix_tree_lookup_slot(&mapping->i_pages, offset);
	if (pagep) {
		page = radix_tree_deref_slot(pagep);
		if (unlikely(!page))
			goto out;
		if (radix_tree_exception(page)) {
			/* Retry indicator: the tree changed under us. */
			if (radix_tree_deref_retry(page))
				goto repeat;
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Return
			 * it without attempting to raise page count.
			 */
			goto out;
		}

		/* Speculative get can fail if the page is being freed. */
		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			goto repeat;

		/* The page was split under us? */
		if (compound_head(page) != head) {
			put_page(head);
			goto repeat;
		}

		/*
		 * Has the page moved?
		 * This is part of the lockless pagecache protocol. See
		 * include/linux/pagemap.h for details.
		 */
		if (unlikely(page != *pagep)) {
			put_page(head);
			goto repeat;
		}
	}
out:
	rcu_read_unlock();

	return page;
}
EXPORT_SYMBOL(find_get_entry);
/**
 * find_lock_entry - locate, pin and lock a page cache entry
 * @mapping: the address_space to search
 * @offset: the page cache index
 *
 * Looks up the page cache slot at @mapping & @offset. If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the slot holds a shadow entry of a previously evicted page, or a
 * swap entry from shmem/tmpfs, it is returned.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_entry() may sleep.
 */
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset)
{
        struct page *page;

repeat:
        page = find_get_entry(mapping, offset);
        /* Exceptional (shadow/swap) entries are returned unlocked as-is;
         * only real pages get locked here. */
        if (page && !radix_tree_exception(page)) {
                lock_page(page);
                /* Has the page been truncated? */
                if (unlikely(page_mapping(page) != mapping)) {
                        /* Raced with truncation; drop it and look again. */
                        unlock_page(page);
                        put_page(page);
                        goto repeat;
                }
                VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page);
        }
        return page;
}
EXPORT_SYMBOL(find_lock_entry);
/**
 * pagecache_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 * @fgp_flags: PCG flags
 * @gfp_mask: gfp mask to use for the page cache data page allocation
 *
 * Looks up the page cache slot at @mapping & @offset.
 *
 * PCG flags modify how the page is returned.
 *
 * @fgp_flags can be:
 *
 * - FGP_ACCESSED: the page will be marked accessed
 * - FGP_LOCK: Page is return locked
 * - FGP_CREAT: If page is not present then a new page is allocated using
 *   @gfp_mask and added to the page cache and the VM's LRU
 *   list. The page is returned locked and with an increased
 *   refcount. Otherwise, NULL is returned.
 *
 * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
 * if the GFP flags specified for FGP_CREAT are atomic.
 *
 * If there is a page cache page, it is returned with an increased refcount.
 */
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
        int fgp_flags, gfp_t gfp_mask)
{
        struct page *page;

repeat:
        page = find_get_entry(mapping, offset);
        /* Shadow/swap entries count as a miss for this interface. */
        if (radix_tree_exceptional_entry(page))
                page = NULL;
        if (!page)
                goto no_page;

        if (fgp_flags & FGP_LOCK) {
                if (fgp_flags & FGP_NOWAIT) {
                        /* Caller cannot sleep: give up rather than block. */
                        if (!trylock_page(page)) {
                                put_page(page);
                                return NULL;
                        }
                } else {
                        lock_page(page);
                }

                /* Has the page been truncated? */
                if (unlikely(page->mapping != mapping)) {
                        unlock_page(page);
                        put_page(page);
                        goto repeat;
                }
                VM_BUG_ON_PAGE(page->index != offset, page);
        }

        if (page && (fgp_flags & FGP_ACCESSED))
                mark_page_accessed(page);

no_page:
        if (!page && (fgp_flags & FGP_CREAT)) {
                int err;

                if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
                        gfp_mask |= __GFP_WRITE;
                if (fgp_flags & FGP_NOFS)
                        gfp_mask &= ~__GFP_FS;

                page = __page_cache_alloc(gfp_mask);
                if (!page)
                        return NULL;

                /* NOTE(review): a freshly created page is presumably always
                 * handed back locked, so insist the caller asked for
                 * FGP_LOCK — confirm against add_to_page_cache_lru(). */
                if (WARN_ON_ONCE(!(fgp_flags & FGP_LOCK)))
                        fgp_flags |= FGP_LOCK;

                /* Init accessed so avoid atomic mark_page_accessed later */
                if (fgp_flags & FGP_ACCESSED)
                        __SetPageReferenced(page);

                err = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
                if (unlikely(err)) {
                        put_page(page);
                        page = NULL;
                        /* Lost a race with a concurrent insertion: retry
                         * the lookup, the other page is now in the cache. */
                        if (err == -EEXIST)
                                goto repeat;
                }
        }

        return page;
}
EXPORT_SYMBOL(pagecache_get_page);
/**
 * find_get_entries - gang pagecache lookup
 * @mapping: The address_space to search
 * @start: The starting page cache index
 * @nr_entries: The maximum number of entries
 * @entries: Where the resulting entries are placed
 * @indices: The cache indices corresponding to the entries in @entries
 *
 * find_get_entries() will search for and return a group of up to
 * @nr_entries entries in the mapping. The entries are placed at
 * @entries. find_get_entries() takes a reference against any actual
 * pages it returns.
 *
 * The search returns a group of mapping-contiguous page cache entries
 * with ascending indexes. There may be holes in the indices due to
 * not-present pages.
 *
 * Any shadow entries of evicted pages, or swap entries from
 * shmem/tmpfs, are included in the returned array.
 *
 * find_get_entries() returns the number of pages and shadow entries
 * which were found.
 */
unsigned find_get_entries(struct address_space *mapping,
                          pgoff_t start, unsigned int nr_entries,
                          struct page **entries, pgoff_t *indices)
{
        void **slot;
        unsigned int ret = 0;
        struct radix_tree_iter iter;

        if (!nr_entries)
                return 0;

        /* Lockless walk; the speculative refcount dance below guards
         * against pages being freed or moved under us. */
        rcu_read_lock();
        radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
                struct page *head, *page;
repeat:
                page = radix_tree_deref_slot(slot);
                if (unlikely(!page))
                        continue;
                if (radix_tree_exception(page)) {
                        /* Tree was restructured; re-walk this iteration. */
                        if (radix_tree_deref_retry(page)) {
                                slot = radix_tree_iter_retry(&iter);
                                continue;
                        }
                        /*
                         * A shadow entry of a recently evicted page, a swap
                         * entry from shmem/tmpfs or a DAX entry. Return it
                         * without attempting to raise page count.
                         */
                        goto export;
                }

                head = compound_head(page);
                if (!page_cache_get_speculative(head))
                        goto repeat;

                /* The page was split under us? */
                if (compound_head(page) != head) {
                        put_page(head);
                        goto repeat;
                }

                /* Has the page moved? */
                if (unlikely(page != *slot)) {
                        put_page(head);
                        goto repeat;
                }
export:
                indices[ret] = iter.index;
                entries[ret] = page;
                if (++ret == nr_entries)
                        break;
        }
        rcu_read_unlock();
        return ret;
}
/**
 * find_get_pages_range - gang pagecache lookup
 * @mapping: The address_space to search
 * @start: The starting page index
 * @end: The final page index (inclusive)
 * @nr_pages: The maximum number of pages
 * @pages: Where the resulting pages are placed
 *
 * find_get_pages_range() will search for and return a group of up to @nr_pages
 * pages in the mapping starting at index @start and up to index @end
 * (inclusive). The pages are placed at @pages. find_get_pages_range() takes
 * a reference against the returned pages.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes. There may be holes in the indices due to not-present pages.
 * We also update @start to index the next page for the traversal.
 *
 * find_get_pages_range() returns the number of pages which were found. If this
 * number is smaller than @nr_pages, the end of specified range has been
 * reached.
 */
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
                              pgoff_t end, unsigned int nr_pages,
                              struct page **pages)
{
        struct radix_tree_iter iter;
        void **slot;
        unsigned ret = 0;

        if (unlikely(!nr_pages))
                return 0;

        rcu_read_lock();
        radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, *start) {
                struct page *head, *page;

                /* Stop once the walk has gone past the inclusive @end. */
                if (iter.index > end)
                        break;
repeat:
                page = radix_tree_deref_slot(slot);
                if (unlikely(!page))
                        continue;

                if (radix_tree_exception(page)) {
                        /* Tree was restructured; re-walk this iteration. */
                        if (radix_tree_deref_retry(page)) {
                                slot = radix_tree_iter_retry(&iter);
                                continue;
                        }
                        /*
                         * A shadow entry of a recently evicted page,
                         * or a swap entry from shmem/tmpfs. Skip
                         * over it.
                         */
                        continue;
                }

                head = compound_head(page);
                if (!page_cache_get_speculative(head))
                        goto repeat;

                /* The page was split under us? */
                if (compound_head(page) != head) {
                        put_page(head);
                        goto repeat;
                }

                /* Has the page moved? */
                if (unlikely(page != *slot)) {
                        put_page(head);
                        goto repeat;
                }

                pages[ret] = page;
                if (++ret == nr_pages) {
                        /* Buffer full: resume after the last page returned. */
                        *start = pages[ret - 1]->index + 1;
                        goto out;
                }
        }

        /*
         * We come here when there is no page beyond @end. We take care to not
         * overflow the index @start as it confuses some of the callers. This
         * breaks the iteration when there is page at index -1 but that is
         * already broken anyway.
         */
        if (end == (pgoff_t)-1)
                *start = (pgoff_t)-1;
        else
                *start = end + 1;
out:
        rcu_read_unlock();

        return ret;
}
/**
 * find_get_pages_contig - gang contiguous pagecache lookup
 * @mapping: The address_space to search
 * @index: The starting page index
 * @nr_pages: The maximum number of pages
 * @pages: Where the resulting pages are placed
 *
 * find_get_pages_contig() works exactly like find_get_pages(), except
 * that the returned number of pages are guaranteed to be contiguous.
 *
 * find_get_pages_contig() returns the number of pages which were found.
 */
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
                               unsigned int nr_pages, struct page **pages)
{
        struct radix_tree_iter iter;
        void **slot;
        unsigned int ret = 0;

        if (unlikely(!nr_pages))
                return 0;

        rcu_read_lock();
        radix_tree_for_each_contig(slot, &mapping->i_pages, &iter, index) {
                struct page *head, *page;
repeat:
                page = radix_tree_deref_slot(slot);
                /* The hole, there no reason to continue */
                if (unlikely(!page))
                        break;

                if (radix_tree_exception(page)) {
                        /* Tree was restructured; re-walk this iteration. */
                        if (radix_tree_deref_retry(page)) {
                                slot = radix_tree_iter_retry(&iter);
                                continue;
                        }
                        /*
                         * A shadow entry of a recently evicted page,
                         * or a swap entry from shmem/tmpfs. Stop
                         * looking for contiguous pages.
                         */
                        break;
                }

                head = compound_head(page);
                if (!page_cache_get_speculative(head))
                        goto repeat;

                /* The page was split under us? */
                if (compound_head(page) != head) {
                        put_page(head);
                        goto repeat;
                }

                /* Has the page moved? */
                if (unlikely(page != *slot)) {
                        put_page(head);
                        goto repeat;
                }

                /*
                 * must check mapping and index after taking the ref.
                 * otherwise we can get both false positives and false
                 * negatives, which is just confusing to the caller.
                 */
                if (page->mapping == NULL || page_to_pgoff(page) != iter.index) {
                        put_page(page);
                        break;
                }

                pages[ret] = page;
                if (++ret == nr_pages)
                        break;
        }
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL(find_get_pages_contig);
/**
 * find_get_pages_range_tag - find and return pages in given range matching @tag
 * @mapping: the address_space to search
 * @index: the starting page index
 * @end: The final page index (inclusive)
 * @tag: the tag index
 * @nr_pages: the maximum number of pages
 * @pages: where the resulting pages are placed
 *
 * Like find_get_pages, except we only return pages which are tagged with
 * @tag. We update @index to index the next page for the traversal.
 */
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
                                  pgoff_t end, int tag, unsigned int nr_pages,
                                  struct page **pages)
{
        struct radix_tree_iter iter;
        void **slot;
        unsigned ret = 0;

        if (unlikely(!nr_pages))
                return 0;

        rcu_read_lock();
        radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter, *index, tag) {
                struct page *head, *page;

                /* Stop once the walk has gone past the inclusive @end. */
                if (iter.index > end)
                        break;
repeat:
                page = radix_tree_deref_slot(slot);
                if (unlikely(!page))
                        continue;

                if (radix_tree_exception(page)) {
                        /* Tree was restructured; re-walk this iteration. */
                        if (radix_tree_deref_retry(page)) {
                                slot = radix_tree_iter_retry(&iter);
                                continue;
                        }
                        /*
                         * A shadow entry of a recently evicted page.
                         *
                         * Those entries should never be tagged, but
                         * this tree walk is lockless and the tags are
                         * looked up in bulk, one radix tree node at a
                         * time, so there is a sizable window for page
                         * reclaim to evict a page we saw tagged.
                         *
                         * Skip over it.
                         */
                        continue;
                }

                head = compound_head(page);
                if (!page_cache_get_speculative(head))
                        goto repeat;

                /* The page was split under us? */
                if (compound_head(page) != head) {
                        put_page(head);
                        goto repeat;
                }

                /* Has the page moved? */
                if (unlikely(page != *slot)) {
                        put_page(head);
                        goto repeat;
                }

                pages[ret] = page;
                if (++ret == nr_pages) {
                        /* Buffer full: resume after the last page returned. */
                        *index = pages[ret - 1]->index + 1;
                        goto out;
                }
        }

        /*
         * We come here when we got at @end. We take care to not overflow the
         * index @index as it confuses some of the callers. This breaks the
         * iteration when there is page at index -1 but that is already broken
         * anyway.
         */
        if (end == (pgoff_t)-1)
                *index = (pgoff_t)-1;
        else
                *index = end + 1;
out:
        rcu_read_unlock();

        return ret;
}
EXPORT_SYMBOL(find_get_pages_range_tag);
/**
 * find_get_entries_tag - find and return entries that match @tag
 * @mapping: the address_space to search
 * @start: the starting page cache index
 * @tag: the tag index
 * @nr_entries: the maximum number of entries
 * @entries: where the resulting entries are placed
 * @indices: the cache indices corresponding to the entries in @entries
 *
 * Like find_get_entries, except we only return entries which are tagged with
 * @tag.
 */
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
                              int tag, unsigned int nr_entries,
                              struct page **entries, pgoff_t *indices)
{
        void **slot;
        unsigned int ret = 0;
        struct radix_tree_iter iter;

        if (!nr_entries)
                return 0;

        rcu_read_lock();
        radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter, start, tag) {
                struct page *head, *page;
repeat:
                page = radix_tree_deref_slot(slot);
                if (unlikely(!page))
                        continue;
                if (radix_tree_exception(page)) {
                        /* Tree was restructured; re-walk this iteration. */
                        if (radix_tree_deref_retry(page)) {
                                slot = radix_tree_iter_retry(&iter);
                                continue;
                        }
                        /*
                         * A shadow entry of a recently evicted page, a swap
                         * entry from shmem/tmpfs or a DAX entry. Return it
                         * without attempting to raise page count.
                         */
                        goto export;
                }

                head = compound_head(page);
                if (!page_cache_get_speculative(head))
                        goto repeat;

                /* The page was split under us? */
                if (compound_head(page) != head) {
                        put_page(head);
                        goto repeat;
                }

                /* Has the page moved? */
                if (unlikely(page != *slot)) {
                        put_page(head);
                        goto repeat;
                }
export:
                indices[ret] = iter.index;
                entries[ret] = page;
                if (++ret == nr_entries)
                        break;
        }
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL(find_get_entries_tag);
/*
 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
 * a _large_ part of the i/o request. Imagine the worst scenario:
 *
 *      ---R__________________________________________B__________
 *         ^ reading here                             ^ bad block(assume 4k)
 *
 * read(R) => miss => readahead(R...B) => media error => frustrating retries
 * => failing the whole request => read(R) => read(R+1) =>
 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
 *
 * It is going insane. Fix it by quickly scaling down the readahead size.
 */
static void shrink_readahead_size_eio(struct file *filp,
                                      struct file_ra_state *ra)
{
        /* Back off aggressively: shrink the window to a quarter. */
        ra->ra_pages = ra->ra_pages / 4;
}
/**
 * generic_file_buffered_read - generic file read routine
 * @iocb: the iocb to read
 * @iter: data destination
 * @written: already copied
 *
 * This is a generic file read routine, and uses the
 * mapping->a_ops->readpage() function for the actual low-level stuff.
 *
 * This is really ugly. But the goto's actually try to clarify some
 * of the logic when it comes to error handling etc.
 */
static ssize_t generic_file_buffered_read(struct kiocb *iocb,
                struct iov_iter *iter, ssize_t written)
{
        struct file *filp = iocb->ki_filp;
        struct address_space *mapping = filp->f_mapping;
        struct inode *inode = mapping->host;
        struct file_ra_state *ra = &filp->f_ra;
        loff_t *ppos = &iocb->ki_pos;
        pgoff_t index;
        pgoff_t last_index;
        pgoff_t prev_index;
        unsigned long offset;      /* offset into pagecache page */
        unsigned int prev_offset;
        int error = 0;

        if (unlikely(*ppos >= inode->i_sb->s_maxbytes))
                return 0;
        iov_iter_truncate(iter, inode->i_sb->s_maxbytes);

        /* Split the file position into a page index plus in-page offset,
         * and remember where the previous read left off (for readahead
         * and mark_page_accessed decisions). */
        index = *ppos >> PAGE_SHIFT;
        prev_index = ra->prev_pos >> PAGE_SHIFT;
        prev_offset = ra->prev_pos & (PAGE_SIZE-1);
        last_index = (*ppos + iter->count + PAGE_SIZE-1) >> PAGE_SHIFT;
        offset = *ppos & ~PAGE_MASK;

        for (;;) {
                struct page *page;
                pgoff_t end_index;
                loff_t isize;
                unsigned long nr, ret;

                cond_resched();
find_page:
                if (fatal_signal_pending(current)) {
                        error = -EINTR;
                        goto out;
                }

                page = find_get_page(mapping, index);
                if (!page) {
                        /* IOCB_NOWAIT must not block for I/O. */
                        if (iocb->ki_flags & IOCB_NOWAIT)
                                goto would_block;
                        page_cache_sync_readahead(mapping,
                                        ra, filp,
                                        index, last_index - index);
                        page = find_get_page(mapping, index);
                        if (unlikely(page == NULL))
                                goto no_cached_page;
                }
                if (PageReadahead(page)) {
                        page_cache_async_readahead(mapping,
                                        ra, filp, page,
                                        index, last_index - index);
                }
                if (!PageUptodate(page)) {
                        if (iocb->ki_flags & IOCB_NOWAIT) {
                                put_page(page);
                                goto would_block;
                        }

                        /*
                         * See comment in do_read_cache_page on why
                         * wait_on_page_locked is used to avoid unnecessarily
                         * serialisations and why it's safe.
                         */
                        error = wait_on_page_locked_killable(page);
                        if (unlikely(error))
                                goto readpage_error;
                        if (PageUptodate(page))
                                goto page_ok;

                        /* Try the partial-uptodate fast path only when the
                         * fs supports it and blocks are smaller than a page. */
                        if (inode->i_blkbits == PAGE_SHIFT ||
                                        !mapping->a_ops->is_partially_uptodate)
                                goto page_not_up_to_date;
                        /* pipes can't handle partially uptodate pages */
                        if (unlikely(iter->type & ITER_PIPE))
                                goto page_not_up_to_date;
                        if (!trylock_page(page))
                                goto page_not_up_to_date;
                        /* Did it get truncated before we got the lock? */
                        if (!page->mapping)
                                goto page_not_up_to_date_locked;
                        if (!mapping->a_ops->is_partially_uptodate(page,
                                                        offset, iter->count))
                                goto page_not_up_to_date_locked;
                        unlock_page(page);
                }
page_ok:
                /*
                 * i_size must be checked after we know the page is Uptodate.
                 *
                 * Checking i_size after the check allows us to calculate
                 * the correct value for "nr", which means the zero-filled
                 * part of the page is not copied back to userspace (unless
                 * another truncate extends the file - this is desired though).
                 */

                isize = i_size_read(inode);
                end_index = (isize - 1) >> PAGE_SHIFT;
                if (unlikely(!isize || index > end_index)) {
                        put_page(page);
                        goto out;
                }

                /* nr is the maximum number of bytes to copy from this page */
                nr = PAGE_SIZE;
                if (index == end_index) {
                        nr = ((isize - 1) & ~PAGE_MASK) + 1;
                        if (nr <= offset) {
                                put_page(page);
                                goto out;
                        }
                }
                nr = nr - offset;

                /* If users can be writing to this page using arbitrary
                 * virtual addresses, take care about potential aliasing
                 * before reading the page on the kernel side.
                 */
                if (mapping_writably_mapped(mapping))
                        flush_dcache_page(page);

                /*
                 * When a sequential read accesses a page several times,
                 * only mark it as accessed the first time.
                 */
                if (prev_index != index || offset != prev_offset)
                        mark_page_accessed(page);
                prev_index = index;

                /*
                 * Ok, we have the page, and it's up-to-date, so
                 * now we can copy it to user space...
                 */

                ret = copy_page_to_iter(page, offset, nr, iter);
                offset += ret;
                index += offset >> PAGE_SHIFT;
                offset &= ~PAGE_MASK;
                prev_offset = offset;

                put_page(page);
                written += ret;
                if (!iov_iter_count(iter))
                        goto out;
                /* Short copy means the user buffer faulted. */
                if (ret < nr) {
                        error = -EFAULT;
                        goto out;
                }
                continue;

page_not_up_to_date:
                /* Get exclusive access to the page ... */
                error = lock_page_killable(page);
                if (unlikely(error))
                        goto readpage_error;

page_not_up_to_date_locked:
                /* Did it get truncated before we got the lock? */
                if (!page->mapping) {
                        unlock_page(page);
                        put_page(page);
                        continue;
                }

                /* Did somebody else fill it already? */
                if (PageUptodate(page)) {
                        unlock_page(page);
                        goto page_ok;
                }

readpage:
                /*
                 * A previous I/O error may have been due to temporary
                 * failures, eg. multipath errors.
                 * PG_error will be set again if readpage fails.
                 */
                ClearPageError(page);
                /* Start the actual read. The read will unlock the page. */
                error = mapping->a_ops->readpage(filp, page);

                if (unlikely(error)) {
                        if (error == AOP_TRUNCATED_PAGE) {
                                /* Filesystem dropped the page; retry lookup. */
                                put_page(page);
                                error = 0;
                                goto find_page;
                        }
                        goto readpage_error;
                }

                if (!PageUptodate(page)) {
                        error = lock_page_killable(page);
                        if (unlikely(error))
                                goto readpage_error;
                        if (!PageUptodate(page)) {
                                if (page->mapping == NULL) {
                                        /*
                                         * invalidate_mapping_pages got it
                                         */
                                        unlock_page(page);
                                        put_page(page);
                                        goto find_page;
                                }
                                unlock_page(page);
                                shrink_readahead_size_eio(filp, ra);
                                error = -EIO;
                                goto readpage_error;
                        }
                        unlock_page(page);
                }

                goto page_ok;

readpage_error:
                /* UHHUH! A synchronous read error occurred. Report it */
                put_page(page);
                goto out;

no_cached_page:
                /*
                 * Ok, it wasn't cached, so we need to create a new
                 * page..
                 */
                page = page_cache_alloc(mapping);
                if (!page) {
                        error = -ENOMEM;
                        goto out;
                }
                error = add_to_page_cache_lru(page, mapping, index,
                                mapping_gfp_constraint(mapping, GFP_KERNEL));
                if (error) {
                        put_page(page);
                        /* Lost a race to insert: look the page up again. */
                        if (error == -EEXIST) {
                                error = 0;
                                goto find_page;
                        }
                        goto out;
                }
                goto readpage;
        }

would_block:
        error = -EAGAIN;
out:
        /* Record where this read stopped for the next readahead decision. */
        ra->prev_pos = prev_index;
        ra->prev_pos <<= PAGE_SHIFT;
        ra->prev_pos |= prev_offset;

        *ppos = ((loff_t)index << PAGE_SHIFT) + offset;
        file_accessed(filp);
        return written ? written : error;
}
/**
 * generic_file_read_iter - generic filesystem read routine
 * @iocb: kernel I/O control block
 * @iter: destination for the data read
 *
 * This is the "read_iter()" routine for all filesystems
 * that can use the page cache directly.
 */
ssize_t
generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
        size_t count = iov_iter_count(iter);
        ssize_t retval = 0;

        if (!count)
                goto out; /* skip atime */

        if (iocb->ki_flags & IOCB_DIRECT) {
                struct file *file = iocb->ki_filp;
                struct address_space *mapping = file->f_mapping;
                struct inode *inode = mapping->host;
                loff_t size;

                size = i_size_read(inode);
                if (iocb->ki_flags & IOCB_NOWAIT) {
                        /* Cached pages would force a (blocking) writeback
                         * flush first, which IOCB_NOWAIT forbids. */
                        if (filemap_range_has_page(mapping, iocb->ki_pos,
                                                   iocb->ki_pos + count - 1))
                                return -EAGAIN;
                } else {
                        /* Flush dirty pagecache over the range so direct
                         * I/O sees current data. */
                        retval = filemap_write_and_wait_range(mapping,
                                                iocb->ki_pos,
                                                iocb->ki_pos + count - 1);
                        if (retval < 0)
                                goto out;
                }

                file_accessed(file);

                retval = mapping->a_ops->direct_IO(iocb, iter);
                if (retval >= 0) {
                        iocb->ki_pos += retval;
                        count -= retval;
                }
                /* Give back to @iter whatever direct_IO consumed but
                 * did not actually transfer. */
                iov_iter_revert(iter, count - iov_iter_count(iter));

                /*
                 * Btrfs can have a short DIO read if we encounter
                 * compressed extents, so if there was an error, or if
                 * we've already read everything we wanted to, or if
                 * there was a short read because we hit EOF, go ahead
                 * and return. Otherwise fallthrough to buffered io for
                 * the rest of the read. Buffered reads will not work for
                 * DAX files, so don't bother trying.
                 */
                if (retval < 0 || !count || iocb->ki_pos >= size ||
                    IS_DAX(inode))
                        goto out;
        }

        retval = generic_file_buffered_read(iocb, iter, retval);
out:
        return retval;
}
EXPORT_SYMBOL(generic_file_read_iter);
#ifdef CONFIG_MMU
/**
 * page_cache_read - adds requested page to the page cache if not already there
 * @file: file to read
 * @offset: page index
 * @gfp_mask: memory allocation flags
 *
 * This adds the requested page to the page cache if it isn't already there,
 * and schedules an I/O to read in its contents from disk.
 */
static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
{
        struct address_space *mapping = file->f_mapping;
        int ret;

        for (;;) {
                struct page *page = __page_cache_alloc(gfp_mask);

                if (!page)
                        return -ENOMEM;

                ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
                if (ret == 0)
                        ret = mapping->a_ops->readpage(file, page);
                else if (ret == -EEXIST)
                        ret = 0; /* somebody else added it first - fine */

                put_page(page);

                /* Only a filesystem-truncated page warrants another go. */
                if (ret != AOP_TRUNCATED_PAGE)
                        return ret;
        }
}
#define MMAP_LOTSAMISS  (100)

/*
 * Synchronous readahead happens when we don't even find
 * a page in the page cache at all.
 */
static void do_sync_mmap_readahead(struct vm_area_struct *vma,
                                   struct file_ra_state *ra,
                                   struct file *file,
                                   pgoff_t offset)
{
        struct address_space *mapping = file->f_mapping;

        /* Readahead is pointless for random access or when disabled. */
        if ((vma->vm_flags & VM_RAND_READ) || !ra->ra_pages)
                return;

        /* Strictly sequential access: use the classic readahead path. */
        if (vma->vm_flags & VM_SEQ_READ) {
                page_cache_sync_readahead(mapping, ra, file, offset,
                                          ra->ra_pages);
                return;
        }

        /* Avoid banging the cache line if not needed */
        if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
                ra->mmap_miss++;

        /*
         * Do we miss much more than hit in this file? If so,
         * stop bothering with read-ahead. It will only hurt.
         */
        if (ra->mmap_miss > MMAP_LOTSAMISS)
                return;

        /*
         * mmap read-around: center a window of ra_pages on the fault.
         */
        ra->start = max_t(long, 0, offset - ra->ra_pages / 2);
        ra->size = ra->ra_pages;
        ra->async_size = ra->ra_pages / 4;
        ra_submit(ra, mapping, file);
}
/*
 * Asynchronous readahead happens when we find the page and PG_readahead,
 * so we want to possibly extend the readahead further..
 */
static void do_async_mmap_readahead(struct vm_area_struct *vma,
                                    struct file_ra_state *ra,
                                    struct file *file,
                                    struct page *page,
                                    pgoff_t offset)
{
        /* If we don't want any read-ahead, don't bother */
        if (vma->vm_flags & VM_RAND_READ)
                return;

        /* A cache hit: decay the miss counter. */
        if (ra->mmap_miss > 0)
                ra->mmap_miss--;

        if (!PageReadahead(page))
                return;

        page_cache_async_readahead(file->f_mapping, ra, file, page, offset,
                                   ra->ra_pages);
}
/**
 * filemap_fault - read in file data for page fault handling
 * @vmf: struct vm_fault containing details of the fault
 *
 * filemap_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * The goto's are kind of ugly, but this streamlines the normal case of having
 * it in the page cache, and handles the special cases reasonably without
 * having a lot of duplicated code.
 *
 * vma->vm_mm->mmap_sem must be held on entry.
 *
 * If our return value has VM_FAULT_RETRY set, it's because
 * lock_page_or_retry() returned 0.
 * The mmap_sem has usually been released in this case.
 * See __lock_page_or_retry() for the exception.
 *
 * If our return value does not have VM_FAULT_RETRY set, the mmap_sem
 * has not been released.
 *
 * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
 */
vm_fault_t filemap_fault(struct vm_fault *vmf)
{
        int error;
        struct file *file = vmf->vma->vm_file;
        struct address_space *mapping = file->f_mapping;
        struct file_ra_state *ra = &file->f_ra;
        struct inode *inode = mapping->host;
        pgoff_t offset = vmf->pgoff;
        pgoff_t max_off;
        struct page *page;
        vm_fault_t ret = 0;

        /* Fault beyond EOF is a bus error. */
        max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
        if (unlikely(offset >= max_off))
                return VM_FAULT_SIGBUS;

        /*
         * Do we have something in the page cache already?
         */
        page = find_get_page(mapping, offset);
        if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
                /*
                 * We found the page, so try async readahead before
                 * waiting for the lock.
                 */
                do_async_mmap_readahead(vmf->vma, ra, file, page, offset);
        } else if (!page) {
                /* No page in the page cache at all */
                do_sync_mmap_readahead(vmf->vma, ra, file, offset);
                count_vm_event(PGMAJFAULT);
                count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
                ret = VM_FAULT_MAJOR;
retry_find:
                page = find_get_page(mapping, offset);
                if (!page)
                        goto no_cached_page;
        }

        if (!lock_page_or_retry(page, vmf->vma->vm_mm, vmf->flags)) {
                /* Lock not acquired; mmap_sem may have been dropped. */
                put_page(page);
                return ret | VM_FAULT_RETRY;
        }

        /* Did it get truncated? */
        if (unlikely(page->mapping != mapping)) {
                unlock_page(page);
                put_page(page);
                goto retry_find;
        }
        VM_BUG_ON_PAGE(page->index != offset, page);

        /*
         * We have a locked page in the page cache, now we need to check
         * that it's up-to-date. If not, it is going to be due to an error.
         */
        if (unlikely(!PageUptodate(page)))
                goto page_not_uptodate;

        /*
         * Found the page and have a reference on it.
         * We must recheck i_size under page lock.
         */
        max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
        if (unlikely(offset >= max_off)) {
                unlock_page(page);
                put_page(page);
                return VM_FAULT_SIGBUS;
        }

        vmf->page = page;
        return ret | VM_FAULT_LOCKED;

no_cached_page:
        /*
         * We're only likely to ever get here if MADV_RANDOM is in
         * effect.
         */
        error = page_cache_read(file, offset, vmf->gfp_mask);

        /*
         * The page we want has now been added to the page cache.
         * In the unlikely event that someone removed it in the
         * meantime, we'll just come back here and read it again.
         */
        if (error >= 0)
                goto retry_find;

        /*
         * An error return from page_cache_read can result if the
         * system is low on memory, or a problem occurs while trying
         * to schedule I/O.
         */
        if (error == -ENOMEM)
                return VM_FAULT_OOM;
        return VM_FAULT_SIGBUS;

page_not_uptodate:
        /*
         * Umm, take care of errors if the page isn't up-to-date.
         * Try to re-read it _once_. We do this synchronously,
         * because there really aren't any performance issues here
         * and we need to check for errors.
         */
        ClearPageError(page);
        error = mapping->a_ops->readpage(file, page);
        if (!error) {
                wait_on_page_locked(page);
                if (!PageUptodate(page))
                        error = -EIO;
        }
        put_page(page);

        if (!error || error == AOP_TRUNCATED_PAGE)
                goto retry_find;

        /* Things didn't work out. Return zero to tell the mm layer so. */
        shrink_readahead_size_eio(file, ra);
        return VM_FAULT_SIGBUS;
}
EXPORT_SYMBOL(filemap_fault);
/*
 * filemap_map_pages - map already-cached, uptodate pages around a fault.
 * Called from the fault path to opportunistically fill nearby ptes for
 * pages in [start_pgoff, end_pgoff] without further I/O; any page that
 * is not immediately usable (not uptodate, readahead-marked, poisoned,
 * or contended) is simply skipped.
 */
void filemap_map_pages(struct vm_fault *vmf,
                pgoff_t start_pgoff, pgoff_t end_pgoff)
{
        struct radix_tree_iter iter;
        void **slot;
        struct file *file = vmf->vma->vm_file;
        struct address_space *mapping = file->f_mapping;
        pgoff_t last_pgoff = start_pgoff;
        unsigned long max_idx;
        struct page *head, *page;

        rcu_read_lock();
        radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start_pgoff) {
                if (iter.index > end_pgoff)
                        break;
repeat:
                page = radix_tree_deref_slot(slot);
                if (unlikely(!page))
                        goto next;
                if (radix_tree_exception(page)) {
                        /* Tree was restructured; re-walk this iteration. */
                        if (radix_tree_deref_retry(page)) {
                                slot = radix_tree_iter_retry(&iter);
                                continue;
                        }
                        /* Shadow/swap entry: nothing to map here. */
                        goto next;
                }

                head = compound_head(page);
                if (!page_cache_get_speculative(head))
                        goto repeat;

                /* The page was split under us? */
                if (compound_head(page) != head) {
                        put_page(head);
                        goto repeat;
                }

                /* Has the page moved? */
                if (unlikely(page != *slot)) {
                        put_page(head);
                        goto repeat;
                }

                /* Only map pages that are immediately usable; anything
                 * else would require I/O or error handling. */
                if (!PageUptodate(page) ||
                                PageReadahead(page) ||
                                PageHWPoison(page))
                        goto skip;
                if (!trylock_page(page))
                        goto skip;

                /* Re-check under the page lock (truncate/invalidate race). */
                if (page->mapping != mapping || !PageUptodate(page))
                        goto unlock;

                /* Don't map beyond the current EOF. */
                max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
                if (page->index >= max_idx)
                        goto unlock;

                if (file->f_ra.mmap_miss > 0)
                        file->f_ra.mmap_miss--;

                /* Advance the fault address/pte pointer to this index. */
                vmf->address += (iter.index - last_pgoff) << PAGE_SHIFT;
                if (vmf->pte)
                        vmf->pte += iter.index - last_pgoff;
                last_pgoff = iter.index;
                if (alloc_set_pte(vmf, NULL, page))
                        goto unlock;
                unlock_page(page);
                goto next;
unlock:
                unlock_page(page);
skip:
                put_page(page);
next:
                /* Huge page is mapped? No need to proceed. */
                if (pmd_trans_huge(*vmf->pmd))
                        break;
                if (iter.index == end_pgoff)
                        break;
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL(filemap_map_pages);
/*
 * filemap_page_mkwrite - make vmf->page writable on a write fault.
 * Returns VM_FAULT_LOCKED with the page locked and dirtied, or
 * VM_FAULT_NOPAGE if the page was truncated away in the meantime.
 * The sb_start/sb_end_pagefault pair serialises against filesystem freeze.
 */
vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
{
        struct page *page = vmf->page;
        struct inode *inode = file_inode(vmf->vma->vm_file);
        vm_fault_t ret = VM_FAULT_LOCKED;

        sb_start_pagefault(inode->i_sb);
        file_update_time(vmf->vma->vm_file);
        lock_page(page);
        /* Truncated while we waited for the lock? Tell mm to retry. */
        if (page->mapping != inode->i_mapping) {
                unlock_page(page);
                ret = VM_FAULT_NOPAGE;
                goto out;
        }
        /*
         * We mark the page dirty already here so that when freeze is in
         * progress, we are guaranteed that writeback during freezing will
         * see the dirty page and writeprotect it again.
         */
        set_page_dirty(page);
        wait_for_stable_page(page);
out:
        sb_end_pagefault(inode->i_sb);
        return ret;
}
/* Default vm_ops for mmaps of page-cache backed files. */
const struct vm_operations_struct generic_file_vm_ops = {
        .fault          = filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = filemap_page_mkwrite,
};
/* This is used for a general mmap of a disk file */
int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
        /* A filesystem without ->readpage cannot service faults. */
        if (!file->f_mapping->a_ops->readpage)
                return -ENOEXEC;
        file_accessed(file);
        vma->vm_ops = &generic_file_vm_ops;
        return 0;
}
/*
 * This is for filesystems which do not implement ->writepage.
 */
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* Reject a shared mapping that could ever become writable. */
        if ((vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
            (VM_SHARED | VM_MAYWRITE))
                return -EINVAL;
        return generic_file_mmap(file, vma);
}
#else
int filemap_page_mkwrite(struct vm_fault *vmf)
{
return -ENOSYS;
}
int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
return -ENOSYS;
}
int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
{
return -ENOSYS;
}
#endif /* CONFIG_MMU */
EXPORT_SYMBOL(filemap_page_mkwrite);
EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_file_readonly_mmap);
/*
 * Wait for a page handed back by a read to be unlocked; translate a
 * failed read (page still not uptodate) into ERR_PTR(-EIO), dropping
 * our reference. Error pointers are passed through untouched.
 */
static struct page *wait_on_page_read(struct page *page)
{
        if (IS_ERR(page))
                return page;

        wait_on_page_locked(page);
        if (PageUptodate(page))
                return page;

        put_page(page);
        return ERR_PTR(-EIO);
}
/*
 * do_read_cache_page - common engine behind read_cache_page*().
 * @mapping: the page's address_space
 * @index: page index to read
 * @filler: callback that starts the read, invoked as filler(data, page)
 * @data: opaque first argument handed to @filler
 * @gfp: allocation flags used if a fresh page must be created
 *
 * Looks the page up in the cache, allocating and filling it via @filler
 * if absent, and only returns once it is uptodate.  Returns the page
 * with an elevated refcount, or an ERR_PTR: -ENOMEM on allocation
 * failure, -EIO if the read never made the page uptodate, or whatever
 * negative value @filler returned.
 */
static struct page *do_read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data,
				gfp_t gfp)
{
	struct page *page;
	int err;
repeat:
	page = find_get_page(mapping, index);
	if (!page) {
		page = __page_cache_alloc(gfp);
		if (!page)
			return ERR_PTR(-ENOMEM);
		err = add_to_page_cache_lru(page, mapping, index, gfp);
		if (unlikely(err)) {
			put_page(page);
			/* -EEXIST: someone else inserted it first; retry lookup */
			if (err == -EEXIST)
				goto repeat;
			/* Presumably ENOMEM for radix tree node */
			return ERR_PTR(err);
		}

filler:
		err = filler(data, page);
		if (err < 0) {
			put_page(page);
			return ERR_PTR(err);
		}

		page = wait_on_page_read(page);
		if (IS_ERR(page))
			return page;
		goto out;
	}
	if (PageUptodate(page))
		goto out;

	/*
	 * Page is not up to date and may be locked due one of the following
	 * case a: Page is being filled and the page lock is held
	 * case b: Read/write error clearing the page uptodate status
	 * case c: Truncation in progress (page locked)
	 * case d: Reclaim in progress
	 *
	 * Case a, the page will be up to date when the page is unlocked.
	 * There is no need to serialise on the page lock here as the page
	 * is pinned so the lock gives no additional protection. Even if
	 * the page is truncated, the data is still valid if PageUptodate as
	 * it's a race vs truncate race.
	 * Case b, the page will not be up to date
	 * Case c, the page may be truncated but in itself, the data may still
	 * be valid after IO completes as it's a read vs truncate race. The
	 * operation must restart if the page is not uptodate on unlock but
	 * otherwise serialising on page lock to stabilise the mapping gives
	 * no additional guarantees to the caller as the page lock is
	 * released before return.
	 * Case d, similar to truncation. If reclaim holds the page lock, it
	 * will be a race with remove_mapping that determines if the mapping
	 * is valid on unlock but otherwise the data is valid and there is
	 * no need to serialise with page lock.
	 *
	 * As the page lock gives no additional guarantee, we optimistically
	 * wait on the page to be unlocked and check if it's up to date and
	 * use the page if it is. Otherwise, the page lock is required to
	 * distinguish between the different cases. The motivation is that we
	 * avoid spurious serialisations and wakeups when multiple processes
	 * wait on the same page for IO to complete.
	 */
	wait_on_page_locked(page);
	if (PageUptodate(page))
		goto out;

	/* Distinguish between all the cases under the safety of the lock */
	lock_page(page);

	/* Case c or d, restart the operation */
	if (!page->mapping) {
		unlock_page(page);
		put_page(page);
		goto repeat;
	}

	/* Someone else locked and filled the page in a very small window */
	if (PageUptodate(page)) {
		unlock_page(page);
		goto out;
	}
	goto filler;

out:
	/* Promote the page on the LRU before handing it back. */
	mark_page_accessed(page);
	return page;
}
/**
 * read_cache_page - read into page cache, fill it if needed
 * @mapping: the page's address_space
 * @index: the page index
 * @filler: function to perform the read, called as filler(@data, page)
 * @data: first arg to filler(data, page) function, often left as NULL
 *
 * Read into the page cache. If a page already exists, and PageUptodate() is
 * not set, try to fill the page and wait for it to become unlocked.
 *
 * Allocations use the mapping's default gfp mask; see
 * read_cache_page_gfp() to override it.
 *
 * If the page does not get brought uptodate, return -EIO (as ERR_PTR).
 */
struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data)
{
	return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
}
EXPORT_SYMBOL(read_cache_page);
/**
 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping: the page's address_space
 * @index: the page index
 * @gfp: the page allocator flags to use if allocating
 *
 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
 * any new page allocations done using the specified allocation flags.
 *
 * If the page does not get brought uptodate, return -EIO (as ERR_PTR).
 */
struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index,
				gfp_t gfp)
{
	/*
	 * ->readpage takes (struct file *, struct page *); passing NULL as
	 * @data means the filesystem's readpage sees a NULL file pointer.
	 */
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;

	return do_read_cache_page(mapping, index, filler, NULL, gfp);
}
EXPORT_SYMBOL(read_cache_page_gfp);
/*
 * generic_write_checks - perform necessary checks before doing a write
 *
 * Handles O_APPEND positioning, rejects unsupported IOCB_NOWAIT use,
 * enforces RLIMIT_FSIZE (raising SIGXFSZ), the non-LFS size limit for
 * files opened without O_LARGEFILE, and the filesystem's s_maxbytes.
 * Can adjust writing position or amount of bytes to write (@from may be
 * truncated).
 *
 * Returns the number of bytes that may be written (possibly 0), or a
 * negative error code that the caller should return.
 */
inline ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	unsigned long limit = rlimit(RLIMIT_FSIZE);
	loff_t pos;

	if (!iov_iter_count(from))
		return 0;

	/* FIXME: this is for backwards compatibility with 2.4 */
	if (iocb->ki_flags & IOCB_APPEND)
		iocb->ki_pos = i_size_read(inode);

	pos = iocb->ki_pos;

	/* NOWAIT writes are only supported in combination with direct I/O */
	if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
		return -EINVAL;

	if (limit != RLIM_INFINITY) {
		if (iocb->ki_pos >= limit) {
			send_sig(SIGXFSZ, current, 0);
			return -EFBIG;
		}
		/* Clip the write so it stays under the rlimit. */
		iov_iter_truncate(from, limit - (unsigned long)pos);
	}

	/*
	 * LFS rule: without O_LARGEFILE a file may not grow past MAX_NON_LFS.
	 */
	if (unlikely(pos + iov_iter_count(from) > MAX_NON_LFS &&
				!(file->f_flags & O_LARGEFILE))) {
		if (pos >= MAX_NON_LFS)
			return -EFBIG;
		iov_iter_truncate(from, MAX_NON_LFS - (unsigned long)pos);
	}

	/*
	 * Are we about to exceed the fs block limit ?
	 *
	 * If we have written data it becomes a short write. If we have
	 * exceeded without writing data we send a signal and return EFBIG.
	 * Linus' "frestrict" idea will clean these up nicely..
	 */
	if (unlikely(pos >= inode->i_sb->s_maxbytes))
		return -EFBIG;

	iov_iter_truncate(from, inode->i_sb->s_maxbytes - pos);
	return iov_iter_count(from);
}
EXPORT_SYMBOL(generic_write_checks);
/*
 * pagecache_write_begin - start a buffered write through the aops.
 *
 * Thin wrapper that forwards the caller's arguments unchanged to the
 * address_space's ->write_begin() hook.
 */
int pagecache_write_begin(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	return mapping->a_ops->write_begin(file, mapping, pos, len, flags,
					   pagep, fsdata);
}
EXPORT_SYMBOL(pagecache_write_begin);
/*
 * pagecache_write_end - complete a buffered write through the aops.
 *
 * Thin wrapper that forwards the caller's arguments unchanged to the
 * address_space's ->write_end() hook.
 */
int pagecache_write_end(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	return mapping->a_ops->write_end(file, mapping, pos, len, copied,
					 page, fsdata);
}
EXPORT_SYMBOL(pagecache_write_end);
/*
 * generic_file_direct_write - perform an O_DIRECT write
 * @iocb: IO state structure (file, position, flags)
 * @from: iov_iter with the data to write
 *
 * Flushes or invalidates the affected pagecache range, submits the write
 * through ->direct_IO(), and updates i_size and iocb->ki_pos on success.
 * Returns the number of bytes written, 0 to request fallback to a
 * buffered write, or a negative error.
 */
ssize_t
generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos;
	ssize_t written;
	size_t write_len;
	pgoff_t end;

	write_len = iov_iter_count(from);
	end = (pos + write_len - 1) >> PAGE_SHIFT;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		/*
		 * If there are pages to writeback, return -EAGAIN rather
		 * than block.  filemap_range_has_page() takes an
		 * *inclusive* end byte, so probe up to pos + write_len - 1
		 * — matching the blocking branch below.  (The previous
		 * pos + iov_iter_count(from) checked one byte past the
		 * write's range.)
		 */
		if (filemap_range_has_page(inode->i_mapping, pos,
					   pos + write_len - 1))
			return -EAGAIN;
	} else {
		written = filemap_write_and_wait_range(mapping, pos,
							pos + write_len - 1);
		if (written)
			goto out;
	}

	/*
	 * After a write we want buffered reads to be sure to go to disk to get
	 * the new data. We invalidate clean cached page from the region we're
	 * about to write. We do this *before* the write so that we can return
	 * without clobbering -EIOCBQUEUED from ->direct_IO().
	 */
	written = invalidate_inode_pages2_range(mapping,
					pos >> PAGE_SHIFT, end);
	/*
	 * If a page can not be invalidated, return 0 to fall back
	 * to buffered write.
	 */
	if (written) {
		if (written == -EBUSY)
			return 0;
		goto out;
	}

	written = mapping->a_ops->direct_IO(iocb, from);

	/*
	 * Finally, try again to invalidate clean pages which might have been
	 * cached by non-direct readahead, or faulted in by get_user_pages()
	 * if the source of the write was an mmap'ed region of the file
	 * we're writing. Either one is a pretty crazy thing to do,
	 * so we don't support it 100%. If this invalidation
	 * fails, tough, the write still worked...
	 *
	 * Most of the time we do not need this since dio_complete() will do
	 * the invalidation for us. However there are some file systems that
	 * do not end up with dio_complete() being called, so let's not break
	 * them by removing it completely
	 */
	if (mapping->nrpages)
		invalidate_inode_pages2_range(mapping,
					pos >> PAGE_SHIFT, end);

	if (written > 0) {
		pos += written;
		write_len -= written;
		/* Grow i_size for regular files written past EOF. */
		if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
			i_size_write(inode, pos);
			mark_inode_dirty(inode);
		}
		iocb->ki_pos = pos;
	}
	/* Undo any iterator advance beyond what was actually written. */
	iov_iter_revert(from, write_len - iov_iter_count(from));
out:
	return written;
}
EXPORT_SYMBOL(generic_file_direct_write);
/*
 * Find or create a page at the given pagecache position. Return the locked
 * page, ready to be dirtied. This function is specifically for buffered
 * writes; returns NULL on allocation failure.
 */
struct page *grab_cache_page_write_begin(struct address_space *mapping,
					pgoff_t index, unsigned flags)
{
	const int fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT |
			      ((flags & AOP_FLAG_NOFS) ? FGP_NOFS : 0);
	struct page *page = pagecache_get_page(mapping, index, fgp_flags,
					       mapping_gfp_mask(mapping));

	if (page)
		wait_for_stable_page(page);

	return page;
}
EXPORT_SYMBOL(grab_cache_page_write_begin);
/*
 * generic_perform_write - buffered write loop for pagecache-backed files.
 * @file: file being written
 * @i: iov_iter holding the user data to copy
 * @pos: byte offset in the file at which to start writing
 *
 * Copies @i into the pagecache one page at a time through the
 * address_space's ->write_begin()/->write_end() hooks, balancing dirty
 * pages as it goes.  Returns the number of bytes written, or — if
 * nothing at all was written — the first error encountered.
 */
ssize_t generic_perform_write(struct file *file,
				struct iov_iter *i, loff_t pos)
{
	struct address_space *mapping = file->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = 0;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */
		void *fsdata;

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));

again:
		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		if (fatal_signal_pending(current)) {
			status = -EINTR;
			break;
		}

		status = a_ops->write_begin(file, mapping, pos, bytes, flags,
						&page, &fsdata);
		if (unlikely(status < 0))
			break;

		/* Flush before the atomic copy if the page is also mmapped. */
		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
		flush_dcache_page(page);

		/* ->write_end() may return fewer bytes than were copied in. */
		status = a_ops->write_end(file, mapping, pos, bytes, copied,
						page, fsdata);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fallback here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;

		balance_dirty_pages_ratelimited(mapping);
	} while (iov_iter_count(i));

	return written ? written : status;
}
EXPORT_SYMBOL(generic_perform_write);
/**
 * __generic_file_write_iter - write data to a file
 * @iocb: IO state structure (file, offset, etc.)
 * @from: iov_iter with data to write
 *
 * This function does all the work needed for actually writing data to a
 * file. It does all basic checks, removes SUID from the file, updates
 * modification times and calls proper subroutines depending on whether we
 * do direct IO or a standard buffered write.
 *
 * It expects i_mutex to be grabbed unless we work on a block device or similar
 * object which does not need locking at all.
 *
 * This function does *not* take care of syncing data in case of O_SYNC write.
 * A caller has to handle it. This is mainly due to the fact that we want to
 * avoid syncing under i_mutex.
 *
 * Returns the number of bytes written, or — if nothing was written — the
 * error from the failing step.
 */
ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space * mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t written = 0;
	ssize_t err;
	ssize_t status;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);
	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (iocb->ki_flags & IOCB_DIRECT) {
		loff_t pos, endbyte;

		written = generic_file_direct_write(iocb, from);
		/*
		 * If the write stopped short of completing, fall back to
		 * buffered writes. Some filesystems do this for writes to
		 * holes, for example. For DAX files, a buffered write will
		 * not succeed (even if it did, DAX does not handle dirty
		 * page-cache pages correctly).
		 */
		if (written < 0 || !iov_iter_count(from) || IS_DAX(inode))
			goto out;

		/* note: the assignment also captures the post-direct-write position */
		status = generic_perform_write(file, from, pos = iocb->ki_pos);
		/*
		 * If generic_perform_write() returned a synchronous error
		 * then we want to return the number of bytes which were
		 * direct-written, or the error code if that was zero. Note
		 * that this differs from normal direct-io semantics, which
		 * will return -EFOO even if some bytes were written.
		 */
		if (unlikely(status < 0)) {
			err = status;
			goto out;
		}
		/*
		 * We need to ensure that the page cache pages are written to
		 * disk and invalidated to preserve the expected O_DIRECT
		 * semantics.
		 */
		endbyte = pos + status - 1;
		err = filemap_write_and_wait_range(mapping, pos, endbyte);
		if (err == 0) {
			iocb->ki_pos = endbyte + 1;
			written += status;
			invalidate_mapping_pages(mapping,
						 pos >> PAGE_SHIFT,
						 endbyte >> PAGE_SHIFT);
		} else {
			/*
			 * We don't know how much we wrote, so just return
			 * the number of bytes which were direct-written
			 */
		}
	} else {
		written = generic_perform_write(file, from, iocb->ki_pos);
		if (likely(written > 0))
			iocb->ki_pos += written;
	}
out:
	current->backing_dev_info = NULL;
	return written ? written : err;
}
EXPORT_SYMBOL(__generic_file_write_iter);
/**
 * generic_file_write_iter - write data to a file
 * @iocb: IO state structure
 * @from: iov_iter with data to write
 *
 * Wrapper around __generic_file_write_iter() for use by most
 * filesystems: it takes i_mutex around the pre-write checks and the
 * write itself, and afterwards syncs the file if it was opened O_SYNC.
 */
ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = __generic_file_write_iter(iocb, from);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
EXPORT_SYMBOL(generic_file_write_iter);
/**
 * try_to_release_page() - release old fs-specific metadata on a page
 *
 * @page: the page which the kernel is trying to free
 * @gfp_mask: memory allocation flags (and I/O mode)
 *
 * The address_space is to try to release any data against the page
 * (presumably at page->private). If the release was successful, return '1'.
 * Otherwise return zero.
 *
 * This may also be called if PG_fscache is set on a page, indicating that the
 * page is known to the local caching routines.
 *
 * The @gfp_mask argument specifies whether I/O may be performed to release
 * this page (__GFP_IO), and whether the call may block
 * (__GFP_RECLAIM & __GFP_FS).
 *
 * The page must be locked; pages under writeback are never released.
 */
int try_to_release_page(struct page *page, gfp_t gfp_mask)
{
	struct address_space * const mapping = page->mapping;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;

	/* Prefer the filesystem's hook; fall back to the buffer-head path. */
	if (mapping && mapping->a_ops->releasepage)
		return mapping->a_ops->releasepage(page, gfp_mask);
	return try_to_free_buffers(page);
}
EXPORT_SYMBOL(try_to_release_page);
|
{
"pile_set_name": "Github"
}
|
// Jest Snapshot v1, https://goo.gl/fbAQLP
exports[`gatsby-ssr generates <link> tags with \`crossorigin\` \`anonymous\` prop for self-hosted fonts 1`] = `
Array [
<link
as="font"
crossOrigin="anonymous"
href="/font.otf"
rel="preload"
/>,
]
`;
exports[`gatsby-ssr generates <link> tags with \`crossorigin\` prop for external fonts accepts function \`crossOrigin\` in plugin config 1`] = `
Array [
<link
as="font"
crossOrigin="anonymous"
href="https://foo.bar/path/to/font.otf"
rel="preload"
/>,
]
`;
exports[`gatsby-ssr generates <link> tags with \`crossorigin\` prop for external fonts accepts function \`crossOrigin\` in plugin config 2`] = `
Array [
<link
as="font"
crossOrigin="use-credentials"
href="https://foo.bar/path/to/another.woff"
rel="preload"
/>,
]
`;
exports[`gatsby-ssr generates <link> tags with \`crossorigin\` prop for external fonts accepts string \`crossOrigin\` in plugin config 1`] = `
Array [
<link
as="font"
crossOrigin="use-credentials"
href="https://foo.bar/path/to/font.otf"
rel="preload"
/>,
]
`;
exports[`gatsby-ssr generates a <link> tag for each asset 1`] = `
Array [
<link
as="font"
crossOrigin="anonymous"
href="/path/to/font.otf"
rel="preload"
/>,
<link
as="font"
crossOrigin="anonymous"
href="https://foo.bar/path/to/another.ttf"
rel="preload"
/>,
<link
as="font"
crossOrigin="anonymous"
href="https://foo.baz/path/to/another/font.woff"
rel="preload"
/>,
]
`;
|
{
"pile_set_name": "Github"
}
|
//=========================================================
// MusE
// Linux Music Editor
// $Id: eventbase.h,v 1.3.2.3 2009/12/20 05:00:35 terminator356 Exp $
//
// (C) Copyright 1999-2004 Werner Schweer (ws@seh.de)
//
// This program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation; version 2 of
// the License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
//
//=========================================================
#ifndef __EVENTBASE_H__
#define __EVENTBASE_H__
#include <sys/types.h>
#include <sndfile.h>
#include "type_defs.h"
#include "pos.h"
#include "evdata.h"
#include "wave.h" // for SndFileR
//#include "part.h"
//#include "audio_fifo.h"
namespace MusECore {
// Forward declarations:
class Part;
class Fifo;
//---------------------------------------------------------
//   EventBase
//    Abstract, reference-counted base for all sequencer
//    events. Shared and managed through the Event wrapper
//    (declared friend below); concrete subclasses provide
//    the MIDI/wave-specific behaviour behind the virtuals.
//---------------------------------------------------------

class EventBase : public PosLen {
      EventType _type;            // Concrete kind of this event (set at construction).

      static EventID_t idGen;     // Source for newId(); monotonically incremented.
      // An always unique id.
      EventID_t _uniqueId;
      // Can be either _uniqueId or the same _uniqueId as other clone 'group' events. De-cloning restores it to _uniqueId.
      EventID_t _id;

   protected:
      int refCount;               // Managed by the Event wrapper class (friend).
      bool _selected;             // UI selection state.

   public:
      EventBase(EventType t);
      // Creates a non-shared clone with same id, or duplicate with unique id, and 0 ref count and invalid Pos sn.
      EventBase(const EventBase& ev, bool duplicate_not_clone = false);
      virtual ~EventBase() { }

      int getRefCount() const    { return refCount; }
      EventID_t id() const       { return _id; }
      // Hands out a fresh, never-before-used id.
      EventID_t newId()          { return idGen++; }
      void shareId(const EventBase* ev) { _id = ev->_id; } // Makes id same as given event's. Effectively makes the events non-shared clones.
      virtual void assign(const EventBase& ev); // Assigns to this event, excluding the _id.

      EventType type() const     { return _type; }
      void setType(EventType t)  { _type = t; }
      bool selected() const      { return _selected; }
      void setSelected(bool val) { _selected = val; }

      void move(int offset);

      // Equality-style comparisons; semantics defined by subclasses.
      virtual bool isSimilarTo(const EventBase& other) const = 0;
      virtual bool isSimilarType(const EventBase& other,
                              bool compareTime = false,
                              bool compareA = false, bool compareB = false, bool compareC = false,
                              bool compareWavePath = false, bool compareWavePos = false, bool compareWaveStartPos = false) const;

      // Serialization and diagnostics.
      virtual void read(Xml&) = 0;
      virtual void write(int, Xml&, const Pos& offset, bool forcePath = false) const = 0;
      virtual void dump(int n = 0) const;
      // Returns a new event covering the given sub-range of this one.
      virtual EventBase* mid(unsigned, unsigned) const = 0;
      friend class Event;

      // MIDI-oriented accessors; base implementations are inert defaults
      // so non-MIDI subclasses need not override them.
      virtual bool isNote() const                   { return false; }
      virtual bool isNoteOff() const                { return false; }
      virtual int pitch() const                     { return 0;     }
      virtual int program() const                   { return 0;     }
      virtual int cntrl() const                     { return 0;     }
      virtual int dataA() const                     { return 0;     }
      virtual void setA(int)                        { }
      virtual void setPitch(int)                    { }
      virtual int cntrlVal() const                  { return 0;     }
      virtual int dataB() const                     { return 0;     }
      virtual int velo() const                      { return 0;     }
      virtual void setB(int)                        { }
      virtual void setVelo(int)                     { }
      virtual int veloOff() const                   { return 0;     }
      virtual int dataC() const                     { return 0;     }
      virtual void setC(int)                        { }
      virtual void setVeloOff(int)                  { }

      // Raw sysex/meta payload access; inert defaults for other subclasses.
      virtual const unsigned char* constData() const { return 0; }
      virtual int dataLen() const                    { return 0; }
      virtual void setData(const unsigned char*, int) { }
      virtual const EvData eventData() const         { return EvData(); }

      virtual const QString name() const             { return QString("?"); }
      virtual void setName(const QString&)           { }

      // Wave-event accessors; inert defaults for MIDI subclasses.
      virtual int spos() const                       { return 0; }
      virtual void setSpos(int)                      { }
      virtual SndFileR sndFile() const               { return 0; }
      virtual void setSndFile(SndFileR&)             { }

      // Creates a non-shared clone, having the same 'group' _id.
      // NOTE: Certain pointer members may still be SHARED. Such as the sysex MidiEventBase::edata.
      // Be aware when iterating or modifying clones.
      virtual EventBase* clone() const = 0;
      // Restores _id to _uniqueId, removing the event from any clone 'group'.
      virtual void deClone()                         { _id = _uniqueId; }
      // Creates a copy of the event base, excluding the 'group' _id.
      virtual EventBase* duplicate() const = 0;

      // Audio streaming hooks used by wave events; no-ops otherwise.
      virtual void readAudio(unsigned /*frame*/, float** /*bpp*/, int /*channels*/, int /*nn*/, bool /*doSeek*/, bool /*overwrite*/) { }
      virtual void seekAudio(sf_count_t /*frame*/) { }
      virtual Fifo* audioPrefetchFifo()              { return 0; }
      virtual void prefetchAudio(Part* /*part*/, sf_count_t /*frames*/) { }
      };
} // namespace MusECore
#endif
|
{
"pile_set_name": "Github"
}
|
<?xml version="1.0" encoding="UTF-8"?>
<archive type="com.apple.InterfaceBuilder3.CocoaTouch.XIB" version="8.00">
<data>
<int key="IBDocument.SystemTarget">1552</int>
<string key="IBDocument.SystemVersion">12C60</string>
<string key="IBDocument.InterfaceBuilderVersion">3084</string>
<string key="IBDocument.AppKitVersion">1187.34</string>
<string key="IBDocument.HIToolboxVersion">625.00</string>
<object class="NSMutableDictionary" key="IBDocument.PluginVersions">
<string key="NS.key.0">com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>
<string key="NS.object.0">2083</string>
</object>
<array key="IBDocument.IntegratedClassDependencies">
<string>IBNSLayoutConstraint</string>
<string>IBProxyObject</string>
<string>IBUITableView</string>
<string>IBUIView</string>
</array>
<array key="IBDocument.PluginDependencies">
<string>com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>
</array>
<object class="NSMutableDictionary" key="IBDocument.Metadata">
<string key="NS.key.0">PluginDependencyRecalculationVersion</string>
<integer value="1" key="NS.object.0"/>
</object>
<array class="NSMutableArray" key="IBDocument.RootObjects" id="1000">
<object class="IBProxyObject" id="372490531">
<string key="IBProxiedObjectIdentifier">IBFilesOwner</string>
<string key="targetRuntimeIdentifier">IBCocoaTouchFramework</string>
</object>
<object class="IBProxyObject" id="843779117">
<string key="IBProxiedObjectIdentifier">IBFirstResponder</string>
<string key="targetRuntimeIdentifier">IBCocoaTouchFramework</string>
</object>
<object class="IBUIView" id="774585933">
<reference key="NSNextResponder"/>
<int key="NSvFlags">274</int>
<array class="NSMutableArray" key="NSSubviews">
<object class="IBUITableView" id="939496308">
<reference key="NSNextResponder" ref="774585933"/>
<int key="NSvFlags">274</int>
<string key="NSFrameSize">{320, 568}</string>
<reference key="NSSuperview" ref="774585933"/>
<reference key="NSWindow"/>
<string key="NSReuseIdentifierKey">_NS:9</string>
<object class="NSColor" key="IBUIBackgroundColor">
<int key="NSColorSpace">3</int>
<bytes key="NSWhite">MQA</bytes>
</object>
<bool key="IBUIClipsSubviews">YES</bool>
<string key="targetRuntimeIdentifier">IBCocoaTouchFramework</string>
<bool key="IBUIAlwaysBounceVertical">YES</bool>
<int key="IBUISeparatorStyle">1</int>
<int key="IBUISectionIndexMinimumDisplayRowCount">0</int>
<bool key="IBUIShowsSelectionImmediatelyOnTouchBegin">YES</bool>
<float key="IBUIRowHeight">44</float>
<float key="IBUISectionHeaderHeight">22</float>
<float key="IBUISectionFooterHeight">22</float>
</object>
</array>
<string key="NSFrameSize">{320, 568}</string>
<reference key="NSSuperview"/>
<reference key="NSWindow"/>
<reference key="NSNextKeyView"/>
<object class="NSColor" key="IBUIBackgroundColor">
<int key="NSColorSpace">3</int>
<bytes key="NSWhite">MC43NQA</bytes>
<object class="NSColorSpace" key="NSCustomColorSpace">
<int key="NSID">2</int>
</object>
</object>
<bool key="IBUIClearsContextBeforeDrawing">NO</bool>
<object class="IBUIScreenMetrics" key="IBUISimulatedDestinationMetrics">
<string key="IBUISimulatedSizeMetricsClass">IBUIScreenMetrics</string>
<object class="NSMutableDictionary" key="IBUINormalizedOrientationToSizeMap">
<bool key="EncodedWithXMLCoder">YES</bool>
<array key="dict.sortedKeys">
<integer value="1"/>
<integer value="3"/>
</array>
<array key="dict.values">
<string>{320, 568}</string>
<string>{568, 320}</string>
</array>
</object>
<string key="IBUITargetRuntime">IBCocoaTouchFramework</string>
<string key="IBUIDisplayName">Retina 4 Full Screen</string>
<int key="IBUIType">2</int>
</object>
<string key="targetRuntimeIdentifier">IBCocoaTouchFramework</string>
</object>
</array>
<object class="IBObjectContainer" key="IBDocument.Objects">
<array class="NSMutableArray" key="connectionRecords">
<object class="IBConnectionRecord">
<object class="IBCocoaTouchOutletConnection" key="connection">
<string key="label">view</string>
<reference key="source" ref="372490531"/>
<reference key="destination" ref="774585933"/>
</object>
<int key="connectionID">7</int>
</object>
<object class="IBConnectionRecord">
<object class="IBCocoaTouchOutletConnection" key="connection">
<string key="label">dataSource</string>
<reference key="source" ref="939496308"/>
<reference key="destination" ref="372490531"/>
</object>
<int key="connectionID">13</int>
</object>
<object class="IBConnectionRecord">
<object class="IBCocoaTouchOutletConnection" key="connection">
<string key="label">delegate</string>
<reference key="source" ref="939496308"/>
<reference key="destination" ref="372490531"/>
</object>
<int key="connectionID">14</int>
</object>
</array>
<object class="IBMutableOrderedSet" key="objectRecords">
<array key="orderedObjects">
<object class="IBObjectRecord">
<int key="objectID">0</int>
<array key="object" id="0"/>
<reference key="children" ref="1000"/>
<nil key="parent"/>
</object>
<object class="IBObjectRecord">
<int key="objectID">-1</int>
<reference key="object" ref="372490531"/>
<reference key="parent" ref="0"/>
<string key="objectName">File's Owner</string>
</object>
<object class="IBObjectRecord">
<int key="objectID">-2</int>
<reference key="object" ref="843779117"/>
<reference key="parent" ref="0"/>
</object>
<object class="IBObjectRecord">
<int key="objectID">6</int>
<reference key="object" ref="774585933"/>
<array class="NSMutableArray" key="children">
<reference ref="939496308"/>
<object class="IBNSLayoutConstraint" id="999719075">
<reference key="firstItem" ref="939496308"/>
<int key="firstAttribute">6</int>
<int key="relation">0</int>
<reference key="secondItem" ref="774585933"/>
<int key="secondAttribute">6</int>
<float key="multiplier">1</float>
<object class="IBLayoutConstant" key="constant">
<double key="value">0.0</double>
</object>
<float key="priority">1000</float>
<reference key="containingView" ref="774585933"/>
<int key="scoringType">8</int>
<float key="scoringTypeFloat">29</float>
<int key="contentType">3</int>
</object>
<object class="IBNSLayoutConstraint" id="534314350">
<reference key="firstItem" ref="939496308"/>
<int key="firstAttribute">4</int>
<int key="relation">0</int>
<reference key="secondItem" ref="774585933"/>
<int key="secondAttribute">4</int>
<float key="multiplier">1</float>
<object class="IBLayoutConstant" key="constant">
<double key="value">0.0</double>
</object>
<float key="priority">1000</float>
<reference key="containingView" ref="774585933"/>
<int key="scoringType">8</int>
<float key="scoringTypeFloat">29</float>
<int key="contentType">3</int>
</object>
<object class="IBNSLayoutConstraint" id="352321463">
<reference key="firstItem" ref="939496308"/>
<int key="firstAttribute">5</int>
<int key="relation">0</int>
<reference key="secondItem" ref="774585933"/>
<int key="secondAttribute">5</int>
<float key="multiplier">1</float>
<object class="IBLayoutConstant" key="constant">
<double key="value">0.0</double>
</object>
<float key="priority">1000</float>
<reference key="containingView" ref="774585933"/>
<int key="scoringType">8</int>
<float key="scoringTypeFloat">29</float>
<int key="contentType">3</int>
</object>
<object class="IBNSLayoutConstraint" id="179479986">
<reference key="firstItem" ref="939496308"/>
<int key="firstAttribute">3</int>
<int key="relation">0</int>
<reference key="secondItem" ref="774585933"/>
<int key="secondAttribute">3</int>
<float key="multiplier">1</float>
<object class="IBLayoutConstant" key="constant">
<double key="value">0.0</double>
</object>
<float key="priority">1000</float>
<reference key="containingView" ref="774585933"/>
<int key="scoringType">8</int>
<float key="scoringTypeFloat">29</float>
<int key="contentType">3</int>
</object>
</array>
<reference key="parent" ref="0"/>
</object>
<object class="IBObjectRecord">
<int key="objectID">8</int>
<reference key="object" ref="939496308"/>
<reference key="parent" ref="774585933"/>
</object>
<object class="IBObjectRecord">
<int key="objectID">9</int>
<reference key="object" ref="179479986"/>
<reference key="parent" ref="774585933"/>
</object>
<object class="IBObjectRecord">
<int key="objectID">10</int>
<reference key="object" ref="352321463"/>
<reference key="parent" ref="774585933"/>
</object>
<object class="IBObjectRecord">
<int key="objectID">11</int>
<reference key="object" ref="534314350"/>
<reference key="parent" ref="774585933"/>
</object>
<object class="IBObjectRecord">
<int key="objectID">12</int>
<reference key="object" ref="999719075"/>
<reference key="parent" ref="774585933"/>
</object>
</array>
</object>
<dictionary class="NSMutableDictionary" key="flattenedProperties">
<string key="-1.CustomClassName">ViewController</string>
<string key="-1.IBPluginDependency">com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>
<string key="-2.CustomClassName">UIResponder</string>
<string key="-2.IBPluginDependency">com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>
<string key="10.IBPluginDependency">com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>
<string key="11.IBPluginDependency">com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>
<string key="12.IBPluginDependency">com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>
<string key="6.IBPluginDependency">com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>
<array key="6.IBViewMetadataConstraints">
<reference ref="179479986"/>
<reference ref="352321463"/>
<reference ref="534314350"/>
<reference ref="999719075"/>
</array>
<string key="8.IBPluginDependency">com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>
<boolean value="NO" key="8.IBViewMetadataTranslatesAutoresizingMaskIntoConstraints"/>
<string key="9.IBPluginDependency">com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>
</dictionary>
<dictionary class="NSMutableDictionary" key="unlocalizedProperties"/>
<nil key="activeLocalization"/>
<dictionary class="NSMutableDictionary" key="localizations"/>
<nil key="sourceID"/>
<int key="maxID">14</int>
</object>
<object class="IBClassDescriber" key="IBDocument.Classes">
<array class="NSMutableArray" key="referencedPartialClassDescriptions">
<object class="IBPartialClassDescription">
<string key="className">NSLayoutConstraint</string>
<string key="superclassName">NSObject</string>
<object class="IBClassDescriptionSource" key="sourceIdentifier">
<string key="majorKey">IBProjectSource</string>
<string key="minorKey">./Classes/NSLayoutConstraint.h</string>
</object>
</object>
<object class="IBPartialClassDescription">
<string key="className">ViewController</string>
<string key="superclassName">UIViewController</string>
<object class="IBClassDescriptionSource" key="sourceIdentifier">
<string key="majorKey">IBProjectSource</string>
<string key="minorKey">./Classes/ViewController.h</string>
</object>
</object>
</array>
</object>
<int key="IBDocument.localizationMode">0</int>
<string key="IBDocument.TargetRuntimeIdentifier">IBCocoaTouchFramework</string>
<bool key="IBDocument.PluginDeclaredDependenciesTrackSystemTargetVersion">YES</bool>
<int key="IBDocument.defaultPropertyAccessControl">3</int>
<bool key="IBDocument.UseAutolayout">YES</bool>
<string key="IBCocoaTouchPluginVersion">2083</string>
</data>
</archive>
|
{
"pile_set_name": "Github"
}
|
<?xml version="1.0" encoding="ASCII"?>
<!--This file was created automatically by html2xhtml-->
<!--from the HTML stylesheets.-->
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns="http://www.w3.org/1999/xhtml" version="1.0">
<!-- ********************************************************************
$Id: synop.xsl 9829 2013-11-05 20:07:15Z bobstayton $
********************************************************************
This file is part of the XSL DocBook Stylesheet distribution.
See ../README or http://docbook.sf.net/release/xsl/current/ for
copyright and other information.
******************************************************************** -->
<!-- ==================================================================== -->
<!-- synopsis is in verbatim -->
<!-- ==================================================================== -->
<xsl:template match="cmdsynopsis">
<div>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<p>
<xsl:call-template name="id.attribute">
<xsl:with-param name="conditional" select="0"/>
</xsl:call-template>
<xsl:choose>
<xsl:when test="..//processing-instruction('dbcmdlist')">
<!-- * Placing a dbcmdlist PI as a child of a particular element -->
<!-- * creates a hyperlinked list of all cmdsynopsis instances -->
<!-- * that are descendants of that element; so for any -->
<!-- * cmdsynopsis that is a descendant of an element containing -->
<!-- * a dbcmdlist PI, we need to output an a@id instance so that -->
<!-- * we will have something to link to -->
<xsl:call-template name="anchor">
<xsl:with-param name="conditional" select="0"/>
</xsl:call-template>
</xsl:when>
<xsl:otherwise>
<xsl:call-template name="anchor">
<xsl:with-param name="conditional" select="1"/>
</xsl:call-template>
</xsl:otherwise>
</xsl:choose>
<xsl:apply-templates/>
</p>
</div>
</xsl:template>
<xsl:template match="cmdsynopsis/command">
<br/>
<xsl:call-template name="inline.monoseq"/>
<xsl:text> </xsl:text>
</xsl:template>
<xsl:template match="cmdsynopsis/command[1]" priority="2">
<xsl:call-template name="inline.monoseq"/>
<xsl:text> </xsl:text>
</xsl:template>
<xsl:template match="group|arg" name="group-or-arg">
<xsl:variable name="choice" select="@choice"/>
<xsl:variable name="rep" select="@rep"/>
<xsl:variable name="sepchar">
<xsl:choose>
<xsl:when test="ancestor-or-self::*/@sepchar">
<xsl:value-of select="ancestor-or-self::*/@sepchar"/>
</xsl:when>
<xsl:otherwise>
<xsl:text> </xsl:text>
</xsl:otherwise>
</xsl:choose>
</xsl:variable>
<xsl:if test="preceding-sibling::*">
<xsl:value-of select="$sepchar"/>
</xsl:if>
<xsl:choose>
<xsl:when test="$choice='plain'">
<xsl:value-of select="$arg.choice.plain.open.str"/>
</xsl:when>
<xsl:when test="$choice='req'">
<xsl:value-of select="$arg.choice.req.open.str"/>
</xsl:when>
<xsl:when test="$choice='opt'">
<xsl:value-of select="$arg.choice.opt.open.str"/>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="$arg.choice.def.open.str"/>
</xsl:otherwise>
</xsl:choose>
<xsl:apply-templates/>
<xsl:choose>
<xsl:when test="$rep='repeat'">
<xsl:value-of select="$arg.rep.repeat.str"/>
</xsl:when>
<xsl:when test="$rep='norepeat'">
<xsl:value-of select="$arg.rep.norepeat.str"/>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="$arg.rep.def.str"/>
</xsl:otherwise>
</xsl:choose>
<xsl:choose>
<xsl:when test="$choice='plain'">
<xsl:value-of select="$arg.choice.plain.close.str"/>
</xsl:when>
<xsl:when test="$choice='req'">
<xsl:value-of select="$arg.choice.req.close.str"/>
</xsl:when>
<xsl:when test="$choice='opt'">
<xsl:value-of select="$arg.choice.opt.close.str"/>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="$arg.choice.def.close.str"/>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<xsl:template match="group/arg">
<xsl:variable name="choice" select="@choice"/>
<xsl:variable name="rep" select="@rep"/>
<xsl:if test="preceding-sibling::*">
<xsl:value-of select="$arg.or.sep"/>
</xsl:if>
<xsl:call-template name="group-or-arg"/>
</xsl:template>
<xsl:template match="sbr">
<br/>
</xsl:template>
<!-- ==================================================================== -->
<xsl:template match="synopfragmentref">
<xsl:variable name="target" select="key('id',@linkend)"/>
<xsl:variable name="snum">
<xsl:apply-templates select="$target" mode="synopfragment.number"/>
</xsl:variable>
<em xmlns:xslo="http://www.w3.org/1999/XSL/Transform">
<a href="#{@linkend}">
<xsl:text>(</xsl:text>
<xsl:value-of select="$snum"/>
<xsl:text>)</xsl:text>
</a>
<xsl:text> </xsl:text>
<xsl:apply-templates/>
</em>
</xsl:template>
<xsl:template match="synopfragment" mode="synopfragment.number">
<xsl:number format="1"/>
</xsl:template>
<xsl:template match="synopfragment">
<xsl:variable name="snum">
<xsl:apply-templates select="." mode="synopfragment.number"/>
</xsl:variable>
<!-- You can't introduce another <p> here, because you're
already in a <p> from cmdsynopsis-->
<span>
<xsl:variable name="id">
<xsl:call-template name="object.id"/>
</xsl:variable>
<a id="{$id}">
<xsl:text>(</xsl:text>
<xsl:value-of select="$snum"/>
<xsl:text>)</xsl:text>
</a>
<xsl:text> </xsl:text>
<xsl:apply-templates/>
</span>
</xsl:template>
<xsl:template match="funcsynopsis">
<xsl:if test="..//processing-instruction('dbfunclist')">
<!-- * Placing a dbfunclist PI as a child of a particular element -->
<!-- * creates a hyperlinked list of all funcsynopsis instances that -->
<!-- * are descendants of that element; so for any funcsynopsis that is -->
<!-- * a descendant of an element containing a dbfunclist PI, we need -->
<!-- * to output an a@id instance so that we will have something to -->
<!-- * link to -->
<span>
<xsl:call-template name="id.attribute">
<xsl:with-param name="conditional" select="0"/>
</xsl:call-template>
</span>
<xsl:call-template name="anchor">
<xsl:with-param name="conditional" select="0"/>
</xsl:call-template>
</xsl:if>
<xsl:call-template name="informal.object"/>
</xsl:template>
<xsl:template match="funcsynopsisinfo">
<pre>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates/>
</pre>
</xsl:template>
<!-- ====================================================================== -->
<!-- funcprototype -->
<!--
funcprototype ::= (funcdef,
(void|varargs|paramdef+))
funcdef ::= (#PCDATA|type|replaceable|function)*
paramdef ::= (#PCDATA|type|replaceable|parameter|funcparams)*
-->
<xsl:template match="funcprototype">
<xsl:variable name="html-style">
<xsl:call-template name="pi.dbhtml_funcsynopsis-style">
<xsl:with-param name="node" select="ancestor::funcsynopsis/descendant-or-self::*"/>
</xsl:call-template>
</xsl:variable>
<xsl:variable name="style">
<xsl:choose>
<xsl:when test="$html-style != ''">
<xsl:value-of select="$html-style"/>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="$funcsynopsis.style"/>
</xsl:otherwise>
</xsl:choose>
</xsl:variable>
<!-- * 2008-02-17. the code no longer relies on the funcsynopsis.tabular.threshold -->
<!-- * param at all (the stuff below has been commented out since mid -->
<!-- * 2006), so I completely removed the funcsynopsis.tabular.threshold param -->
<!-- * .. MikeSmith -->
<!--
<xsl:variable name="tabular-p"
select="$funcsynopsis.tabular.threshold > 0
and string-length(.) > $funcsynopsis.tabular.threshold"/>
-->
<xsl:variable name="tabular-p" select="true()"/>
<xsl:choose>
<xsl:when test="$style = 'kr' and $tabular-p">
<xsl:apply-templates select="." mode="kr-tabular"/>
</xsl:when>
<xsl:when test="$style = 'kr'">
<xsl:apply-templates select="." mode="kr-nontabular"/>
</xsl:when>
<xsl:when test="$style = 'ansi' and $tabular-p">
<xsl:apply-templates select="." mode="ansi-tabular"/>
</xsl:when>
<xsl:otherwise>
<xsl:apply-templates select="." mode="ansi-nontabular"/>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<!-- ====================================================================== -->
<!-- funcprototype: kr, non-tabular -->
<xsl:template match="funcprototype" mode="kr-nontabular">
<p>
<xsl:apply-templates mode="kr-nontabular"/>
<xsl:if test="paramdef">
<br/>
<xsl:apply-templates select="paramdef" mode="kr-funcsynopsis-mode"/>
</xsl:if>
</p>
</xsl:template>
<xsl:template match="funcdef" mode="kr-nontabular">
<code>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="kr-nontabular"/>
<xsl:text>(</xsl:text>
</code>
</xsl:template>
<xsl:template match="funcdef/function" mode="kr-nontabular">
<xsl:choose>
<xsl:when test="$funcsynopsis.decoration != 0">
<strong xmlns:xslo="http://www.w3.org/1999/XSL/Transform">fsfunc<xsl:apply-templates mode="kr-nontabular"/></strong>
</xsl:when>
<xsl:otherwise>
<xsl:apply-templates mode="kr-nontabular"/>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<xsl:template match="void" mode="kr-nontabular">
<code>)</code>
<xsl:text>;</xsl:text>
</xsl:template>
<xsl:template match="varargs" mode="kr-nontabular">
<xsl:text>...</xsl:text>
<code>)</code>
<xsl:text>;</xsl:text>
</xsl:template>
<xsl:template match="paramdef" mode="kr-nontabular">
<xsl:apply-templates select="parameter" mode="kr-nontabular"/>
<xsl:choose>
<xsl:when test="following-sibling::*">
<xsl:text>, </xsl:text>
</xsl:when>
<xsl:otherwise>
<code>)</code>
<xsl:text>;</xsl:text>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<xsl:template match="paramdef/parameter" mode="kr-nontabular">
<xsl:choose>
<xsl:when test="$funcsynopsis.decoration != 0">
<var class="pdparam">
<xsl:apply-templates mode="kr-nontabular"/>
</var>
</xsl:when>
<xsl:otherwise>
<code>
<xsl:apply-templates mode="kr-nontabular"/>
</code>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<xsl:template match="paramdef" mode="kr-funcsynopsis-mode">
<xsl:if test="preceding-sibling::paramdef"><br/></xsl:if>
<code>
<xsl:apply-templates mode="kr-funcsynopsis-mode"/>
</code>
<xsl:text>;</xsl:text>
</xsl:template>
<xsl:template match="paramdef/parameter" mode="kr-funcsynopsis-mode">
<xsl:choose>
<xsl:when test="$funcsynopsis.decoration != 0">
<var class="pdparam">
<xsl:apply-templates mode="kr-funcsynopsis-mode"/>
</var>
</xsl:when>
<xsl:otherwise>
<code>
<xsl:apply-templates mode="kr-funcsynopsis-mode"/>
</code>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<xsl:template match="funcparams" mode="kr-funcsynopsis-mode">
<code>(</code>
<xsl:apply-templates mode="kr-funcsynopsis-mode"/>
<code>)</code>
</xsl:template>
<!-- ====================================================================== -->
<!-- funcprototype: kr, tabular -->
<xsl:template match="funcprototype" mode="kr-tabular">
<table border="{$table.border.off}" class="funcprototype-table">
<xsl:if test="$div.element != 'section'">
<xsl:attribute name="summary">Function synopsis</xsl:attribute>
</xsl:if>
<xsl:if test="$css.decoration != 0">
<xsl:attribute name="style">cellspacing: 0; cellpadding: 0;</xsl:attribute>
</xsl:if>
<tr>
<td>
<xsl:apply-templates select="funcdef" mode="kr-tabular"/>
</td>
<xsl:apply-templates select="(void|varargs|paramdef)[1]" mode="kr-tabular"/>
</tr>
<xsl:for-each select="(void|varargs|paramdef)[preceding-sibling::*[not(self::funcdef)]]">
<tr>
<td> </td>
<xsl:apply-templates select="." mode="kr-tabular"/>
</tr>
</xsl:for-each>
</table>
<xsl:if test="paramdef">
<div class="paramdef-list">
<xsl:apply-templates select="paramdef" mode="kr-funcsynopsis-mode"/>
</div>
</xsl:if>
<div class="funcprototype-spacer"> </div> <!-- hACk: blank div for vertical spacing -->
</xsl:template>
<xsl:template match="funcdef" mode="kr-tabular">
<code>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="kr-tabular"/>
<xsl:text>(</xsl:text>
</code>
</xsl:template>
<xsl:template match="funcdef/function" mode="kr-tabular">
<xsl:choose>
<xsl:when test="$funcsynopsis.decoration != 0">
<strong xmlns:xslo="http://www.w3.org/1999/XSL/Transform">fsfunc<xsl:apply-templates mode="kr-nontabular"/></strong>
</xsl:when>
<xsl:otherwise>
<xsl:apply-templates mode="kr-tabular"/>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<xsl:template match="void" mode="kr-tabular">
<td>
<code>)</code>
<xsl:text>;</xsl:text>
</td>
<td> </td>
</xsl:template>
<xsl:template match="varargs" mode="kr-tabular">
<td>
<xsl:text>...</xsl:text>
<code>)</code>
<xsl:text>;</xsl:text>
</td>
<td> </td>
</xsl:template>
<xsl:template match="paramdef" mode="kr-tabular">
<td>
<xsl:apply-templates select="parameter" mode="kr-tabular"/>
<xsl:choose>
<xsl:when test="following-sibling::*">
<xsl:text>, </xsl:text>
</xsl:when>
<xsl:otherwise>
<code>)</code>
<xsl:text>;</xsl:text>
</xsl:otherwise>
</xsl:choose>
</td>
<td> </td>
</xsl:template>
<xsl:template match="paramdef/parameter" mode="kr-tabular">
<xsl:choose>
<xsl:when test="$funcsynopsis.decoration != 0">
<var class="pdparam">
<xsl:apply-templates mode="kr-tabular"/>
</var>
</xsl:when>
<xsl:otherwise>
<code>
<xsl:apply-templates mode="kr-tabular"/>
</code>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<xsl:template match="paramdef" mode="kr-tabular-funcsynopsis-mode">
<xsl:variable name="type">
<xsl:choose>
<xsl:when test="type">
<xsl:apply-templates select="type" mode="kr-tabular-funcsynopsis-mode"/>
</xsl:when>
<xsl:when test="normalize-space(parameter/preceding-sibling::node()[not(self::parameter)]) != ''">
<xsl:copy-of select="parameter/preceding-sibling::node()[not(self::parameter)]"/>
</xsl:when>
</xsl:choose>
</xsl:variable>
<tr>
<xsl:choose>
<xsl:when test="$type != '' and funcparams">
<td>
<code>
<xsl:copy-of select="$type"/>
</code>
<xsl:text> </xsl:text>
</td>
<td>
<code>
<xsl:choose>
<xsl:when test="type">
<xsl:apply-templates select="type/following-sibling::*" mode="kr-tabular-funcsynopsis-mode"/>
</xsl:when>
<xsl:otherwise>
<xsl:apply-templates select="*" mode="kr-tabular-funcsynopsis-mode"/>
</xsl:otherwise>
</xsl:choose>
</code>
</td>
</xsl:when>
<xsl:when test="funcparams">
<td colspan="2">
<code>
<xsl:apply-templates mode="kr-tabular-funcsynopsis-mode"/>
</code>
</td>
</xsl:when>
<xsl:otherwise>
<td>
<code>
<xsl:apply-templates select="parameter/preceding-sibling::node()[not(self::parameter)]" mode="kr-tabular-funcsynopsis-mode"/>
</code>
<xsl:text> </xsl:text>
</td>
<td>
<code>
<xsl:apply-templates select="parameter" mode="kr-tabular"/>
<xsl:apply-templates select="parameter/following-sibling::*[not(self::parameter)]" mode="kr-tabular-funcsynopsis-mode"/>
<xsl:text>;</xsl:text>
</code>
</td>
</xsl:otherwise>
</xsl:choose>
</tr>
</xsl:template>
<xsl:template match="paramdef/parameter" mode="kr-tabular-funcsynopsis-mode">
<xsl:choose>
<xsl:when test="$funcsynopsis.decoration != 0">
<var class="pdparam">
<xsl:apply-templates mode="kr-tabular-funcsynopsis-mode"/>
</var>
</xsl:when>
<xsl:otherwise>
<code>
<xsl:apply-templates mode="kr-tabular-funcsynopsis-mode"/>
</code>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<xsl:template match="funcparams" mode="kr-tabular-funcsynopsis-mode">
<code>(</code>
<xsl:apply-templates mode="kr-tabular-funcsynopsis-mode"/>
<code>)</code>
<xsl:text>;</xsl:text>
</xsl:template>
<!-- ====================================================================== -->
<!-- funcprototype: ansi, non-tabular -->
<xsl:template match="funcprototype" mode="ansi-nontabular">
<p>
<xsl:apply-templates mode="ansi-nontabular"/>
</p>
</xsl:template>
<xsl:template match="funcdef" mode="ansi-nontabular">
<code>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="ansi-nontabular"/>
<xsl:text>(</xsl:text>
</code>
</xsl:template>
<xsl:template match="funcdef/function" mode="ansi-nontabular">
<xsl:choose>
<xsl:when test="$funcsynopsis.decoration != 0">
<strong xmlns:xslo="http://www.w3.org/1999/XSL/Transform">fsfunc<xsl:apply-templates mode="ansi-nontabular"/></strong>
</xsl:when>
<xsl:otherwise>
<xsl:apply-templates mode="ansi-nontabular"/>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<xsl:template match="void" mode="ansi-nontabular">
<code>void)</code>
<xsl:text>;</xsl:text>
</xsl:template>
<xsl:template match="varargs" mode="ansi-nontabular">
<xsl:text>...</xsl:text>
<code>)</code>
<xsl:text>;</xsl:text>
</xsl:template>
<xsl:template match="paramdef" mode="ansi-nontabular">
<xsl:apply-templates mode="ansi-nontabular"/>
<xsl:choose>
<xsl:when test="following-sibling::*">
<xsl:text>, </xsl:text>
</xsl:when>
<xsl:otherwise>
<code>)</code>
<xsl:text>;</xsl:text>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<xsl:template match="paramdef/parameter" mode="ansi-nontabular">
<xsl:choose>
<xsl:when test="$funcsynopsis.decoration != 0">
<var class="pdparam">
<xsl:apply-templates mode="ansi-nontabular"/>
</var>
</xsl:when>
<xsl:otherwise>
<code>
<xsl:apply-templates mode="ansi-nontabular"/>
</code>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<xsl:template match="funcparams" mode="ansi-nontabular">
<code>(</code>
<xsl:apply-templates mode="ansi-nontabular"/>
<code>)</code>
</xsl:template>
<!-- ====================================================================== -->
<!-- funcprototype: ansi, tabular -->
<xsl:template match="funcprototype" mode="ansi-tabular">
<table border="{$table.border.off}" class="funcprototype-table">
<xsl:if test="$div.element != 'section'">
<xsl:attribute name="summary">Function synopsis</xsl:attribute>
</xsl:if>
<xsl:if test="$css.decoration != 0">
<xsl:attribute name="style">cellspacing: 0; cellpadding: 0;</xsl:attribute>
</xsl:if>
<tr>
<td>
<xsl:apply-templates select="funcdef" mode="ansi-tabular"/>
</td>
<xsl:apply-templates select="(void|varargs|paramdef)[1]" mode="ansi-tabular"/>
</tr>
<xsl:for-each select="(void|varargs|paramdef)[preceding-sibling::*[not(self::funcdef)]]">
<tr>
<td> </td>
<xsl:apply-templates select="." mode="ansi-tabular"/>
</tr>
</xsl:for-each>
</table>
<div class="funcprototype-spacer"> </div> <!-- hACk: blank div for vertical spacing -->
</xsl:template>
<xsl:template match="funcdef" mode="ansi-tabular">
<code>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="ansi-tabular"/>
<xsl:text>(</xsl:text>
</code>
</xsl:template>
<xsl:template match="funcdef/function" mode="ansi-tabular">
<xsl:choose>
<xsl:when test="$funcsynopsis.decoration != 0">
<strong xmlns:xslo="http://www.w3.org/1999/XSL/Transform">fsfunc<xsl:apply-templates mode="ansi-nontabular"/></strong>
</xsl:when>
<xsl:otherwise>
<xsl:apply-templates mode="kr-tabular"/>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<xsl:template match="void" mode="ansi-tabular">
<td>
<code>void)</code>
<xsl:text>;</xsl:text>
</td>
<td> </td>
</xsl:template>
<xsl:template match="varargs" mode="ansi-tabular">
<td>
<xsl:text>...</xsl:text>
<code>)</code>
<xsl:text>;</xsl:text>
</td>
<td> </td>
</xsl:template>
<xsl:template match="paramdef" mode="ansi-tabular">
<td>
<xsl:apply-templates mode="ansi-tabular"/>
<xsl:choose>
<xsl:when test="following-sibling::*">
<xsl:text>, </xsl:text>
</xsl:when>
<xsl:otherwise>
<code>)</code>
<xsl:text>;</xsl:text>
</xsl:otherwise>
</xsl:choose>
</td>
</xsl:template>
<xsl:template match="paramdef/parameter" mode="ansi-tabular">
<xsl:choose>
<xsl:when test="$funcsynopsis.decoration != 0">
<var class="pdparam">
<xsl:apply-templates mode="ansi-tabular"/>
</var>
</xsl:when>
<xsl:otherwise>
<code>
<xsl:apply-templates mode="ansi-tabular"/>
</code>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<xsl:template match="funcparams" mode="ansi-tabular">
<code>(</code>
<xsl:apply-templates/>
<code>)</code>
</xsl:template>
<!-- ====================================================================== -->
<xsl:variable name="default-classsynopsis-language">java</xsl:variable>
<xsl:template match="classsynopsis |fieldsynopsis |methodsynopsis |constructorsynopsis |destructorsynopsis">
<xsl:param name="language">
<xsl:choose>
<xsl:when test="@language">
<xsl:value-of select="@language"/>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="$default-classsynopsis-language"/>
</xsl:otherwise>
</xsl:choose>
</xsl:param>
<xsl:choose>
<xsl:when test="$language='java' or $language='Java'">
<xsl:apply-templates select="." mode="java"/>
</xsl:when>
<xsl:when test="$language='perl' or $language='Perl'">
<xsl:apply-templates select="." mode="perl"/>
</xsl:when>
<xsl:when test="$language='idl' or $language='IDL'">
<xsl:apply-templates select="." mode="idl"/>
</xsl:when>
<xsl:when test="$language='cpp' or $language='c++' or $language='C++'">
<xsl:apply-templates select="." mode="cpp"/>
</xsl:when>
<xsl:otherwise>
<xsl:message>
<xsl:text>Unrecognized language on </xsl:text>
<xsl:value-of select="local-name(.)"/>
<xsl:text>: </xsl:text>
<xsl:value-of select="$language"/>
</xsl:message>
<xsl:apply-templates select=".">
<xsl:with-param name="language" select="$default-classsynopsis-language"/>
</xsl:apply-templates>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<xsl:template name="synop-break">
<xsl:if test="parent::classsynopsis or (following-sibling::fieldsynopsis |following-sibling::methodsynopsis |following-sibling::constructorsynopsis |following-sibling::destructorsynopsis)">
<br/>
</xsl:if>
</xsl:template>
<!-- ===== Java ======================================================== -->
<xsl:template match="classsynopsis" mode="java">
<pre>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates select="ooclass[1]" mode="java"/>
<xsl:if test="ooclass[preceding-sibling::*]">
<xsl:text> extends</xsl:text>
<xsl:apply-templates select="ooclass[preceding-sibling::*]" mode="java"/>
<xsl:if test="oointerface|ooexception">
<br/>
<xsl:text>    </xsl:text>
</xsl:if>
</xsl:if>
<xsl:if test="oointerface">
<xsl:text>implements</xsl:text>
<xsl:apply-templates select="oointerface" mode="java"/>
<xsl:if test="ooexception">
<br/>
<xsl:text>    </xsl:text>
</xsl:if>
</xsl:if>
<xsl:if test="ooexception">
<xsl:text>throws</xsl:text>
<xsl:apply-templates select="ooexception" mode="java"/>
</xsl:if>
<xsl:text> {</xsl:text>
<br/>
<xsl:apply-templates select="constructorsynopsis |destructorsynopsis |fieldsynopsis |methodsynopsis |classsynopsisinfo" mode="java"/>
<xsl:text>}</xsl:text>
</pre>
</xsl:template>
<xsl:template match="classsynopsisinfo" mode="java">
<xsl:apply-templates mode="java"/>
</xsl:template>
<xsl:template match="ooclass|oointerface|ooexception" mode="java">
<xsl:choose>
<xsl:when test="preceding-sibling::*">
<xsl:text>, </xsl:text>
</xsl:when>
<xsl:otherwise>
<xsl:text> </xsl:text>
</xsl:otherwise>
</xsl:choose>
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="java"/>
</span>
</xsl:template>
<xsl:template match="modifier|package" mode="java">
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="java"/>
<xsl:if test="following-sibling::*">
<xsl:text> </xsl:text>
</xsl:if>
</span>
</xsl:template>
<xsl:template match="classname" mode="java">
<xsl:if test="local-name(preceding-sibling::*[1]) = 'classname'">
<xsl:text>, </xsl:text>
</xsl:if>
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="java"/>
</span>
</xsl:template>
<xsl:template match="interfacename" mode="java">
<xsl:if test="local-name(preceding-sibling::*[1]) = 'interfacename'">
<xsl:text>, </xsl:text>
</xsl:if>
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="java"/>
</span>
</xsl:template>
<xsl:template match="exceptionname" mode="java">
<xsl:if test="local-name(preceding-sibling::*[1]) = 'exceptionname'">
<xsl:text>, </xsl:text>
</xsl:if>
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="java"/>
</span>
</xsl:template>
<xsl:template match="fieldsynopsis" mode="java">
<code>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:if test="parent::classsynopsis">
<xsl:text>  </xsl:text>
</xsl:if>
<xsl:apply-templates mode="java"/>
<xsl:text>;</xsl:text>
</code>
<xsl:call-template name="synop-break"/>
</xsl:template>
<xsl:template match="type" mode="java">
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="java"/>
<xsl:text> </xsl:text>
</span>
</xsl:template>
<xsl:template match="varname" mode="java">
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="java"/>
<xsl:text> </xsl:text>
</span>
</xsl:template>
<xsl:template match="initializer" mode="java">
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:text>= </xsl:text>
<xsl:apply-templates mode="java"/>
</span>
</xsl:template>
<xsl:template match="void" mode="java">
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:text>void </xsl:text>
</span>
</xsl:template>
<xsl:template match="methodname" mode="java">
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="java"/>
</span>
</xsl:template>
<xsl:template match="methodparam" mode="java">
<xsl:param name="indent">0</xsl:param>
<xsl:if test="preceding-sibling::methodparam">
<xsl:text>,</xsl:text>
<br/>
<xsl:if test="$indent > 0">
<xsl:call-template name="copy-string">
<xsl:with-param name="string"> </xsl:with-param>
<xsl:with-param name="count" select="$indent + 1"/>
</xsl:call-template>
</xsl:if>
</xsl:if>
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="java"/>
</span>
</xsl:template>
<xsl:template match="parameter" mode="java">
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="java"/>
</span>
</xsl:template>
<xsl:template mode="java" match="constructorsynopsis|destructorsynopsis|methodsynopsis">
<xsl:variable name="start-modifiers" select="modifier[following-sibling::*[local-name(.) != 'modifier']]"/>
<xsl:variable name="notmod" select="*[local-name(.) != 'modifier']"/>
<xsl:variable name="end-modifiers" select="modifier[preceding-sibling::*[local-name(.) != 'modifier']]"/>
<xsl:variable name="decl">
<xsl:if test="parent::classsynopsis">
<xsl:text>  </xsl:text>
</xsl:if>
<xsl:apply-templates select="$start-modifiers" mode="java"/>
<!-- type -->
<xsl:if test="local-name($notmod[1]) != 'methodname'">
<xsl:apply-templates select="$notmod[1]" mode="java"/>
</xsl:if>
<xsl:apply-templates select="methodname" mode="java"/>
</xsl:variable>
<code>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:copy-of select="$decl"/>
<xsl:text>(</xsl:text>
<xsl:apply-templates select="methodparam" mode="java">
<xsl:with-param name="indent" select="string-length($decl)"/>
</xsl:apply-templates>
<xsl:text>)</xsl:text>
<xsl:if test="exceptionname">
<br/>
<xsl:text>    throws </xsl:text>
<xsl:apply-templates select="exceptionname" mode="java"/>
</xsl:if>
<xsl:if test="modifier[preceding-sibling::*[local-name(.) != 'modifier']]">
<xsl:text> </xsl:text>
<xsl:apply-templates select="$end-modifiers" mode="java"/>
</xsl:if>
<xsl:text>;</xsl:text>
</code>
<xsl:call-template name="synop-break"/>
</xsl:template>
<!-- ===== C++ ========================================================= -->
<xsl:template match="classsynopsis" mode="cpp">
<pre>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates select="ooclass[1]" mode="cpp"/>
<xsl:if test="ooclass[preceding-sibling::*]">
<xsl:text>: </xsl:text>
<xsl:apply-templates select="ooclass[preceding-sibling::*]" mode="cpp"/>
<xsl:if test="oointerface|ooexception">
<br/>
<xsl:text>    </xsl:text>
</xsl:if>
</xsl:if>
<xsl:if test="oointerface">
<xsl:text> implements</xsl:text>
<xsl:apply-templates select="oointerface" mode="cpp"/>
<xsl:if test="ooexception">
<br/>
<xsl:text>    </xsl:text>
</xsl:if>
</xsl:if>
<xsl:if test="ooexception">
<xsl:text> throws</xsl:text>
<xsl:apply-templates select="ooexception" mode="cpp"/>
</xsl:if>
<xsl:text> {</xsl:text>
<br/>
<xsl:apply-templates select="constructorsynopsis |destructorsynopsis |fieldsynopsis |methodsynopsis |classsynopsisinfo" mode="cpp"/>
<xsl:text>}</xsl:text>
</pre>
</xsl:template>
<xsl:template match="classsynopsisinfo" mode="cpp">
<xsl:apply-templates mode="cpp"/>
</xsl:template>
<xsl:template match="ooclass|oointerface|ooexception" mode="cpp">
<xsl:if test="preceding-sibling::*">
<xsl:text>, </xsl:text>
</xsl:if>
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="cpp"/>
</span>
</xsl:template>
<xsl:template match="modifier|package" mode="cpp">
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="cpp"/>
<xsl:if test="following-sibling::*">
<xsl:text> </xsl:text>
</xsl:if>
</span>
</xsl:template>
<xsl:template match="classname" mode="cpp">
<xsl:if test="local-name(preceding-sibling::*[1]) = 'classname'">
<xsl:text>, </xsl:text>
</xsl:if>
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="cpp"/>
</span>
</xsl:template>
<xsl:template match="interfacename" mode="cpp">
<xsl:if test="local-name(preceding-sibling::*[1]) = 'interfacename'">
<xsl:text>, </xsl:text>
</xsl:if>
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="cpp"/>
</span>
</xsl:template>
<xsl:template match="exceptionname" mode="cpp">
<xsl:if test="local-name(preceding-sibling::*[1]) = 'exceptionname'">
<xsl:text>, </xsl:text>
</xsl:if>
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="cpp"/>
</span>
</xsl:template>
<xsl:template match="fieldsynopsis" mode="cpp">
<code>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:if test="parent::classsynopsis">
<xsl:text>  </xsl:text>
</xsl:if>
<xsl:apply-templates mode="cpp"/>
<xsl:text>;</xsl:text>
</code>
<xsl:call-template name="synop-break"/>
</xsl:template>
<xsl:template match="type" mode="cpp">
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="cpp"/>
<xsl:text> </xsl:text>
</span>
</xsl:template>
<xsl:template match="varname" mode="cpp">
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="cpp"/>
<xsl:text> </xsl:text>
</span>
</xsl:template>
<xsl:template match="initializer" mode="cpp">
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:text>= </xsl:text>
<xsl:apply-templates mode="cpp"/>
</span>
</xsl:template>
<xsl:template match="void" mode="cpp">
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:text>void </xsl:text>
</span>
</xsl:template>
<xsl:template match="methodname" mode="cpp">
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="cpp"/>
</span>
</xsl:template>
<xsl:template match="methodparam" mode="cpp">
<xsl:if test="preceding-sibling::methodparam">
<xsl:text>, </xsl:text>
</xsl:if>
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="cpp"/>
</span>
</xsl:template>
<xsl:template match="parameter" mode="cpp">
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="cpp"/>
</span>
</xsl:template>
<xsl:template mode="cpp" match="constructorsynopsis|destructorsynopsis|methodsynopsis">
<xsl:variable name="start-modifiers" select="modifier[following-sibling::*[local-name(.) != 'modifier']]"/>
<xsl:variable name="notmod" select="*[local-name(.) != 'modifier']"/>
<xsl:variable name="end-modifiers" select="modifier[preceding-sibling::*[local-name(.) != 'modifier']]"/>
<code>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:if test="parent::classsynopsis">
<xsl:text>  </xsl:text>
</xsl:if>
<xsl:apply-templates select="$start-modifiers" mode="cpp"/>
<!-- type -->
<xsl:if test="local-name($notmod[1]) != 'methodname'">
<xsl:apply-templates select="$notmod[1]" mode="cpp"/>
</xsl:if>
<xsl:apply-templates select="methodname" mode="cpp"/>
<xsl:text>(</xsl:text>
<xsl:apply-templates select="methodparam" mode="cpp"/>
<xsl:text>)</xsl:text>
<xsl:if test="exceptionname">
<br/>
<xsl:text>    throws </xsl:text>
<xsl:apply-templates select="exceptionname" mode="cpp"/>
</xsl:if>
<xsl:if test="modifier[preceding-sibling::*[local-name(.) != 'modifier']]">
<xsl:text> </xsl:text>
<xsl:apply-templates select="$end-modifiers" mode="cpp"/>
</xsl:if>
<xsl:text>;</xsl:text>
</code>
<xsl:call-template name="synop-break"/>
</xsl:template>
<!-- ===== IDL ========================================================= -->
<xsl:template match="classsynopsis" mode="idl">
<pre>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:text>interface </xsl:text>
<xsl:apply-templates select="ooclass[1]" mode="idl"/>
<xsl:if test="ooclass[preceding-sibling::*]">
<xsl:text>: </xsl:text>
<xsl:apply-templates select="ooclass[preceding-sibling::*]" mode="idl"/>
<xsl:if test="oointerface|ooexception">
<br/>
<xsl:text>    </xsl:text>
</xsl:if>
</xsl:if>
<xsl:if test="oointerface">
<xsl:text> implements</xsl:text>
<xsl:apply-templates select="oointerface" mode="idl"/>
<xsl:if test="ooexception">
<br/>
<xsl:text>    </xsl:text>
</xsl:if>
</xsl:if>
<xsl:if test="ooexception">
<xsl:text> throws</xsl:text>
<xsl:apply-templates select="ooexception" mode="idl"/>
</xsl:if>
<xsl:text> {</xsl:text>
<br/>
<xsl:apply-templates select="constructorsynopsis |destructorsynopsis |fieldsynopsis |methodsynopsis |classsynopsisinfo" mode="idl"/>
<xsl:text>}</xsl:text>
</pre>
</xsl:template>
<xsl:template match="classsynopsisinfo" mode="idl">
<xsl:apply-templates mode="idl"/>
</xsl:template>
<xsl:template match="ooclass|oointerface|ooexception" mode="idl">
<xsl:if test="preceding-sibling::*">
<xsl:text>, </xsl:text>
</xsl:if>
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="idl"/>
</span>
</xsl:template>
<xsl:template match="modifier|package" mode="idl">
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="idl"/>
<xsl:if test="following-sibling::*">
<xsl:text> </xsl:text>
</xsl:if>
</span>
</xsl:template>
<xsl:template match="classname" mode="idl">
<xsl:if test="local-name(preceding-sibling::*[1]) = 'classname'">
<xsl:text>, </xsl:text>
</xsl:if>
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="idl"/>
</span>
</xsl:template>
<xsl:template match="interfacename" mode="idl">
<xsl:if test="local-name(preceding-sibling::*[1]) = 'interfacename'">
<xsl:text>, </xsl:text>
</xsl:if>
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="idl"/>
</span>
</xsl:template>
<xsl:template match="exceptionname" mode="idl">
<xsl:if test="local-name(preceding-sibling::*[1]) = 'exceptionname'">
<xsl:text>, </xsl:text>
</xsl:if>
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="idl"/>
</span>
</xsl:template>
<xsl:template match="fieldsynopsis" mode="idl">
<code>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:if test="parent::classsynopsis">
<xsl:text>  </xsl:text>
</xsl:if>
<xsl:apply-templates mode="idl"/>
<xsl:text>;</xsl:text>
</code>
<xsl:call-template name="synop-break"/>
</xsl:template>
<xsl:template match="type" mode="idl">
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="idl"/>
<xsl:text> </xsl:text>
</span>
</xsl:template>
<xsl:template match="varname" mode="idl">
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="idl"/>
<xsl:text> </xsl:text>
</span>
</xsl:template>
<xsl:template match="initializer" mode="idl">
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:text>= </xsl:text>
<xsl:apply-templates mode="idl"/>
</span>
</xsl:template>
<xsl:template match="void" mode="idl">
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:text>void </xsl:text>
</span>
</xsl:template>
<xsl:template match="methodname" mode="idl">
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="idl"/>
</span>
</xsl:template>
<xsl:template match="methodparam" mode="idl">
<xsl:if test="preceding-sibling::methodparam">
<xsl:text>, </xsl:text>
</xsl:if>
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="idl"/>
</span>
</xsl:template>
<xsl:template match="parameter" mode="idl">
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="idl"/>
</span>
</xsl:template>
<xsl:template mode="idl" match="constructorsynopsis|destructorsynopsis|methodsynopsis">
<xsl:variable name="start-modifiers" select="modifier[following-sibling::*[local-name(.) != 'modifier']]"/>
<xsl:variable name="notmod" select="*[local-name(.) != 'modifier']"/>
<xsl:variable name="end-modifiers" select="modifier[preceding-sibling::*[local-name(.) != 'modifier']]"/>
<code>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:if test="parent::classsynopsis">
<xsl:text>  </xsl:text>
</xsl:if>
<xsl:apply-templates select="$start-modifiers" mode="idl"/>
<!-- type -->
<xsl:if test="local-name($notmod[1]) != 'methodname'">
<xsl:apply-templates select="$notmod[1]" mode="idl"/>
</xsl:if>
<xsl:apply-templates select="methodname" mode="idl"/>
<xsl:text>(</xsl:text>
<xsl:apply-templates select="methodparam" mode="idl"/>
<xsl:text>)</xsl:text>
<xsl:if test="exceptionname">
<br/>
<xsl:text>    raises(</xsl:text>
<xsl:apply-templates select="exceptionname" mode="idl"/>
<xsl:text>)</xsl:text>
</xsl:if>
<xsl:if test="modifier[preceding-sibling::*[local-name(.) != 'modifier']]">
<xsl:text> </xsl:text>
<xsl:apply-templates select="$end-modifiers" mode="idl"/>
</xsl:if>
<xsl:text>;</xsl:text>
</code>
<xsl:call-template name="synop-break"/>
</xsl:template>
<!-- ===== Perl ======================================================== -->
<xsl:template match="classsynopsis" mode="perl">
<pre>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:text>package </xsl:text>
<xsl:apply-templates select="ooclass[1]" mode="perl"/>
<xsl:text>;</xsl:text>
<br/>
<xsl:if test="ooclass[preceding-sibling::*]">
<xsl:text>@ISA = (</xsl:text>
<xsl:apply-templates select="ooclass[preceding-sibling::*]" mode="perl"/>
<xsl:text>);</xsl:text>
<br/>
</xsl:if>
<xsl:apply-templates select="constructorsynopsis |destructorsynopsis |fieldsynopsis |methodsynopsis |classsynopsisinfo" mode="perl"/>
</pre>
</xsl:template>
<xsl:template match="classsynopsisinfo" mode="perl">
<xsl:apply-templates mode="perl"/>
</xsl:template>
<xsl:template match="ooclass|oointerface|ooexception" mode="perl">
<xsl:if test="preceding-sibling::*">
<xsl:text>, </xsl:text>
</xsl:if>
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="perl"/>
</span>
</xsl:template>
<xsl:template match="modifier|package" mode="perl">
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="perl"/>
<xsl:if test="following-sibling::*">
<xsl:text> </xsl:text>
</xsl:if>
</span>
</xsl:template>
<xsl:template match="classname" mode="perl">
<xsl:if test="local-name(preceding-sibling::*[1]) = 'classname'">
<xsl:text>, </xsl:text>
</xsl:if>
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="perl"/>
</span>
</xsl:template>
<xsl:template match="interfacename" mode="perl">
<xsl:if test="local-name(preceding-sibling::*[1]) = 'interfacename'">
<xsl:text>, </xsl:text>
</xsl:if>
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="perl"/>
</span>
</xsl:template>
<xsl:template match="exceptionname" mode="perl">
<xsl:if test="local-name(preceding-sibling::*[1]) = 'exceptionname'">
<xsl:text>, </xsl:text>
</xsl:if>
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="perl"/>
</span>
</xsl:template>
<xsl:template match="fieldsynopsis" mode="perl">
<code>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:if test="parent::classsynopsis">
<xsl:text>  </xsl:text>
</xsl:if>
<xsl:apply-templates mode="perl"/>
<xsl:text>;</xsl:text>
</code>
<xsl:call-template name="synop-break"/>
</xsl:template>
<xsl:template match="type" mode="perl">
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="perl"/>
<xsl:text> </xsl:text>
</span>
</xsl:template>
<xsl:template match="varname" mode="perl">
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="perl"/>
<xsl:text> </xsl:text>
</span>
</xsl:template>
<xsl:template match="initializer" mode="perl">
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:text>= </xsl:text>
<xsl:apply-templates mode="perl"/>
</span>
</xsl:template>
<xsl:template match="void" mode="perl">
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:text>void </xsl:text>
</span>
</xsl:template>
<xsl:template match="methodname" mode="perl">
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="perl"/>
</span>
</xsl:template>
<xsl:template match="methodparam" mode="perl">
<xsl:if test="preceding-sibling::methodparam">
<xsl:text>, </xsl:text>
</xsl:if>
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="perl"/>
</span>
</xsl:template>
<xsl:template match="parameter" mode="perl">
<span>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:apply-templates mode="perl"/>
</span>
</xsl:template>
<xsl:template mode="perl" match="constructorsynopsis|destructorsynopsis|methodsynopsis">
<xsl:variable name="start-modifiers" select="modifier[following-sibling::*[local-name(.) != 'modifier']]"/>
<xsl:variable name="notmod" select="*[local-name(.) != 'modifier']"/>
<xsl:variable name="end-modifiers" select="modifier[preceding-sibling::*[local-name(.) != 'modifier']]"/>
<code>
<xsl:apply-templates select="." mode="common.html.attributes"/>
<xsl:call-template name="id.attribute"/>
<xsl:text>sub </xsl:text>
<xsl:apply-templates select="methodname" mode="perl"/>
<xsl:text> { ... };</xsl:text>
</code>
<xsl:call-template name="synop-break"/>
</xsl:template>
<!-- Used when not occurring as a child of classsynopsis -->
<xsl:template match="ooclass|oointerface|ooexception">
<xsl:apply-templates/>
</xsl:template>
<!-- ==================================================================== -->
<!-- * DocBook 5 allows linking elements (link, olink, and xref) -->
<!-- * within the OO *synopsis elements (classsynopsis, fieldsynopsis, -->
<!-- * methodsynopsis, constructorsynopsis, destructorsynopsis) and -->
<!-- * their children. So we need to have mode="java|cpp|idl|perl" -->
<!-- * per-mode matches for those linking elements in order for them -->
<!-- * to be processed as expected. -->
<!-- Each pass-through below re-dispatches the linking element in the
     default (no-mode) processing, so the normal link handling applies
     even while a language-specific synopsis mode is active. -->
<xsl:template match="link|olink|xref" mode="java">
<xsl:apply-templates select="."/>
</xsl:template>
<xsl:template match="link|olink|xref" mode="cpp">
<xsl:apply-templates select="."/>
</xsl:template>
<xsl:template match="link|olink|xref" mode="idl">
<xsl:apply-templates select="."/>
</xsl:template>
<xsl:template match="link|olink|xref" mode="perl">
<xsl:apply-templates select="."/>
</xsl:template>
<xsl:template match="link|olink|xref" mode="ansi-nontabular">
<xsl:apply-templates select="."/>
</xsl:template>
<xsl:template match="link|olink|xref" mode="ansi-tabular">
<xsl:apply-templates select="."/>
</xsl:template>
<xsl:template match="link|olink|xref" mode="kr-nontabular">
<xsl:apply-templates select="."/>
</xsl:template>
<xsl:template match="link|olink|xref" mode="kr-tabular">
<xsl:apply-templates select="."/>
</xsl:template>
</xsl:stylesheet>
|
{
"pile_set_name": "Github"
}
|
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Guide to Using Jupyter Notebooks\n",
"In this lecture we will be going over the basics of the Jupyter (previously called iPython Notebooks).\n",
"\n",
"For a complete User Manual check out the [Bryn Mawr College Computer Science Guide](https://jupyter.brynmawr.edu/services/public/dblank/Jupyter%20Notebook%20Users%20Manual.ipynb).\n",
"\n",
"Most of the breakdown will actually occur in the presentation corresponding to this Notebook. So please refer to either the presentation or the full User Manual linked above."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.2"
}
},
"nbformat": 4,
"nbformat_minor": 1
}
|
{
"pile_set_name": "Github"
}
|
/*
* Copyright 2014 DataGenerator Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.finra.datagenerator.engine.scxml;
import org.apache.commons.scxml.Context;
import org.apache.commons.scxml.SCXMLExecutor;
import org.apache.commons.scxml.SCXMLExpressionException;
import org.apache.commons.scxml.env.jsp.ELContext;
import org.apache.commons.scxml.env.jsp.ELEvaluator;
import org.apache.commons.scxml.model.Action;
import org.apache.commons.scxml.model.OnEntry;
import org.apache.commons.scxml.model.SCXML;
import org.apache.commons.scxml.model.Transition;
import org.apache.commons.scxml.model.TransitionTarget;
import org.apache.log4j.Logger;
import org.finra.datagenerator.distributor.ProcessingStrategy;
import org.finra.datagenerator.engine.Frontier;
import org.finra.datagenerator.engine.scxml.tags.CustomTagExtension;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* Frontier implementation for generating data with SCXML state machine models.
*/
public class SCXMLFrontier extends SCXMLExecutor implements Frontier {
// Root of the depth-first search: a state machine node plus the partial
// variable assignment accumulated on the path to it.
private final PossibleState root;
private static final Logger log = Logger.getLogger(SCXMLFrontier.class);
// Custom tag handlers consulted for every OnEntry action; each matching
// extension may expand one partial assignment into several.
private List<CustomTagExtension> tagExtensionList;
/**
* Constructor
*
* @param possibleState the root node of the model and partial variable assignment to start a dfs from
* @param model the model text
* @param tagExtensionList custom tags used in this model
*/
public SCXMLFrontier(final PossibleState possibleState, final SCXML model,
final List<CustomTagExtension> tagExtensionList) {
root = possibleState;
this.tagExtensionList = tagExtensionList;
this.setStateMachine(model);
// JSP-EL evaluator/context back the transition-condition evaluation in dfs().
ELEvaluator elEvaluator = new ELEvaluator();
ELContext context = new ELContext();
this.setEvaluator(elEvaluator);
this.setRootContext(context);
}
/**
* Constructor
*
* @param possibleState the root node of the model and partial variable assignment to start a dfs from
* @param model the model text
*/
public SCXMLFrontier(final PossibleState possibleState, final SCXML model) {
this(possibleState, model, new LinkedList<CustomTagExtension>());
}
/**
* Performs a DFS on the model, starting from root, giving results to the processingStrategy
* Just a public wrapper for private dfs function
*
* @param processingStrategy the results handler
* @param flag used to stop the search before completion
*/
public void searchForScenarios(ProcessingStrategy processingStrategy, AtomicBoolean flag) {
dfs(processingStrategy, flag, root);
}
// Recursive DFS over the state chart. For each state: run its OnEntry
// actions through the tag extensions to expand the set of candidate
// variable assignments, then recurse along every transition whose
// condition each candidate satisfies. Checking `flag` at entry lets a
// caller abort an in-progress search.
private void dfs(ProcessingStrategy processingStrategy, AtomicBoolean flag, PossibleState state) {
if (flag.get()) {
return;
}
TransitionTarget nextState = state.nextState;
//reached end of chart, valid assignment found
// NOTE(review): terminal detection is by the literal state id "end"
// (case-insensitive) — models must name their final state accordingly.
if (nextState.getId().equalsIgnoreCase("end")) {
processingStrategy.processOutput(state.variables);
return;
}
//run every action in series
// `product` starts as a single copy of the current assignment and may
// fan out as extensions pipeline each action over it.
List<Map<String, String>> product = new LinkedList<>();
product.add(new HashMap<>(state.variables));
OnEntry entry = nextState.getOnEntry();
List<Action> actions = entry.getActions();
for (Action action : actions) {
for (CustomTagExtension tagExtension : tagExtensionList) {
if (tagExtension.getTagActionClass().isInstance(action)) {
product = tagExtension.pipelinePossibleStates(action, product);
}
}
}
//go through every transition and see which of the products are valid, recursive searching on them
List<Transition> transitions = nextState.getTransitionsList();
for (Transition transition : transitions) {
String condition = transition.getCond();
// Only the first target of each transition is followed.
TransitionTarget target = ((List<TransitionTarget>) transition.getTargets()).get(0);
for (Map<String, String> p : product) {
Boolean pass;
if (condition == null) {
// Unconditional transition: always taken.
pass = true;
} else {
//scrub the context clean so we may use it to evaluate transition conditional
// The single root context is shared across the whole search, so it
// must be reset and repopulated for every candidate assignment.
Context context = this.getRootContext();
context.reset();
//set up new context
for (Map.Entry<String, String> e : p.entrySet()) {
context.set(e.getKey(), e.getValue());
}
//evaluate condition
try {
pass = (Boolean) this.getEvaluator().eval(context, condition);
} catch (SCXMLExpressionException ex) {
// An unevaluable condition is treated as not satisfied rather
// than aborting the search.
pass = false;
}
}
//transition condition satisfied, continue search recursively
if (pass) {
PossibleState result = new PossibleState(target, p);
dfs(processingStrategy, flag, result);
}
}
}
}
public PossibleState getRoot() {
return root;
}
}
|
{
"pile_set_name": "Github"
}
|
<?xml version="1.0" encoding="UTF-8"?>
<transformation>
<info>
<name>wordcount-mapper</name>
<description/>
<extended_description/>
<trans_version/>
<trans_type>Normal</trans_type>
<trans_status>0</trans_status>
<directory>/</directory>
<parameters>
</parameters>
<log>
<trans-log-table><connection/>
<schema/>
<table/>
<size_limit_lines/>
<interval/>
<timeout_days/>
<field><id>ID_BATCH</id><enabled>Y</enabled><name>ID_BATCH</name></field><field><id>CHANNEL_ID</id><enabled>Y</enabled><name>CHANNEL_ID</name></field><field><id>TRANSNAME</id><enabled>Y</enabled><name>TRANSNAME</name></field><field><id>STATUS</id><enabled>Y</enabled><name>STATUS</name></field><field><id>LINES_READ</id><enabled>Y</enabled><name>LINES_READ</name><subject/></field><field><id>LINES_WRITTEN</id><enabled>Y</enabled><name>LINES_WRITTEN</name><subject/></field><field><id>LINES_UPDATED</id><enabled>Y</enabled><name>LINES_UPDATED</name><subject/></field><field><id>LINES_INPUT</id><enabled>Y</enabled><name>LINES_INPUT</name><subject/></field><field><id>LINES_OUTPUT</id><enabled>Y</enabled><name>LINES_OUTPUT</name><subject/></field><field><id>LINES_REJECTED</id><enabled>Y</enabled><name>LINES_REJECTED</name><subject/></field><field><id>ERRORS</id><enabled>Y</enabled><name>ERRORS</name></field><field><id>STARTDATE</id><enabled>Y</enabled><name>STARTDATE</name></field><field><id>ENDDATE</id><enabled>Y</enabled><name>ENDDATE</name></field><field><id>LOGDATE</id><enabled>Y</enabled><name>LOGDATE</name></field><field><id>DEPDATE</id><enabled>Y</enabled><name>DEPDATE</name></field><field><id>REPLAYDATE</id><enabled>Y</enabled><name>REPLAYDATE</name></field><field><id>LOG_FIELD</id><enabled>Y</enabled><name>LOG_FIELD</name></field></trans-log-table>
<perf-log-table><connection/>
<schema/>
<table/>
<interval/>
<timeout_days/>
<field><id>ID_BATCH</id><enabled>Y</enabled><name>ID_BATCH</name></field><field><id>SEQ_NR</id><enabled>Y</enabled><name>SEQ_NR</name></field><field><id>LOGDATE</id><enabled>Y</enabled><name>LOGDATE</name></field><field><id>TRANSNAME</id><enabled>Y</enabled><name>TRANSNAME</name></field><field><id>STEPNAME</id><enabled>Y</enabled><name>STEPNAME</name></field><field><id>STEP_COPY</id><enabled>Y</enabled><name>STEP_COPY</name></field><field><id>LINES_READ</id><enabled>Y</enabled><name>LINES_READ</name></field><field><id>LINES_WRITTEN</id><enabled>Y</enabled><name>LINES_WRITTEN</name></field><field><id>LINES_UPDATED</id><enabled>Y</enabled><name>LINES_UPDATED</name></field><field><id>LINES_INPUT</id><enabled>Y</enabled><name>LINES_INPUT</name></field><field><id>LINES_OUTPUT</id><enabled>Y</enabled><name>LINES_OUTPUT</name></field><field><id>LINES_REJECTED</id><enabled>Y</enabled><name>LINES_REJECTED</name></field><field><id>ERRORS</id><enabled>Y</enabled><name>ERRORS</name></field><field><id>INPUT_BUFFER_ROWS</id><enabled>Y</enabled><name>INPUT_BUFFER_ROWS</name></field><field><id>OUTPUT_BUFFER_ROWS</id><enabled>Y</enabled><name>OUTPUT_BUFFER_ROWS</name></field></perf-log-table>
<channel-log-table><connection/>
<schema/>
<table/>
<timeout_days/>
<field><id>ID_BATCH</id><enabled>Y</enabled><name>ID_BATCH</name></field><field><id>CHANNEL_ID</id><enabled>Y</enabled><name>CHANNEL_ID</name></field><field><id>LOG_DATE</id><enabled>Y</enabled><name>LOG_DATE</name></field><field><id>LOGGING_OBJECT_TYPE</id><enabled>Y</enabled><name>LOGGING_OBJECT_TYPE</name></field><field><id>OBJECT_NAME</id><enabled>Y</enabled><name>OBJECT_NAME</name></field><field><id>OBJECT_COPY</id><enabled>Y</enabled><name>OBJECT_COPY</name></field><field><id>REPOSITORY_DIRECTORY</id><enabled>Y</enabled><name>REPOSITORY_DIRECTORY</name></field><field><id>FILENAME</id><enabled>Y</enabled><name>FILENAME</name></field><field><id>OBJECT_ID</id><enabled>Y</enabled><name>OBJECT_ID</name></field><field><id>OBJECT_REVISION</id><enabled>Y</enabled><name>OBJECT_REVISION</name></field><field><id>PARENT_CHANNEL_ID</id><enabled>Y</enabled><name>PARENT_CHANNEL_ID</name></field><field><id>ROOT_CHANNEL_ID</id><enabled>Y</enabled><name>ROOT_CHANNEL_ID</name></field></channel-log-table>
<step-log-table><connection/>
<schema/>
<table/>
<timeout_days/>
<field><id>ID_BATCH</id><enabled>Y</enabled><name>ID_BATCH</name></field><field><id>CHANNEL_ID</id><enabled>Y</enabled><name>CHANNEL_ID</name></field><field><id>LOG_DATE</id><enabled>Y</enabled><name>LOG_DATE</name></field><field><id>TRANSNAME</id><enabled>Y</enabled><name>TRANSNAME</name></field><field><id>STEPNAME</id><enabled>Y</enabled><name>STEPNAME</name></field><field><id>STEP_COPY</id><enabled>Y</enabled><name>STEP_COPY</name></field><field><id>LINES_READ</id><enabled>Y</enabled><name>LINES_READ</name></field><field><id>LINES_WRITTEN</id><enabled>Y</enabled><name>LINES_WRITTEN</name></field><field><id>LINES_UPDATED</id><enabled>Y</enabled><name>LINES_UPDATED</name></field><field><id>LINES_INPUT</id><enabled>Y</enabled><name>LINES_INPUT</name></field><field><id>LINES_OUTPUT</id><enabled>Y</enabled><name>LINES_OUTPUT</name></field><field><id>LINES_REJECTED</id><enabled>Y</enabled><name>LINES_REJECTED</name></field><field><id>ERRORS</id><enabled>Y</enabled><name>ERRORS</name></field><field><id>LOG_FIELD</id><enabled>N</enabled><name>LOG_FIELD</name></field></step-log-table>
</log>
<maxdate>
<connection/>
<table/>
<field/>
<offset>0.0</offset>
<maxdiff>0.0</maxdiff>
</maxdate>
<size_rowset>10000</size_rowset>
<sleep_time_empty>50</sleep_time_empty>
<sleep_time_full>50</sleep_time_full>
<unique_connections>N</unique_connections>
<feedback_shown>Y</feedback_shown>
<feedback_size>50000</feedback_size>
<using_thread_priorities>Y</using_thread_priorities>
<shared_objects_file/>
<capture_step_performance>N</capture_step_performance>
<step_performance_capturing_delay>1000</step_performance_capturing_delay>
<step_performance_capturing_size_limit>100</step_performance_capturing_size_limit>
<dependencies>
</dependencies>
<partitionschemas>
</partitionschemas>
<slaveservers>
</slaveservers>
<clusterschemas>
</clusterschemas>
<created_user/>
<created_date>2010/12/30 23:29:15.795</created_date>
<modified_user>-</modified_user>
<modified_date>2010/07/15 10:12:26.133</modified_date>
</info>
<notepads>
</notepads>
<connection>
<name>CMS</name>
<server>${CMS_HOSTNAME}</server>
<type>MYSQL</type>
<access>Native</access>
<database>${CMS_DATABASE}</database>
<port>${CMS_PORT}</port>
<username>${CMS_USERNAME}</username>
<password>${CMS_PASSWORD}</password>
<servername/>
<data_tablespace/>
<index_tablespace/>
<attributes>
<attribute><code>FORCE_IDENTIFIERS_TO_LOWERCASE</code><attribute>N</attribute></attribute>
<attribute><code>FORCE_IDENTIFIERS_TO_UPPERCASE</code><attribute>N</attribute></attribute>
<attribute><code>IS_CLUSTERED</code><attribute>N</attribute></attribute>
<attribute><code>PORT_NUMBER</code><attribute>${CMS_PORT}</attribute></attribute>
<attribute><code>QUOTE_ALL_FIELDS</code><attribute>N</attribute></attribute>
<attribute><code>STREAM_RESULTS</code><attribute>Y</attribute></attribute>
<attribute><code>USE_POOLING</code><attribute>N</attribute></attribute>
</attributes>
</connection>
<connection>
<name>DWH</name>
<server>${DWH_HOSTNAME}</server>
<type>MYSQL</type>
<access>Native</access>
<database>${DWH_DATABASE}</database>
<port>${DWH_PORT}</port>
<username>${DWH_USERNAME}</username>
<password>${DWH_PASSWORD}</password>
<servername/>
<data_tablespace/>
<index_tablespace/>
<attributes>
<attribute><code>FORCE_IDENTIFIERS_TO_LOWERCASE</code><attribute>N</attribute></attribute>
<attribute><code>FORCE_IDENTIFIERS_TO_UPPERCASE</code><attribute>N</attribute></attribute>
<attribute><code>IS_CLUSTERED</code><attribute>N</attribute></attribute>
<attribute><code>PORT_NUMBER</code><attribute>${DWH_PORT}</attribute></attribute>
<attribute><code>QUOTE_ALL_FIELDS</code><attribute>N</attribute></attribute>
<attribute><code>STREAM_RESULTS</code><attribute>Y</attribute></attribute>
<attribute><code>USE_POOLING</code><attribute>N</attribute></attribute>
</attributes>
</connection>
<connection>
<name>LOGGING</name>
<server>${LOGGING_HOSTNAME}</server>
<type>MYSQL</type>
<access>Native</access>
<database>${LOGGING_DATABASE}</database>
<port>${LOGGING_PORT}</port>
<username>${LOGGING_USERNAME}</username>
<password>${LOGGING_PASSWORD}</password>
<servername/>
<data_tablespace/>
<index_tablespace/>
<attributes>
<attribute><code>FORCE_IDENTIFIERS_TO_LOWERCASE</code><attribute>N</attribute></attribute>
<attribute><code>FORCE_IDENTIFIERS_TO_UPPERCASE</code><attribute>N</attribute></attribute>
<attribute><code>IS_CLUSTERED</code><attribute>N</attribute></attribute>
<attribute><code>PORT_NUMBER</code><attribute>${LOGGING_PORT}</attribute></attribute>
<attribute><code>QUOTE_ALL_FIELDS</code><attribute>N</attribute></attribute>
<attribute><code>STREAM_RESULTS</code><attribute>Y</attribute></attribute>
<attribute><code>USE_POOLING</code><attribute>N</attribute></attribute>
</attributes>
</connection>
<connection>
<name>Postgres</name>
<server>localhost</server>
<type>POSTGRESQL</type>
<access>Native</access>
<database>test</database>
<port>5432</port>
<username>postgres</username>
<password>Encrypted 2be98afc86aa7f2e4bb16bd64d980aac9</password>
<servername/>
<data_tablespace/>
<index_tablespace/>
<attributes>
<attribute><code>FORCE_IDENTIFIERS_TO_LOWERCASE</code><attribute>N</attribute></attribute>
<attribute><code>FORCE_IDENTIFIERS_TO_UPPERCASE</code><attribute>N</attribute></attribute>
<attribute><code>IS_CLUSTERED</code><attribute>N</attribute></attribute>
<attribute><code>PORT_NUMBER</code><attribute>5432</attribute></attribute>
<attribute><code>QUOTE_ALL_FIELDS</code><attribute>N</attribute></attribute>
<attribute><code>SUPPORTS_BOOLEAN_DATA_TYPE</code><attribute>N</attribute></attribute>
<attribute><code>USE_POOLING</code><attribute>N</attribute></attribute>
</attributes>
</connection>
<connection>
<name>STG</name>
<server>${STG_HOSTNAME}</server>
<type>MYSQL</type>
<access>Native</access>
<database>${STG_DATABASE}</database>
<port>${STG_PORT}</port>
<username>${STG_USERNAME}</username>
<password>${STG_PASSWORD}</password>
<servername/>
<data_tablespace/>
<index_tablespace/>
<attributes>
<attribute><code>FORCE_IDENTIFIERS_TO_LOWERCASE</code><attribute>N</attribute></attribute>
<attribute><code>FORCE_IDENTIFIERS_TO_UPPERCASE</code><attribute>N</attribute></attribute>
<attribute><code>IS_CLUSTERED</code><attribute>N</attribute></attribute>
<attribute><code>PORT_NUMBER</code><attribute>${STG_PORT}</attribute></attribute>
<attribute><code>QUOTE_ALL_FIELDS</code><attribute>N</attribute></attribute>
<attribute><code>STREAM_RESULTS</code><attribute>Y</attribute></attribute>
<attribute><code>USE_POOLING</code><attribute>N</attribute></attribute>
</attributes>
</connection>
<order>
<hop> <from>Dev Generator</from><to>Split words to rows</to><enabled>N</enabled> </hop> <hop> <from>Split words to rows</from><to>Add value</to><enabled>Y</enabled> </hop> <hop> <from>Add value</from><to>Remove garbage</to><enabled>Y</enabled> </hop> <hop> <from>Remove garbage</from><to>Output</to><enabled>Y</enabled> </hop> <hop> <from>Injector</from><to>Split words to rows</to><enabled>Y</enabled> </hop> </order>
<step>
<name>Add value</name>
<type>Constant</type>
<description/>
<distribute>Y</distribute>
<copies>1</copies>
<partitioning>
<method>none</method>
<schema_name/>
</partitioning>
<fields>
<field>
<name>outValue</name>
<type>Integer</type>
<format/>
<currency/>
<decimal/>
<group/>
<nullif>1</nullif>
<length>-1</length>
<precision>-1</precision>
</field>
</fields>
<cluster_schema/>
<remotesteps> <input> </input> <output> </output> </remotesteps> <GUI>
<xloc>510</xloc>
<yloc>222</yloc>
<draw>Y</draw>
</GUI>
</step>
<step>
<name>Dev Generator</name>
<type>RowGenerator</type>
<description/>
<distribute>Y</distribute>
<copies>1</copies>
<partitioning>
<method>none</method>
<schema_name/>
</partitioning>
<fields>
<field>
<name>key</name>
<type>String</type>
<format/>
<currency/>
<decimal/>
<group/>
<nullif>14</nullif>
<length>-1</length>
<precision>-1</precision>
</field>
<field>
<name>value</name>
<type>String</type>
<format/>
<currency/>
<decimal/>
<group/>
<nullif>Hello world good bye world</nullif>
<length>-1</length>
<precision>-1</precision>
</field>
</fields>
<limit>1</limit>
<cluster_schema/>
<remotesteps> <input> </input> <output> </output> </remotesteps> <GUI>
<xloc>353</xloc>
<yloc>59</yloc>
<draw>Y</draw>
</GUI>
</step>
<step>
<name>Injector</name>
<type>Injector</type>
<description/>
<distribute>Y</distribute>
<copies>1</copies>
<partitioning>
<method>none</method>
<schema_name/>
</partitioning>
<fields> <field> <name>key</name>
<type>String</type>
<length>-1</length>
<precision>-1</precision>
</field> <field> <name>value</name>
<type>String</type>
<length>-1</length>
<precision>-1</precision>
</field> </fields> <cluster_schema/>
<remotesteps> <input> </input> <output> </output> </remotesteps> <GUI>
<xloc>167</xloc>
<yloc>221</yloc>
<draw>Y</draw>
</GUI>
</step>
<step>
<name>Output</name>
<type>Dummy</type>
<description/>
<distribute>Y</distribute>
<copies>1</copies>
<partitioning>
<method>none</method>
<schema_name/>
</partitioning>
<cluster_schema/>
<remotesteps> <input> </input> <output> </output> </remotesteps> <GUI>
<xloc>778</xloc>
<yloc>221</yloc>
<draw>Y</draw>
</GUI>
</step>
<step>
<name>Remove garbage</name>
<type>SelectValues</type>
<description/>
<distribute>Y</distribute>
<copies>1</copies>
<partitioning>
<method>none</method>
<schema_name/>
</partitioning>
<fields> <select_unspecified>N</select_unspecified>
<remove> <name>key</name>
</remove> <remove> <name>value</name>
</remove> </fields> <cluster_schema/>
<remotesteps> <input> </input> <output> </output> </remotesteps> <GUI>
<xloc>650</xloc>
<yloc>222</yloc>
<draw>Y</draw>
</GUI>
</step>
<step>
<name>Split words to rows</name>
<type>SplitFieldToRows3</type>
<description/>
<distribute>Y</distribute>
<copies>1</copies>
<partitioning>
<method>none</method>
<schema_name/>
</partitioning>
<splitfield>value</splitfield>
<delimiter> </delimiter>
<newfield>outKey</newfield>
<rownum>N</rownum>
<rownum_field/>
<resetrownumber>Y</resetrownumber>
<cluster_schema/>
<remotesteps> <input> </input> <output> </output> </remotesteps> <GUI>
<xloc>352</xloc>
<yloc>221</yloc>
<draw>Y</draw>
</GUI>
</step>
<step_error_handling>
</step_error_handling>
<slave-step-copy-partition-distribution>
</slave-step-copy-partition-distribution>
<slave_transformation>N</slave_transformation>
</transformation>
|
{
"pile_set_name": "Github"
}
|
#ifndef COOLFluiD_Numerics_SubSystemCoupler_FluidSolidHeatPostVariableTransformerFVMCC_hh
#define COOLFluiD_Numerics_SubSystemCoupler_FluidSolidHeatPostVariableTransformerFVMCC_hh
//////////////////////////////////////////////////////////////////////
#include "PostVariableTransformer.hh"
//////////////////////////////////////////////////////////////////////
namespace COOLFluiD {
namespace Physics {
namespace NavierStokes {
class EulerVarSet;
}
}
namespace Numerics {
namespace SubSystemCoupler {
//////////////////////////////////////////////////////////////////////
/**
 * This class represents a FluidSolidHeatPost transformer of variables
 * (the post-processing side of the fluid/solid heat coupling, finite
 * volume cell-centered flavor).
 *
 * @author Thomas Wuilbaut
 *
 */
class FluidSolidHeatPostVariableTransformerFVMCC : public PostVariableTransformer {
public:
/**
 * Default constructor without arguments
 *
 * @param name configuration name of this transformer instance
 */
FluidSolidHeatPostVariableTransformerFVMCC(const std::string& name);
/**
 * Default destructor
 */
~FluidSolidHeatPostVariableTransformerFVMCC();
/**
 * Configuration
 *
 * Delegates straight to the base class; this transformer adds no
 * configuration options of its own.
 */
void configure ( Config::ConfigArgs& args )
{
PostVariableTransformer::configure(args);
}
/**
 * Sets Up the object
 *
 * Expected to acquire the diffusive variable set stored in _varSet
 * (see member below) -- implementation lives in the .cxx file.
 */
virtual void setup();
/**
 * Transform a vector into another one
 *
 * @param faces geometric entities adjacent to the coupling interface
 * @param coord coordinates of the point being transformed
 * @param original untransformed values received from the other subsystem
 * @param pastTransformedVector previously transformed values (for relaxation)
 * @return pointer to the transformed vector (ownership per base-class contract)
 */
virtual RealVector* transform(const std::vector<GeoEntityIdx>& faces,
const RealVector& coord,
const RealVector& original,
const RealVector& pastTransformedVector);
/**
 * Transform a vector into another one (in the case of nodal values)
 *
 * The nodal state is ignored here: this overload simply forwards to the
 * face-based transform above.
 */
virtual RealVector* transform(const std::vector<GeoEntityIdx>& faces,
const RealVector& coord,
const RealVector& currentState,
const RealVector& original,
const RealVector& pastTransformedVector)
{
return transform(faces, coord, original, pastTransformedVector);
}
/**
 * Return the size of the transformed vector
 *
 * This transformer maps scalar to scalar, hence input size must be 1
 * and the output size is always 1.
 */
CFuint getTransformedSize(const CFuint size)
{
cf_assert(size == 1);
return 1;
}
private:
/// corresponding diffusive variable set
Common::SafePtr<Physics::NavierStokes::EulerVarSet> _varSet;
}; // end of class FluidSolidHeatPostVariableTransformerFVMCC
//////////////////////////////////////////////////////////////////////
} // namespace SubSystemCoupler
} // namespace Numerics
} // namespace COOLFluiD
//////////////////////////////////////////////////////////////////////
#endif // COOLFluiD_Numerics_SubSystemCoupler_FluidSolidHeatPostVariableTransformerFVMCC_hh
|
{
"pile_set_name": "Github"
}
|
<!-- doc/src/sgml/ltree.sgml -->
<sect1 id="ltree" xreflabel="ltree">
<title>ltree</title>
<indexterm zone="ltree">
<primary>ltree</primary>
</indexterm>
<para>
This module implements a data type <type>ltree</type> for representing
labels of data stored in a hierarchical tree-like structure.
Extensive facilities for searching through label trees are provided.
</para>
<para>
This module is considered <quote>trusted</quote>, that is, it can be
installed by non-superusers who have <literal>CREATE</literal> privilege
on the current database.
</para>
<sect2>
<title>Definitions</title>
<para>
A <firstterm>label</firstterm> is a sequence of alphanumeric characters
and underscores (for example, in C locale the characters
<literal>A-Za-z0-9_</literal> are allowed).
Labels must be less than 256 characters long.
</para>
<para>
Examples: <literal>42</literal>, <literal>Personal_Services</literal>
</para>
<para>
A <firstterm>label path</firstterm> is a sequence of zero or more
labels separated by dots, for example <literal>L1.L2.L3</literal>, representing
a path from the root of a hierarchical tree to a particular node. The
length of a label path cannot exceed 65535 labels.
</para>
<para>
Example: <literal>Top.Countries.Europe.Russia</literal>
</para>
<para>
The <filename>ltree</filename> module provides several data types:
</para>
<itemizedlist>
<listitem>
<para>
<type>ltree</type> stores a label path.
</para>
</listitem>
<listitem>
<para>
<type>lquery</type> represents a regular-expression-like pattern
for matching <type>ltree</type> values. A simple word matches that
label within a path. A star symbol (<literal>*</literal>) matches zero
or more labels. These can be joined with dots to form a pattern that
must match the whole label path. For example:
<synopsis>
foo <lineannotation>Match the exact label path <literal>foo</literal></lineannotation>
*.foo.* <lineannotation>Match any label path containing the label <literal>foo</literal></lineannotation>
*.foo <lineannotation>Match any label path whose last label is <literal>foo</literal></lineannotation>
</synopsis>
</para>
<para>
Both star symbols and simple words can be quantified to restrict how many
labels they can match:
<synopsis>
*{<replaceable>n</replaceable>} <lineannotation>Match exactly <replaceable>n</replaceable> labels</lineannotation>
*{<replaceable>n</replaceable>,} <lineannotation>Match at least <replaceable>n</replaceable> labels</lineannotation>
*{<replaceable>n</replaceable>,<replaceable>m</replaceable>} <lineannotation>Match at least <replaceable>n</replaceable> but not more than <replaceable>m</replaceable> labels</lineannotation>
*{,<replaceable>m</replaceable>} <lineannotation>Match at most <replaceable>m</replaceable> labels — same as </lineannotation>*{0,<replaceable>m</replaceable>}
foo{<replaceable>n</replaceable>,<replaceable>m</replaceable>} <lineannotation>Match at least <replaceable>n</replaceable> but not more than <replaceable>m</replaceable> occurrences of <literal>foo</literal></lineannotation>
foo{,} <lineannotation>Match any number of occurrences of <literal>foo</literal>, including zero</lineannotation>
</synopsis>
In the absence of any explicit quantifier, the default for a star symbol
is to match any number of labels (that is, <literal>{,}</literal>) while
the default for a non-star item is to match exactly once (that
is, <literal>{1}</literal>).
</para>
<para>
There are several modifiers that can be put at the end of a non-star
<type>lquery</type> item to make it match more than just the exact match:
<synopsis>
@ <lineannotation>Match case-insensitively, for example <literal>a@</literal> matches <literal>A</literal></lineannotation>
* <lineannotation>Match any label with this prefix, for example <literal>foo*</literal> matches <literal>foobar</literal></lineannotation>
% <lineannotation>Match initial underscore-separated words</lineannotation>
</synopsis>
The behavior of <literal>%</literal> is a bit complicated. It tries to match
words rather than the entire label. For example
<literal>foo_bar%</literal> matches <literal>foo_bar_baz</literal> but not
<literal>foo_barbaz</literal>. If combined with <literal>*</literal>, prefix
matching applies to each word separately, for example
<literal>foo_bar%*</literal> matches <literal>foo1_bar2_baz</literal> but
not <literal>foo1_br2_baz</literal>.
</para>
<para>
Also, you can write several possibly-modified non-star items separated with
<literal>|</literal> (OR) to match any of those items, and you can put
<literal>!</literal> (NOT) at the start of a non-star group to match any
label that doesn't match any of the alternatives. A quantifier, if any,
goes at the end of the group; it means some number of matches for the
group as a whole (that is, some number of labels matching or not matching
any of the alternatives).
</para>
<para>
Here's an annotated example of <type>lquery</type>:
<programlisting>
Top.*{0,2}.sport*@.!football|tennis{1,}.Russ*|Spain
a. b. c. d. e.
</programlisting>
This query will match any label path that:
</para>
<orderedlist numeration="loweralpha">
<listitem>
<para>
begins with the label <literal>Top</literal>
</para>
</listitem>
<listitem>
<para>
and next has zero to two labels before
</para>
</listitem>
<listitem>
<para>
a label beginning with the case-insensitive prefix <literal>sport</literal>
</para>
</listitem>
<listitem>
<para>
then has one or more labels, none of which
match <literal>football</literal> nor <literal>tennis</literal>
</para>
</listitem>
<listitem>
<para>
and then ends with a label beginning with <literal>Russ</literal> or
exactly matching <literal>Spain</literal>.
</para>
</listitem>
</orderedlist>
</listitem>
<listitem>
<para><type>ltxtquery</type> represents a full-text-search-like
pattern for matching <type>ltree</type> values. An
<type>ltxtquery</type> value contains words, possibly with the
modifiers <literal>@</literal>, <literal>*</literal>, <literal>%</literal> at the end;
the modifiers have the same meanings as in <type>lquery</type>.
Words can be combined with <literal>&</literal> (AND),
<literal>|</literal> (OR), <literal>!</literal> (NOT), and parentheses.
The key difference from
<type>lquery</type> is that <type>ltxtquery</type> matches words without
regard to their position in the label path.
</para>
<para>
Here's an example <type>ltxtquery</type>:
<programlisting>
Europe & Russia*@ & !Transportation
</programlisting>
This will match paths that contain the label <literal>Europe</literal> and
any label beginning with <literal>Russia</literal> (case-insensitive),
but not paths containing the label <literal>Transportation</literal>.
The location of these words within the path is not important.
Also, when <literal>%</literal> is used, the word can be matched to any
underscore-separated word within a label, regardless of position.
</para>
</listitem>
</itemizedlist>
<para>
Note: <type>ltxtquery</type> allows whitespace between symbols, but
<type>ltree</type> and <type>lquery</type> do not.
</para>
</sect2>
<sect2>
<title>Operators and Functions</title>
<para>
Type <type>ltree</type> has the usual comparison operators
<literal>=</literal>, <literal><></literal>,
<literal><</literal>, <literal>></literal>, <literal><=</literal>, <literal>>=</literal>.
Comparison sorts in the order of a tree traversal, with the children
of a node sorted by label text. In addition, the specialized
operators shown in <xref linkend="ltree-op-table"/> are available.
</para>
<table id="ltree-op-table">
<title><type>ltree</type> Operators</title>
<tgroup cols="1">
<thead>
<row>
<entry role="func_table_entry"><para role="func_signature">
Operator
</para>
<para>
Description
</para></entry>
</row>
</thead>
<tbody>
<row>
<entry role="func_table_entry"><para role="func_signature">
<type>ltree</type> <literal>@></literal> <type>ltree</type>
<returnvalue>boolean</returnvalue>
</para>
<para>
Is left argument an ancestor of right (or equal)?
</para></entry>
</row>
<row>
<entry role="func_table_entry"><para role="func_signature">
<type>ltree</type> <literal><@</literal> <type>ltree</type>
<returnvalue>boolean</returnvalue>
</para>
<para>
Is left argument a descendant of right (or equal)?
</para></entry>
</row>
<row>
<entry role="func_table_entry"><para role="func_signature">
<type>ltree</type> <literal>~</literal> <type>lquery</type>
<returnvalue>boolean</returnvalue>
</para>
<para role="func_signature">
<type>lquery</type> <literal>~</literal> <type>ltree</type>
<returnvalue>boolean</returnvalue>
</para>
<para>
Does <type>ltree</type> match <type>lquery</type>?
</para></entry>
</row>
<row>
<entry role="func_table_entry"><para role="func_signature">
<type>ltree</type> <literal>?</literal> <type>lquery[]</type>
<returnvalue>boolean</returnvalue>
</para>
<para role="func_signature">
<type>lquery[]</type> <literal>?</literal> <type>ltree</type>
<returnvalue>boolean</returnvalue>
</para>
<para>
Does <type>ltree</type> match any <type>lquery</type> in array?
</para></entry>
</row>
<row>
<entry role="func_table_entry"><para role="func_signature">
<type>ltree</type> <literal>@</literal> <type>ltxtquery</type>
<returnvalue>boolean</returnvalue>
</para>
<para role="func_signature">
<type>ltxtquery</type> <literal>@</literal> <type>ltree</type>
<returnvalue>boolean</returnvalue>
</para>
<para>
Does <type>ltree</type> match <type>ltxtquery</type>?
</para></entry>
</row>
<row>
<entry role="func_table_entry"><para role="func_signature">
<type>ltree</type> <literal>||</literal> <type>ltree</type>
<returnvalue>ltree</returnvalue>
</para>
<para>
Concatenates <type>ltree</type> paths.
</para></entry>
</row>
<row>
<entry role="func_table_entry"><para role="func_signature">
<type>ltree</type> <literal>||</literal> <type>text</type>
<returnvalue>ltree</returnvalue>
</para>
<para role="func_signature">
<type>text</type> <literal>||</literal> <type>ltree</type>
<returnvalue>ltree</returnvalue>
</para>
<para>
Converts text to <type>ltree</type> and concatenates.
</para></entry>
</row>
<row>
<entry role="func_table_entry"><para role="func_signature">
<type>ltree[]</type> <literal>@></literal> <type>ltree</type>
<returnvalue>boolean</returnvalue>
</para>
<para role="func_signature">
<type>ltree</type> <literal><@</literal> <type>ltree[]</type>
<returnvalue>boolean</returnvalue>
</para>
<para>
Does array contain an ancestor of <type>ltree</type>?
</para></entry>
</row>
<row>
<entry role="func_table_entry"><para role="func_signature">
<type>ltree[]</type> <literal><@</literal> <type>ltree</type>
<returnvalue>boolean</returnvalue>
</para>
<para role="func_signature">
<type>ltree</type> <literal>@></literal> <type>ltree[]</type>
<returnvalue>boolean</returnvalue>
</para>
<para>
Does array contain a descendant of <type>ltree</type>?
</para></entry>
</row>
<row>
<entry role="func_table_entry"><para role="func_signature">
<type>ltree[]</type> <literal>~</literal> <type>lquery</type>
<returnvalue>boolean</returnvalue>
</para>
<para role="func_signature">
<type>lquery</type> <literal>~</literal> <type>ltree[]</type>
<returnvalue>boolean</returnvalue>
</para>
<para>
Does array contain any path matching <type>lquery</type>?
</para></entry>
</row>
<row>
<entry role="func_table_entry"><para role="func_signature">
<type>ltree[]</type> <literal>?</literal> <type>lquery[]</type>
<returnvalue>boolean</returnvalue>
</para>
<para role="func_signature">
<type>lquery[]</type> <literal>?</literal> <type>ltree[]</type>
<returnvalue>boolean</returnvalue>
</para>
<para>
Does <type>ltree</type> array contain any path matching
any <type>lquery</type>?
</para></entry>
</row>
<row>
<entry role="func_table_entry"><para role="func_signature">
<type>ltree[]</type> <literal>@</literal> <type>ltxtquery</type>
<returnvalue>boolean</returnvalue>
</para>
<para role="func_signature">
<type>ltxtquery</type> <literal>@</literal> <type>ltree[]</type>
<returnvalue>boolean</returnvalue>
</para>
<para>
Does array contain any path matching <type>ltxtquery</type>?
</para></entry>
</row>
<row>
<entry role="func_table_entry"><para role="func_signature">
<type>ltree[]</type> <literal>?@></literal> <type>ltree</type>
<returnvalue>ltree</returnvalue>
</para>
<para>
Returns first array entry that is an ancestor of <type>ltree</type>,
or <literal>NULL</literal> if none.
</para></entry>
</row>
<row>
<entry role="func_table_entry"><para role="func_signature">
<type>ltree[]</type> <literal>?<@</literal> <type>ltree</type>
<returnvalue>ltree</returnvalue>
</para>
<para>
Returns first array entry that is a descendant of <type>ltree</type>,
or <literal>NULL</literal> if none.
</para></entry>
</row>
<row>
<entry role="func_table_entry"><para role="func_signature">
<type>ltree[]</type> <literal>?~</literal> <type>lquery</type>
<returnvalue>ltree</returnvalue>
</para>
<para>
Returns first array entry that matches <type>lquery</type>,
or <literal>NULL</literal> if none.
</para></entry>
</row>
<row>
<entry role="func_table_entry"><para role="func_signature">
<type>ltree[]</type> <literal>?@</literal> <type>ltxtquery</type>
<returnvalue>ltree</returnvalue>
</para>
<para>
Returns first array entry that matches <type>ltxtquery</type>,
or <literal>NULL</literal> if none.
</para></entry>
</row>
</tbody>
</tgroup>
</table>
<para>
The operators <literal><@</literal>, <literal>@></literal>,
<literal>@</literal> and <literal>~</literal> have analogues
<literal>^<@</literal>, <literal>^@></literal>, <literal>^@</literal>,
<literal>^~</literal>, which are the same except they do not use
indexes. These are useful only for testing purposes.
</para>
<para>
The available functions are shown in <xref linkend="ltree-func-table"/>.
</para>
<table id="ltree-func-table">
<title><type>ltree</type> Functions</title>
<tgroup cols="1">
<thead>
<row>
<entry role="func_table_entry"><para role="func_signature">
Function
</para>
<para>
Description
</para>
<para>
Example(s)
</para></entry>
</row>
</thead>
<tbody>
<row>
<entry role="func_table_entry"><para role="func_signature">
<indexterm><primary>subltree</primary></indexterm>
<function>subltree</function> ( <type>ltree</type>, <parameter>start</parameter> <type>integer</type>, <parameter>end</parameter> <type>integer</type> )
<returnvalue>ltree</returnvalue>
</para>
<para>
Returns subpath of <type>ltree</type> from
position <parameter>start</parameter> to
position <parameter>end</parameter>-1 (counting from 0).
</para>
<para>
<literal>subltree('Top.Child1.Child2',1,2)</literal>
<returnvalue>Child1</returnvalue>
</para></entry>
</row>
<row>
<entry role="func_table_entry"><para role="func_signature">
<indexterm><primary>subpath</primary></indexterm>
<function>subpath</function> ( <type>ltree</type>, <parameter>offset</parameter> <type>integer</type>, <parameter>len</parameter> <type>integer</type> )
<returnvalue>ltree</returnvalue>
</para>
<para>
Returns subpath of <type>ltree</type> starting at
position <parameter>offset</parameter>, with
length <parameter>len</parameter>. If <parameter>offset</parameter>
is negative, subpath starts that far from the end of the path.
If <parameter>len</parameter> is negative, leaves that many labels off
the end of the path.
</para>
<para>
<literal>subpath('Top.Child1.Child2',0,2)</literal>
<returnvalue>Top.Child1</returnvalue>
</para></entry>
</row>
<row>
<entry role="func_table_entry"><para role="func_signature">
<function>subpath</function> ( <type>ltree</type>, <parameter>offset</parameter> <type>integer</type> )
<returnvalue>ltree</returnvalue>
</para>
<para>
Returns subpath of <type>ltree</type> starting at
position <parameter>offset</parameter>, extending to end of path.
If <parameter>offset</parameter> is negative, subpath starts that far
from the end of the path.
</para>
<para>
<literal>subpath('Top.Child1.Child2',1)</literal>
<returnvalue>Child1.Child2</returnvalue>
</para></entry>
</row>
<row>
<entry role="func_table_entry"><para role="func_signature">
<indexterm><primary>nlevel</primary></indexterm>
<function>nlevel</function> ( <type>ltree</type> )
<returnvalue>integer</returnvalue>
</para>
<para>
Returns number of labels in path.
</para>
<para>
<literal>nlevel('Top.Child1.Child2')</literal>
<returnvalue>3</returnvalue>
</para></entry>
</row>
<row>
<entry role="func_table_entry"><para role="func_signature">
<indexterm><primary>index</primary></indexterm>
<function>index</function> ( <parameter>a</parameter> <type>ltree</type>, <parameter>b</parameter> <type>ltree</type> )
<returnvalue>integer</returnvalue>
</para>
<para>
Returns position of first occurrence of <parameter>b</parameter> in
<parameter>a</parameter>, or -1 if not found.
</para>
<para>
<literal>index('0.1.2.3.5.4.5.6.8.5.6.8','5.6')</literal>
<returnvalue>6</returnvalue>
</para></entry>
</row>
<row>
<entry role="func_table_entry"><para role="func_signature">
<function>index</function> ( <parameter>a</parameter> <type>ltree</type>, <parameter>b</parameter> <type>ltree</type>, <parameter>offset</parameter> <type>integer</type> )
<returnvalue>integer</returnvalue>
</para>
<para>
Returns position of first occurrence of <parameter>b</parameter>
in <parameter>a</parameter>, or -1 if not found. The search starts at
position <parameter>offset</parameter>;
negative <parameter>offset</parameter> means
start <parameter>-offset</parameter> labels from the end of the path.
</para>
<para>
<literal>index('0.1.2.3.5.4.5.6.8.5.6.8','5.6',-4)</literal>
<returnvalue>9</returnvalue>
</para></entry>
</row>
<row>
<entry role="func_table_entry"><para role="func_signature">
<indexterm><primary>text2ltree</primary></indexterm>
<function>text2ltree</function> ( <type>text</type> )
<returnvalue>ltree</returnvalue>
</para>
<para>
Casts <type>text</type> to <type>ltree</type>.
</para></entry>
</row>
<row>
<entry role="func_table_entry"><para role="func_signature">
<indexterm><primary>ltree2text</primary></indexterm>
<function>ltree2text</function> ( <type>ltree</type> )
<returnvalue>text</returnvalue>
</para>
<para>
Casts <type>ltree</type> to <type>text</type>.
</para></entry>
</row>
<row>
<entry role="func_table_entry"><para role="func_signature">
<indexterm><primary>lca</primary></indexterm>
<function>lca</function> ( <type>ltree</type> <optional>, <type>ltree</type> <optional>, ... </optional></optional> )
<returnvalue>ltree</returnvalue>
</para>
<para>
Computes longest common ancestor of paths
(up to 8 arguments are supported).
</para>
<para>
<literal>lca('1.2.3','1.2.3.4.5.6')</literal>
<returnvalue>1.2</returnvalue>
</para></entry>
</row>
<row>
<entry role="func_table_entry"><para role="func_signature">
<function>lca</function> ( <type>ltree[]</type> )
<returnvalue>ltree</returnvalue>
</para>
<para>
Computes longest common ancestor of paths in array.
</para>
<para>
<literal>lca(array['1.2.3'::ltree,'1.2.3.4'])</literal>
<returnvalue>1.2</returnvalue>
</para></entry>
</row>
</tbody>
</tgroup>
</table>
</sect2>
<sect2>
<title>Indexes</title>
<para>
<filename>ltree</filename> supports several types of indexes that can speed
up the indicated operators:
</para>
<itemizedlist>
<listitem>
<para>
B-tree index over <type>ltree</type>:
<literal><</literal>, <literal><=</literal>, <literal>=</literal>,
<literal>>=</literal>, <literal>></literal>
</para>
</listitem>
<listitem>
<para>
GiST index over <type>ltree</type> (<literal>gist_ltree_ops</literal>
opclass):
<literal><</literal>, <literal><=</literal>, <literal>=</literal>,
<literal>>=</literal>, <literal>></literal>,
<literal>@></literal>, <literal><@</literal>,
<literal>@</literal>, <literal>~</literal>, <literal>?</literal>
</para>
<para>
<literal>gist_ltree_ops</literal> GiST opclass approximates a set of
path labels as a bitmap signature. Its optional integer parameter
<literal>siglen</literal> determines the
signature length in bytes. The default signature length is 8 bytes.
Valid values of signature length are between 1 and 2024 bytes. Longer
signatures lead to a more precise search (scanning a smaller fraction of the index and
fewer heap pages), at the cost of a larger index.
</para>
<para>
Example of creating such an index with the default signature length of 8 bytes:
</para>
<programlisting>
CREATE INDEX path_gist_idx ON test USING GIST (path);
</programlisting>
<para>
Example of creating such an index with a signature length of 100 bytes:
</para>
<programlisting>
CREATE INDEX path_gist_idx ON test USING GIST (path gist_ltree_ops(siglen=100));
</programlisting>
</listitem>
<listitem>
<para>
GiST index over <type>ltree[]</type> (<literal>gist__ltree_ops</literal>
opclass):
<literal>ltree[] <@ ltree</literal>, <literal>ltree @> ltree[]</literal>,
<literal>@</literal>, <literal>~</literal>, <literal>?</literal>
</para>
<para>
<literal>gist__ltree_ops</literal> GiST opclass works similarly to
<literal>gist_ltree_ops</literal> and also takes signature length as
a parameter. The default value of <literal>siglen</literal> in
<literal>gist__ltree_ops</literal> is 28 bytes.
</para>
<para>
Example of creating such an index with the default signature length of 28 bytes:
</para>
<programlisting>
CREATE INDEX path_gist_idx ON test USING GIST (array_path);
</programlisting>
<para>
Example of creating such an index with a signature length of 100 bytes:
</para>
<programlisting>
CREATE INDEX path_gist_idx ON test USING GIST (array_path gist__ltree_ops(siglen=100));
</programlisting>
<para>
Note: This index type is lossy.
</para>
</listitem>
</itemizedlist>
</sect2>
<sect2>
<title>Example</title>
<para>
This example uses the following data (also available in file
<filename>contrib/ltree/ltreetest.sql</filename> in the source distribution):
</para>
<programlisting>
CREATE TABLE test (path ltree);
INSERT INTO test VALUES ('Top');
INSERT INTO test VALUES ('Top.Science');
INSERT INTO test VALUES ('Top.Science.Astronomy');
INSERT INTO test VALUES ('Top.Science.Astronomy.Astrophysics');
INSERT INTO test VALUES ('Top.Science.Astronomy.Cosmology');
INSERT INTO test VALUES ('Top.Hobbies');
INSERT INTO test VALUES ('Top.Hobbies.Amateurs_Astronomy');
INSERT INTO test VALUES ('Top.Collections');
INSERT INTO test VALUES ('Top.Collections.Pictures');
INSERT INTO test VALUES ('Top.Collections.Pictures.Astronomy');
INSERT INTO test VALUES ('Top.Collections.Pictures.Astronomy.Stars');
INSERT INTO test VALUES ('Top.Collections.Pictures.Astronomy.Galaxies');
INSERT INTO test VALUES ('Top.Collections.Pictures.Astronomy.Astronauts');
CREATE INDEX path_gist_idx ON test USING GIST (path);
CREATE INDEX path_idx ON test USING BTREE (path);
</programlisting>
<para>
Now, we have a table <structname>test</structname> populated with data describing
the hierarchy shown below:
</para>
<literallayout class="monospaced">
Top
/ | \
Science Hobbies Collections
/ | \
Astronomy Amateurs_Astronomy Pictures
/ \ |
Astrophysics Cosmology Astronomy
/ | \
Galaxies Stars Astronauts
</literallayout>
<para>
We can do inheritance:
<screen>
ltreetest=> SELECT path FROM test WHERE path <@ 'Top.Science';
path
------------------------------------
Top.Science
Top.Science.Astronomy
Top.Science.Astronomy.Astrophysics
Top.Science.Astronomy.Cosmology
(4 rows)
</screen>
</para>
<para>
Here are some examples of path matching:
<screen>
ltreetest=> SELECT path FROM test WHERE path ~ '*.Astronomy.*';
path
-----------------------------------------------
Top.Science.Astronomy
Top.Science.Astronomy.Astrophysics
Top.Science.Astronomy.Cosmology
Top.Collections.Pictures.Astronomy
Top.Collections.Pictures.Astronomy.Stars
Top.Collections.Pictures.Astronomy.Galaxies
Top.Collections.Pictures.Astronomy.Astronauts
(7 rows)
ltreetest=> SELECT path FROM test WHERE path ~ '*.!pictures@.Astronomy.*';
path
------------------------------------
Top.Science.Astronomy
Top.Science.Astronomy.Astrophysics
Top.Science.Astronomy.Cosmology
(3 rows)
</screen>
</para>
<para>
Here are some examples of full text search:
<screen>
ltreetest=> SELECT path FROM test WHERE path @ 'Astro*% & !pictures@';
path
------------------------------------
Top.Science.Astronomy
Top.Science.Astronomy.Astrophysics
Top.Science.Astronomy.Cosmology
Top.Hobbies.Amateurs_Astronomy
(4 rows)
ltreetest=> SELECT path FROM test WHERE path @ 'Astro* & !pictures@';
path
------------------------------------
Top.Science.Astronomy
Top.Science.Astronomy.Astrophysics
Top.Science.Astronomy.Cosmology
(3 rows)
</screen>
</para>
<para>
Path construction using functions:
<screen>
ltreetest=> SELECT subpath(path,0,2)||'Space'||subpath(path,2) FROM test WHERE path <@ 'Top.Science.Astronomy';
?column?
------------------------------------------
Top.Science.Space.Astronomy
Top.Science.Space.Astronomy.Astrophysics
Top.Science.Space.Astronomy.Cosmology
(3 rows)
</screen>
</para>
<para>
We could simplify this by creating a SQL function that inserts a label
at a specified position in a path:
<screen>
CREATE FUNCTION ins_label(ltree, int, text) RETURNS ltree
AS 'select subpath($1,0,$2) || $3 || subpath($1,$2);'
LANGUAGE SQL IMMUTABLE;
ltreetest=> SELECT ins_label(path,2,'Space') FROM test WHERE path <@ 'Top.Science.Astronomy';
ins_label
------------------------------------------
Top.Science.Space.Astronomy
Top.Science.Space.Astronomy.Astrophysics
Top.Science.Space.Astronomy.Cosmology
(3 rows)
</screen>
</para>
</sect2>
<sect2>
<title>Transforms</title>
<para>
Additional extensions are available that implement transforms for
the <type>ltree</type> type for PL/Python. The extensions are
called <literal>ltree_plpythonu</literal>, <literal>ltree_plpython2u</literal>,
and <literal>ltree_plpython3u</literal>
(see <xref linkend="plpython-python23"/> for the PL/Python naming
convention). If you install these transforms and specify them when
creating a function, <type>ltree</type> values are mapped to Python lists.
(The reverse is currently not supported, however.)
</para>
<caution>
<para>
It is strongly recommended that the transform extensions be installed in
the same schema as <filename>ltree</filename>. Otherwise there are
installation-time security hazards if a transform extension's schema
contains objects defined by a hostile user.
</para>
</caution>
</sect2>
<sect2>
<title>Authors</title>
<para>
All work was done by Teodor Sigaev (<email>teodor@stack.net</email>) and
Oleg Bartunov (<email>oleg@sai.msu.su</email>). See
<ulink url="http://www.sai.msu.su/~megera/postgres/gist/"></ulink> for
additional information. Authors would like to thank Eugeny Rodichev for
helpful discussions. Comments and bug reports are welcome.
</para>
</sect2>
</sect1>
|
{
"pile_set_name": "Github"
}
|
#include "IF_EXE_M_R_StructsC.h"
#include "ace/Get_Opt.h"
#include "ace/OS_NS_string.h"
const ACE_TCHAR *ior = ACE_TEXT ("file://test.ior");
// Parse the command line, recognizing only -k <ior>.
// On success the global `ior` points at the supplied object reference
// string and 0 is returned; any other option prints a usage message and
// returns -1.
int
parse_args (int argc, ACE_TCHAR *argv[])
{
  ACE_Get_Opt get_opts (argc, argv, ACE_TEXT("k:"));

  for (int opt = get_opts (); opt != -1; opt = get_opts ())
    {
      if (opt == 'k')
        {
          // Remember the IOR string for the later string_to_object() call.
          ior = get_opts.opt_arg ();
        }
      else
        {
          // Covers both '?' (unknown option) and anything else.
          ACE_ERROR_RETURN ((LM_ERROR,
                             "usage: %s "
                             "-k <ior> "
                             "\n",
                             argv [0]),
                            -1);
        }
    }

  // Indicates successful parsing of the command line
  return 0;
}
CORBA::ORB_var m_ORB_p;
// Minimal CORBA test client.  It builds an IF_EXE_M_R::Test_Struct whose
// "whatEver" any carries a CORBA_FOOIInPlan union holding a zero-initialized
// CORBA_BaseFOOG, then invokes foo() on the remote IF_ExeCtrlData object
// resolved from the IOR given via -k.
class IF_Test_client
{
public:
// Run one request cycle: parse the command line, resolve and narrow the
// object reference, send the populated struct, and (when `shutdown` is
// true) ask the server to shut down.  Returns 0 on success, 1 on failure.
int
foo (int argc, ACE_TCHAR *argv[], bool shutdown)
{
IF_EXE_M_R::IF_ExeCtrlData_var client;
IF_EXE_M_R::Test_Struct ts;
IF_EXE_M_R::CORBA_BaseFOOG FOOG;
IF_EXE_M_R::CORBA_FOOIInPlan bs;
// Populate every field of the struct with a benign default: enums get
// their first enumerator, numerics get zero, positions are memset to 0.
FOOG.FOOD = IF_EXE_M_R::ENUM_FOOD (0);
FOOG.FOOIID = CORBA::string_dup ("A_FOOG");
ACE_OS::memset (&FOOG.startPosition,
0, sizeof(IF_EXE_M_R::CORBA_LatLongFOOB));
ACE_OS::memset (&FOOG.endPosition,
0, sizeof(IF_EXE_M_R::CORBA_LatLongFOOB));
FOOG.FOOBReferenceStartPosition = IF_EXE_M_R::FOOBMode_E (0);
FOOG.averageFOOHFOOB = 0.0;
FOOG.FOOBControlMode = IF_EXE_M_R::ENUM_ControlMode (0);
FOOG.datasetVersion = 1L;
FOOG.maxHorizontalFOOU = 0L;
FOOG.maxVerticalFOOU = 0L;
FOOG.FOOTDistance = 0.0;
FOOG.startFOOA = 0.0;
FOOG.FOOTDuration = 0.0;
FOOG.endFOOA = 0.0;
FOOG.FOOTEnergyConsum = 0.0;
FOOG.FOOAReference = IF_EXE_M_R::FOOAMode_E (0);
FOOG.FOOAControlMode = IF_EXE_M_R::ENUM_ControlMode (0);
FOOG.FOOBReferenceEndPosition = IF_EXE_M_R::FOOBMode_E (0);
FOOG.economyMode = IF_EXE_M_R::ENUM_Selection (0);
FOOG.cl_FOOSMode = IF_EXE_M_R::ENUM_CL_FOOSMode (0);
FOOG.transitionStatus = IF_EXE_M_R::ENUM_TransitionStatus (0);
FOOG.FOOKTypeStart = IF_EXE_M_R::ENUM_FOOKType (0);
FOOG.activeComponents.length (0);
FOOG.FOOKTypeEnd = IF_EXE_M_R::ENUM_FOOKType (0);
// Store the struct inside the union (discriminator BASE_FOOG), then
// insert the union into the any member of the test struct.
bs.type = IF_EXE_M_R::BASE_FOOG;
bs.data.FOOVFOOG (FOOG);
ts.l = 42L;
ts.whatEver <<= bs;
try
{
if (parse_args (argc, argv) != 0)
return 1;
// Convert the IOR string into an object reference via the global ORB.
CORBA::Object_var obj = m_ORB_p->string_to_object (ior);
if (CORBA::is_nil (obj.in ()))
{
ACE_ERROR_RETURN ((LM_DEBUG,
"Nil Test::Hello reference <%s>\n",
ior),
1);
}
ACE_DEBUG ((LM_DEBUG, "RepositoryID: %s\n", obj->_interface_repository_id()));
// Narrow to the concrete interface; nil result means a type mismatch.
client = IF_EXE_M_R::IF_ExeCtrlData::_narrow (obj.in ());
if (CORBA::is_nil (client.in ()))
{
ACE_ERROR_RETURN ((LM_DEBUG,
"Reference is not of type IF_EXE_M_R::IF_ExeCtrlData\n"),
1);
}
// The actual remote invocation under test.
client->foo (ts);
if (shutdown)
{
client->shutdown ();
}
}
catch (const CORBA::Exception &ex)
{
// A CORBA failure is reported but deliberately not treated as fatal:
// the caller still sees a 0 return in this case.
ex._tao_print_exception ("Exception caught:");
}
return 0;
}
};
// Program entry point.  Initializes the global ORB, then runs the test
// invocation ten times; the server is asked to shut down only on the final
// iteration.  The exit status accumulates the per-iteration failures plus
// one for any unexpected CORBA exception.
int
ACE_TMAIN (int argc, ACE_TCHAR *argv[])
{
  int retval = 0;
  try
    {
      m_ORB_p = CORBA::ORB_init (argc, argv);
      IF_Test_client testclient;
      for (int i = 0; i < 10; i++)
        {
          // shutdown == true only on the last of the ten iterations.
          retval += testclient.foo (argc, argv, i == 9);
        }
    }
  catch (const ::CORBA::Exception &ex)
    {
      // Fixed typo in the user-facing message: "caugth" -> "caught".
      ex._tao_print_exception("ERROR : unexpected CORBA exception caught :");
      ++retval;
    }
  return retval;
}
|
{
"pile_set_name": "Github"
}
|
// Copyright 2017 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef __ALIYUN_HOST_H__
#define __ALIYUN_HOST_H__

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#ifdef __cplusplus
extern "C" {
#endif /**< _cplusplus */

/**
 * Aliyun IoT MQTT endpoint regions.  Each value (except the _MAX
 * sentinel) indexes into g_aliyun_mqtt_domain below.
 */
typedef enum {
    ALIYUN_MQTT_REGION_SHANGHAI,  /* Shanghai */
    ALIYUN_MQTT_REGION_SINGAPORE, /* Singapore */
    ALIYUN_MQTT_REGION_JAPAN,     /* Japan */
    ALIYUN_MQTT_REGION_USA_WEST,  /* America */
    ALIYUN_MQTT_REGION_GERMANY,   /* Germany */
    ALIYUN_MQTT_REGION_CUSTOM,    /* Custom setting */
    ALIYUN_MQTT_DOMAIN_MAX        /* Maximum number of domain */
} aliyun_mqtt_region_types_t;

/**
 * Aliyun IoT HTTP endpoint regions.  Each value (except the _MAX
 * sentinel) indexes into g_aliyun_http_domain below.
 */
typedef enum {
    ALIYUN_HTTP_REGION_SHANGHAI,  /* Shanghai */
    ALIYUN_HTTP_REGION_SINGAPORE, /* Singapore */
    ALIYUN_HTTP_REGION_JAPAN,     /* Japan */
    ALIYUN_HTTP_REGION_USA_WEST,  /* America */
    ALIYUN_HTTP_REGION_GERMANY,   /* Germany */
    ALIYUN_HTTP_REGION_CUSTOM,    /* Custom setting */
    ALIYUN_HTTP_DOMAIN_MAX        /* Maximum number of domain */
} aliyun_http_region_types_t;

/* Sizes of the domain tables; equal to the enum sentinels above. */
#define ALIYUN_MQTT_DOMAIN_NUMBER (ALIYUN_MQTT_DOMAIN_MAX)
#define ALIYUN_HTTP_DOMAIN_NUMBER (ALIYUN_HTTP_DOMAIN_MAX)

/* Domain-name lookup tables, defined in the corresponding .c file. */
extern const char *g_aliyun_mqtt_domain[ALIYUN_MQTT_DOMAIN_NUMBER];
extern const char *g_aliyun_http_domain[ALIYUN_HTTP_DOMAIN_NUMBER];

#ifdef __cplusplus
}
#endif /**< _cplusplus */

#endif /**< __ALIYUN_HOST_H__ */
|
{
"pile_set_name": "Github"
}
|
# File src/library/grDevices/R/calc.R
# Part of the R package, https://www.R-project.org
#
# Copyright (C) 1995-2017 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# https://www.R-project.org/Licenses/
#### Functions that calculate useful stuff for plotting
#### BUT which do not do any actual drawing
#### Useful for both graphics and grid to have access to
## Compute the statistics used to draw a boxplot: the five whisker/hinge
## values, the sample size, optional notch endpoints, and the outliers.
boxplot.stats <- function(x, coef = 1.5, do.conf = TRUE, do.out = TRUE)
{
    if(coef < 0) stop("'coef' must not be negative")
    nna <- !is.na(x)               # mask of non-missing values
    n <- sum(nna)                  # including +/- Inf
    stats <- stats::fivenum(x, na.rm = TRUE)  # min, lower hinge, median, upper hinge, max
    iqr <- diff(stats[c(2, 4)])    # inter-hinge spread
    if(coef == 0)
        do.out <- FALSE            # coef == 0: whiskers at the extremes, no outliers
    else { ## coef > 0
        ## flag points beyond coef * IQR from the hinges; when the IQR is
        ## NA fall back to flagging only the non-finite values
        out <- if(!is.na(iqr)) { x < (stats[2L] - coef * iqr) |
            x > (stats[4L] + coef * iqr)
        } else !is.finite(x)
        ## pull the whisker ends in to the most extreme non-outlying points
        if(any(out[nna], na.rm = TRUE))
            stats[c(1, 5)] <- range(x[!out], na.rm = TRUE)
    }
    ## notch endpoints: median +/- 1.58 * IQR / sqrt(n)
    conf <- if(do.conf) stats[3L] + c(-1.58, 1.58) * iqr / sqrt(n)
    list(stats = stats, n = n, conf = conf,
         out = if(do.out) x[out & nna] else numeric())
}
## Contour lines
## Compute the contour lines of a surface z over the grid (x, y) at the
## given levels, without drawing anything.  Accepts the same flexible
## argument forms as contour(): a list with $x/$y/$z, a bare matrix, or
## separate x, y, z.  Returns the value of the C-level contour routine.
contourLines <-
    function (x = seq(0, 1, length.out = nrow(z)),
              y = seq(0, 1, length.out = ncol(z)),
              z, nlevels = 10, levels = pretty(range(z, na.rm = TRUE), nlevels))
{
    ## FIXME: This "validation" code for the x, y, z values
    ## should be put in a function for contourLines, contour,
    ## image (and persp?) to share.  Unfortunately, an xyz.coords
    ## already exists which isn't really compatible with the
    ## desired behaviour here.
    ## Normalize the argument forms: missing z means x carries the data.
    if (missing(z)) {
        if (!missing(x)) {
            if (is.list(x)) {
                z <- x$z; y <- x$y; x <- x$x
            } else {
                ## bare matrix: use an equally spaced unit grid
                z <- x
                x <- seq.int(0, 1, length.out = nrow(z))
            }
        } else stop("no 'z' matrix specified")
    } else if (is.list(x)) {
        y <- x$y
        x <- x$x
    }
    ## grid coordinates must be strictly increasing
    if (any(diff(x) <= 0) || any(diff(y) <= 0))
        stop("increasing 'x' and 'y' values expected")
    if (!is.matrix(z) || nrow(z) <= 1 || ncol(z) <= 1)
        stop("no proper 'z' matrix specified")
    ## 1.0 * forces double arithmetic so large grids don't overflow ints
    if (1.0 * length(x) * length(y) != length(z))
        stop("dimensions of 'x', 'y' and 'z' do not match")
    .External2(C_contourLines, x, y, z, levels)
}
## Return the indices of the points lying on the convex hull of the
## (x, y) cloud, in counter-clockwise order.
chull <- function(x, y = NULL)
{
    pts <- xy.coords(x, y, recycle = TRUE, setLab = FALSE)
    m <- cbind(pts$x, pts$y)
    if(any(!is.finite(m))) stop("finite coordinates are needed")
    if(nrow(m) == 0) return(integer())
    if(nrow(m) == 1) return(1L)
    idx <- .Call(C_chull, m)
    ## multiple copies of a single point collapse to one hull index
    if (length(idx) < 2L) return(idx)
    ## re-establish angular ordering around the hull centroid; needed in
    ## rare cases (PR#15127)
    centered <- sweep(m[idx, ], 2L, colMeans(m[idx, ]))
    ang <- atan2(centered[, 2L], -centered[, 1L])
    idx[order(ang)]
}
## Sturges' rule: 1 + log2(n) histogram classes, rounded up.
nclass.Sturges <- function(x) ceiling(1 + log2(length(x)))
## Scott's rule: bin width h = 3.5 * sd(x) / n^(1/3); the class count is
## the range divided by h, with a floor of one class.  A zero or
## undefined width (constant data) also yields a single class.
nclass.scott <- function(x)
{
    width <- 3.5 * stats::sd(x) * length(x)^(-1/3)
    if (width > 0) max(1, ceiling(diff(range(x)) / width)) else 1L
}
## Freedman-Diaconis rule: bin width h = 2 * IQR(x) / n^(1/3).  When the
## IQR is zero it falls back to progressively wider quantile ranges and
## finally to Scott's rule.
nclass.FD <- function(x)
{
    ## rounding to 5 significant digits removes floating-point fuzz
    h <- 2 * stats::IQR(x. <- signif(x, digits = 5))
    if (h == 0) {
        ## degenerate IQR: widen the quantile window step by step
        x. <- sort(x.)
        al <- 1/4; al.min <- 1/512 # try quantiles 1/8, 1/16, ... 1/512
        while(h == 0 && (al <- al/2) >= al.min)
            h <- diff(stats::quantile(x., c(al, 1-al), names = FALSE)) / (1 - 2*al)
    }
    if (h == 0) ## revert to Scott's:
        h <- 3.5 * sqrt(stats::var(x))
    if (h > 0) ceiling(diff(range(x))/h * length(x)^(1/3)) else 1L
}
## Sunflower Plot computation:
## Used to be part of ../../graphics/R/sunflowerplot.R :
## Tabulate the multiplicities of the (x, y) points (for sunflowerplot):
## returns the distinct coordinate pairs, after rounding to `digits`
## significant digits, together with how often each pair occurs.
xyTable <- function(x, y = NULL, digits)
{
    ## Compute number := multiplicities of (x[i], y[i])
    x <- xy.coords(x, y, setLab = FALSE)
    ## get rid of rounding fuzz:
    y <- signif(x$y, digits=digits)
    x <- signif(x$x, digits=digits)
    n <- length(x)
    number <-
        if(n > 0) {
            ## sort pairs lexicographically so equal pairs are adjacent
            orderxy <- order(x, y)
            x <- x[orderxy]
            y <- y[orderxy]
            ## TRUE exactly at the first occurrence of each distinct pair
            first <- c(TRUE, (x[-1L] != x[-n]) | (y[-1L] != y[-n]))
            x <- x[first]
            y <- y[first]
            ## run lengths = gaps between successive first-occurrence indices
            diff(c((1L:n)[first], n + 1L))
        }
        else integer()
    list(x = x, y = y, number = number)
}
## Compute "pretty" axis tick locations for the user coordinate range
## `usr` (c(low, high); on log10 scale when `log` is TRUE).  `axp` may
## supply precomputed axis parameters; otherwise they are derived via
## .axisPars().
axisTicks <- function(usr, log, axp = NULL, nint = 5) {
    if(is.null(axp))
        axp <- unlist(.axisPars(usr, log=log, nintLog=nint), use.names=FALSE)
    .Call(C_R_CreateAtVector, axp, if(log) 10^usr else usr, nint, log)
}
## Internal helper: xaxp-style axis parameters from the graphics engine.
.axisPars <- function(usr, log = FALSE, nintLog = 5) {
    .Call(C_R_GAxisPars, usr, log, nintLog)
}
|
{
"pile_set_name": "Github"
}
|
# Copyright (c) 2017, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
#
# All rights reserved.
#
# The Astrobee platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This message allows the MOBILITY::MAPPER zones to be retrieved
---
time timestamp # When these zones were updated
ff_msgs/Zone[] zones # A vector of zones
|
{
"pile_set_name": "Github"
}
|
# Adds a library variable set to an Octopus Deploy project via the REST API.
# Define working variables
$octopusURL = "https://youroctourl"
$octopusAPIKey = "API-YOURAPIKEY"
$header = @{ "X-Octopus-ApiKey" = $octopusAPIKey }
# BUG FIX: $spaceName was referenced below but never defined, so the
# space lookup silently matched nothing.
$spaceName = "Default"
$projectName = "MyProject"
$librarySetName = "MyLibrarySet"

try
{
    # Get space
    $space = (Invoke-RestMethod -Method Get -Uri "$octopusURL/api/spaces/all" -Headers $header) | Where-Object {$_.Name -eq $spaceName}

    # Get project
    $project = (Invoke-RestMethod -Method Get -Uri "$octopusURL/api/$($space.Id)/projects/all" -Headers $header) | Where-Object {$_.Name -eq $projectName}

    # Get library set
    $librarySet = (Invoke-RestMethod -Method Get -Uri "$octopusURL/api/$($space.Id)/libraryvariablesets/all" -Headers $header) | Where-Object {$_.Name -eq $librarySetName}

    # Add the library set
    $project.IncludedLibraryVariableSetIds += $librarySet.Id

    # Update the project
    Invoke-RestMethod -Method Put -Uri "$octopusURL/api/$($space.Id)/projects/$($project.Id)" -Headers $header -Body ($project | ConvertTo-Json -Depth 10)
}
catch
{
    Write-Host $_.Exception.Message
}
|
{
"pile_set_name": "Github"
}
|
# Pandora language file EN=>BN
# See full list of phrases in "ru.txt" file
_Business=>ব্যবসায়
_Node=>নোড
_Region=>এলাকা
_World=>বিশ্ব
People=>মানুষ
Communities=>সম্প্রদায়গুলি
Files=>নথি পত্র
|
{
"pile_set_name": "Github"
}
|
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"type": "pwa-node",
"request": "launch",
"name": "[node-ts] Launch Program",
"program": "${workspaceFolder}/out/index.js",
"preLaunchTask": "npm: compile"
},
{
"type": "pwa-node",
"request": "launch",
"name": "[node-ts] Launch Root Tests Program",
"program": "${workspaceFolder}/out/in/another/location/out/index.js",
"remoteRoot": "${workspaceFolder}/out/in/another/location",
"localRoot": "${workspaceFolder}",
"preLaunchTask": "npm: compile"
}
]
}
|
{
"pile_set_name": "Github"
}
|
// FieldAttributes.cs
//
// This code was automatically generated from
// ECMA CLI XML Library Specification.
// Generator: libgen.xsl [1.0; (C) Sergey Chaban (serge@wildwestsoftware.com)]
// Created: Wed, 5 Sep 2001 06:39:12 UTC
// Source file: all.xml
// URL: http://devresource.hp.com/devresource/Docs/TechPapers/CSharp/all.xml
//
// (C) 2001 Ximian, Inc. http://www.ximian.com
//
// Copyright (C) 2004 Novell, Inc (http://www.novell.com)
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
using System.Runtime.InteropServices;
namespace System.Reflection {

	/// <summary>
	/// Specifies flags that describe the attributes of a field, as defined
	/// by the ECMA CLI specification (ECMA-335).
	/// </summary>
	[ComVisible (true)]
	[Serializable]
	[Flags]
	public enum FieldAttributes {

		/// <summary>
		/// Mask used to extract the access-level bits (the low three bits).
		/// </summary>
		FieldAccessMask = 7,

		/// <summary>
		/// Field is compiler-controlled and cannot be referenced by name.
		/// </summary>
		PrivateScope = 0x0,

		/// <summary>
		/// Field is accessible only by the declaring type.
		/// </summary>
		Private = 0x1,

		/// <summary>
		/// Field is accessible by derived types within this assembly only.
		/// </summary>
		FamANDAssem = 0x2,

		/// <summary>
		/// Field is accessible throughout the assembly.
		/// </summary>
		Assembly = 0x3,

		/// <summary>
		/// Field is accessible by the declaring type and derived types.
		/// </summary>
		Family = 0x4,

		/// <summary>
		/// Field is accessible by derived types anywhere, and by any type
		/// in this assembly.
		/// </summary>
		FamORAssem = 0x5,

		/// <summary>
		/// Field is accessible by any code that can see the declaring type.
		/// </summary>
		Public = 0x6,

		/// <summary>
		/// Field belongs to the type itself rather than to instances.
		/// </summary>
		Static = 0x10,

		/// <summary>
		/// Field may only be initialized, not written after initialization
		/// (C# readonly).
		/// </summary>
		InitOnly = 0x20,

		/// <summary>
		/// Field's value is a compile-time constant.
		/// </summary>
		Literal = 0x40,

		/// <summary>
		/// Field does not have to be serialized when the type is remoted.
		/// </summary>
		NotSerialized = 0x80,

		/// <summary>
		/// Field has a relative virtual address (its data is in the image).
		/// </summary>
		HasFieldRVA = 0x100,

		/// <summary>
		/// Field name is special; its meaning is indicated by the name.
		/// </summary>
		SpecialName = 0x200,

		/// <summary>
		/// The runtime should check the name encoding (runtime special name).
		/// </summary>
		RTSpecialName = 0x400,

		/// <summary>
		/// Field has marshalling information.
		/// </summary>
		HasFieldMarshal = 0x1000,

		/// <summary>
		/// Reserved; implementation is forwarded through PInvoke.
		/// </summary>
		PinvokeImpl = 0x2000,

		/// <summary>
		/// (Not defined in this build; kept for ECMA spec alignment.)
		/// </summary>
		// HasSecurity = 0x4000,

		/// <summary>
		/// Field has a default value.
		/// </summary>
		HasDefault = 0x8000,

		/// <summary>
		/// Mask of the bits reserved for runtime use.
		/// </summary>
		ReservedMask = HasDefault | HasFieldMarshal | RTSpecialName | HasFieldRVA,

	} // FieldAttributes

} // System.Reflection
|
{
"pile_set_name": "Github"
}
|
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE127_Buffer_Underread__malloc_wchar_t_ncpy_06.c
Label Definition File: CWE127_Buffer_Underread__malloc.label.xml
Template File: sources-sink-06.tmpl.c
*/
/*
* @description
* CWE: 127 Buffer Under-read
* BadSource: Set data pointer to before the allocated memory buffer
* GoodSource: Set data pointer to the allocated memory buffer
* Sink: ncpy
* BadSink : Copy data to string using wcsncpy
* Flow Variant: 06 Control flow: if(STATIC_CONST_FIVE==5) and if(STATIC_CONST_FIVE!=5)
*
* */
#include "std_testcase.h"
#include <wchar.h>
/* The variable below is declared "const", so a tool should be able
* to identify that reads of this will always give its initialized value. */
static const int STATIC_CONST_FIVE = 5;
#ifndef OMITBAD
/* BAD variant: `data` is deliberately pointed 8 wchar_t's before the
 * start of the malloc'd buffer, so the wcsncpy in the sink block reads
 * out of bounds.  (This is a Juliet/CWE testcase; the flaw is the
 * intended behavior and must not be "fixed".) */
void CWE127_Buffer_Underread__malloc_wchar_t_ncpy_06_bad()
{
    wchar_t * data;
    data = NULL;
    /* STATIC_CONST_FIVE is const and always 5, so this branch always runs */
    if(STATIC_CONST_FIVE==5)
    {
        {
            wchar_t * dataBuffer = (wchar_t *)malloc(100*sizeof(wchar_t));
            wmemset(dataBuffer, L'A', 100-1);
            dataBuffer[100-1] = L'\0';
            /* FLAW: Set data pointer to before the allocated memory buffer */
            data = dataBuffer - 8;
        }
    }
    {
        wchar_t dest[100];
        wmemset(dest, L'C', 100-1); /* fill with 'C's */
        dest[100-1] = L'\0'; /* null terminate */
        /* POTENTIAL FLAW: Possibly copy from a memory location located before the source buffer */
        wcsncpy(dest, data, wcslen(dest));
        /* Ensure null termination */
        dest[100-1] = L'\0';
        printWLine(dest);
        /* INCIDENTAL CWE-401: Memory Leak - data may not point to location
         * returned by malloc() so can't safely call free() on it */
    }
}
#endif /* OMITBAD */
#ifndef OMITGOOD
/* goodG2B1() - use goodsource and badsink by changing the STATIC_CONST_FIVE==5 to STATIC_CONST_FIVE!=5 */
/* Good variant 1: the flawed source is placed in a dead branch, so
 * `data` always points to the start of the allocated buffer. */
static void goodG2B1()
{
    wchar_t * data;
    data = NULL;
    if(STATIC_CONST_FIVE!=5)
    {
        /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */
        printLine("Benign, fixed string");
    }
    else
    {
        {
            wchar_t * dataBuffer = (wchar_t *)malloc(100*sizeof(wchar_t));
            wmemset(dataBuffer, L'A', 100-1);
            dataBuffer[100-1] = L'\0';
            /* FIX: Set data pointer to the allocated memory buffer */
            data = dataBuffer;
        }
    }
    {
        wchar_t dest[100];
        wmemset(dest, L'C', 100-1); /* fill with 'C's */
        dest[100-1] = L'\0'; /* null terminate */
        /* POTENTIAL FLAW: Possibly copy from a memory location located before the source buffer */
        wcsncpy(dest, data, wcslen(dest));
        /* Ensure null termination */
        dest[100-1] = L'\0';
        printWLine(dest);
        /* INCIDENTAL CWE-401: Memory Leak - data may not point to location
         * returned by malloc() so can't safely call free() on it */
    }
}
/* goodG2B2() - use goodsource and badsink by reversing the blocks in the if statement */
/* Good variant 2: same control flow as the bad function, but the source
 * block assigns `data` to the buffer start instead of before it. */
static void goodG2B2()
{
    wchar_t * data;
    data = NULL;
    if(STATIC_CONST_FIVE==5)
    {
        {
            wchar_t * dataBuffer = (wchar_t *)malloc(100*sizeof(wchar_t));
            wmemset(dataBuffer, L'A', 100-1);
            dataBuffer[100-1] = L'\0';
            /* FIX: Set data pointer to the allocated memory buffer */
            data = dataBuffer;
        }
    }
    {
        wchar_t dest[100];
        wmemset(dest, L'C', 100-1); /* fill with 'C's */
        dest[100-1] = L'\0'; /* null terminate */
        /* POTENTIAL FLAW: Possibly copy from a memory location located before the source buffer */
        wcsncpy(dest, data, wcslen(dest));
        /* Ensure null termination */
        dest[100-1] = L'\0';
        printWLine(dest);
        /* INCIDENTAL CWE-401: Memory Leak - data may not point to location
         * returned by malloc() so can't safely call free() on it */
    }
}
/* Runs both good variants so one call exercises every fixed control-flow
 * permutation of this testcase. */
void CWE127_Buffer_Underread__malloc_wchar_t_ncpy_06_good()
{
    goodG2B1();
    goodG2B2();
}
#endif /* OMITGOOD */
/* Below is the main(). It is only used when building this testcase on
* its own for testing or for building a binary to use in testing binary
* analysis tools. It is not used when compiling all the testcases as one
* application, which is how source code analysis tools are tested.
*/
#ifdef INCLUDEMAIN
/* Standalone driver: runs the good variants then the bad one, gated by
 * the OMITGOOD/OMITBAD build flags.  Only compiled when INCLUDEMAIN is
 * defined (i.e. when the testcase is built on its own). */
int main(int argc, char * argv[])
{
    /* seed randomness */
    srand( (unsigned)time(NULL) );
#ifndef OMITGOOD
    printLine("Calling good()...");
    CWE127_Buffer_Underread__malloc_wchar_t_ncpy_06_good();
    printLine("Finished good()");
#endif /* OMITGOOD */
#ifndef OMITBAD
    printLine("Calling bad()...");
    CWE127_Buffer_Underread__malloc_wchar_t_ncpy_06_bad();
    printLine("Finished bad()");
#endif /* OMITBAD */
    return 0;
}
#endif
|
{
"pile_set_name": "Github"
}
|
[
{
"description": "maxLength validation",
"schema": {"maxLength": 2},
"tests": [
{
"description": "shorter is valid",
"data": "f",
"valid": true
},
{
"description": "exact length is valid",
"data": "fo",
"valid": true
},
{
"description": "too long is invalid",
"data": "foo",
"valid": false
},
{
"description": "ignores non-strings",
"data": 10,
"valid": true
},
{
"description": "two supplementary Unicode code points is long enough",
"data": "\uD83D\uDCA9\uD83D\uDCA9",
"valid": true
}
]
}
]
|
{
"pile_set_name": "Github"
}
|
// Shader Forge internal node shader: evaluates a Fresnel term
// pow(1 - max(0, dot(NRM, V)), EXP) per pixel, masked by _OutputMask.
Shader "Hidden/Shader Forge/SFN_Fresnel_NRM_EXP" {
    Properties {
        _OutputMask ("Output Mask", Vector) = (1,1,1,1)
        _NRM ("Nrm", 2D) = "black" {}   // normal input, one sample per pixel
        _EXP ("Exp", 2D) = "black" {}   // exponent input, x channel used
    }
    SubShader {
        Tags {
            "RenderType"="Opaque"
        }
        Pass {
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            #define UNITY_PASS_FORWARDBASE
            #include "UnityCG.cginc"
            #pragma target 3.0
            uniform float4 _OutputMask;
            uniform sampler2D _NRM;
            uniform sampler2D _EXP;
            struct VertexInput {
                float4 vertex : POSITION;
                float2 texcoord0 : TEXCOORD0;
            };
            struct VertexOutput {
                float4 pos : SV_POSITION;
                float2 uv : TEXCOORD0;
                float4 posWorld : TEXCOORD1;
                float4 screenPos : TEXCOORD2;
            };
            VertexOutput vert (VertexInput v) {
                VertexOutput o = (VertexOutput)0;
                o.uv = v.texcoord0;
                o.posWorld = mul(unity_ObjectToWorld, v.vertex);
                o.pos = UnityObjectToClipPos(v.vertex );
                // normalized device xy, y flipped per platform convention
                o.screenPos = float4( o.pos.xy / o.pos.w, 0, 0 );
                o.screenPos.y *= _ProjectionParams.x;
                return o;
            }
            float4 frag(VertexOutput i) : COLOR {
                #if UNITY_UV_STARTS_AT_TOP
                float grabSign = -_ProjectionParams.x;
                #else
                float grabSign = _ProjectionParams.x;
                #endif
                float2 sceneUVs = float2(1,grabSign)*i.screenPos.xy*0.5+0.5;
                // NOTE(review): "viewDirection" is sampled from the _NRM
                // texture at its center texel (implicit float4->float3
                // truncation) rather than derived from the camera position;
                // presumably intentional for this generated preview shader —
                // verify against the Shader Forge node exporter.
                float3 viewDirection = tex2D( _NRM, float2(0.5,0.5) );
                // Read inputs
                float4 _nrm = tex2D( _NRM, i.uv );
                float4 _exp = tex2D( _EXP, i.uv );
                // Operator: Fresnel term
                float4 outputColor = pow( 1.0-max(0,dot(_nrm.xyz, viewDirection)), _exp.x );
                // Return
                return outputColor * _OutputMask;
            }
            ENDCG
        }
    }
}
|
{
"pile_set_name": "Github"
}
|
{{- if .Values.compass.enabled }}
apiVersion: v1
kind: Service
metadata:
labels:
app: {{ .Values.compass.name }}
service: {{ .Values.compass.name }}
name: {{ .Values.compass.name }}
namespace: {{ .Release.Namespace }}
spec:
ports:
{{ range $i, $var := .Values.compass.service.ports -}}
- name: {{ $var.name }}
port: {{ $var.port }}
targetPort: {{ $var.port }}
{{ end }}
selector:
app: {{ .Values.compass.name }}
type: {{ .Values.compass.service.type}}
{{- end }}
|
{
"pile_set_name": "Github"
}
|
# ===========================================================================
# http://www.gnu.org/software/autoconf-archive/ax_prog_java.html
# ===========================================================================
#
# SYNOPSIS
#
# AX_PROG_JAVA
#
# DESCRIPTION
#
# Here is a summary of the main macros:
#
# AX_PROG_JAVAC: finds a Java compiler.
#
# AX_PROG_JAVA: finds a Java virtual machine.
#
# AX_CHECK_CLASS: finds if we have the given class (beware of CLASSPATH!).
#
# AX_CHECK_RQRD_CLASS: finds if we have the given class and stops
# otherwise.
#
# AX_TRY_COMPILE_JAVA: attempt to compile user given source.
#
# AX_TRY_RUN_JAVA: attempt to compile and run user given source.
#
# AX_JAVA_OPTIONS: adds Java configure options.
#
# AX_PROG_JAVA tests an existing Java virtual machine. It uses the
# environment variable JAVA then tests in sequence various common Java
# virtual machines. For political reasons, it starts with the free ones.
# You *must* call [AX_PROG_JAVAC] before.
#
# If you want to force a specific VM:
#
# - at the configure.in level, set JAVA=yourvm before calling AX_PROG_JAVA
#
# (but after AC_INIT)
#
# - at the configure level, setenv JAVA
#
# You can use the JAVA variable in your Makefile.in, with @JAVA@.
#
# *Warning*: its success or failure can depend on a proper setting of the
# CLASSPATH env. variable.
#
# TODO: allow to exclude virtual machines (rationale: most Java programs
# cannot run with some VM like kaffe).
#
# Note: This is part of the set of autoconf M4 macros for Java programs.
# It is VERY IMPORTANT that you download the whole set, some macros depend
# on other. Unfortunately, the autoconf archive does not support the
# concept of set of macros, so I had to break it for submission.
#
# A Web page, with a link to the latest CVS snapshot is at
# <http://www.internatif.org/bortzmeyer/autoconf-Java/>.
#
# This is a sample configure.in Process this file with autoconf to produce
# a configure script.
#
# AC_INIT(UnTag.java)
#
# dnl Checks for programs.
# AC_CHECK_CLASSPATH
# AX_PROG_JAVAC
# AX_PROG_JAVA
#
# dnl Checks for classes
# AX_CHECK_RQRD_CLASS(org.xml.sax.Parser)
# AX_CHECK_RQRD_CLASS(com.jclark.xml.sax.Driver)
#
# AC_OUTPUT(Makefile)
#
# LICENSE
#
# Copyright (c) 2008 Stephane Bortzmeyer <bortzmeyer@pasteur.fr>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# As a special exception, the respective Autoconf Macro's copyright owner
# gives unlimited permission to copy, distribute and modify the configure
# scripts that are the output of Autoconf when processing the Macro. You
# need not follow the terms of the GNU General Public License when using
# or distributing such scripts, even though portions of the text of the
# Macro appear in them. The GNU General Public License (GPL) does govern
# all other use of the material that constitutes the Autoconf Macro.
#
# This special exception to the GPL applies to versions of the Autoconf
# Macro released by the Autoconf Archive. When you make and distribute a
# modified version of the Autoconf Macro, you may extend this special
# exception to the GPL to apply to your modified version as well.
#serial 8
dnl AC_PROG_JAVA is the historical name; keep it as an alias.
AU_ALIAS([AC_PROG_JAVA], [AX_PROG_JAVA])
AC_DEFUN([AX_PROG_JAVA],[
dnl Locate a Java VM: honour a preset $JAVA, otherwise search the PATH
dnl (optionally under $JAVAPREFIX), trying the free VM (kaffe) first.
if test x$JAVAPREFIX = x; then
test x$JAVA = x && AC_CHECK_PROGS(JAVA, kaffe java)
else
test x$JAVA = x && AC_CHECK_PROGS(JAVA, kaffe java, $JAVAPREFIX)
fi
dnl Abort configure outright when no VM was found.
test x$JAVA = x && AC_MSG_ERROR([no acceptable Java virtual machine found in \$PATH])
dnl Verify the chosen VM can actually run a trivial class.
AX_PROG_JAVA_WORKS
AC_PROVIDE([$0])dnl
])
|
{
"pile_set_name": "Github"
}
|
use super::SockAddr;
use libc::{sockaddr, c_void, c_char, c_uchar, c_short, c_ushort, c_int, c_uint, c_ulong};
use std::ptr;
use std::mem;
use std::ffi::{CStr, CString};
use std::net::Ipv4Addr;
use std::io::{self, Error, ErrorKind};
use std::os::unix::io::{RawFd, AsRawFd, IntoRawFd};
/// Maximum interface name length, including the trailing NUL (Linux IFNAMSIZ).
pub const IFNAMSIZ: usize = 16;
/// Interface flag bits used with the SIOCGIFFLAGS/SIOCSIFFLAGS ioctls.
pub const IFF_UP: c_short = 0x1;
pub const IFF_RUNNING: c_short = 0x40;
/// TUNSETIFF mode flags: IP-level TUN device, no packet-info header.
pub const IFF_TUN: c_short = 0x0001;
pub const IFF_NO_PI: c_short = 0x1000;

/// Mirror of the kernel's `struct ifmap` (legacy hardware parameters).
#[repr(C)]
#[derive(Copy, Clone)]
pub struct ifmap {
    pub mem_start: c_ulong,
    pub mem_end: c_ulong,
    pub base_addr: c_ushort,
    pub irq: c_uchar,
    pub dma: c_uchar,
    pub port: c_uchar,
}

/// Mirror of the kernel's `union ifs_ifsu` (per-protocol settings pointers).
#[repr(C)]
#[derive(Copy, Clone)]
pub union ifsu {
    pub raw_hdlc_proto: *mut c_void,
    pub cisco: *mut c_void,
    pub fr: *mut c_void,
    pub fr_pvc: *mut c_void,
    pub fr_pvc_info: *mut c_void,
    pub sync: *mut c_void,
    pub te1: *mut c_void,
}

/// Mirror of the kernel's `struct if_settings`.
#[repr(C)]
#[derive(Copy, Clone)]
pub struct if_settings {
    pub type_: c_uint,
    pub size: c_uint,
    pub ifsu: ifsu,
}

/// `ifr_ifrn` union: the interface name the request refers to.
#[repr(C)]
#[derive(Copy, Clone)]
pub union ifrn {
    pub name: [c_char; IFNAMSIZ],
}

/// `ifr_ifru` union: the value carried by an interface request; which
/// member is active depends on the ioctl being issued.
#[repr(C)]
#[derive(Copy, Clone)]
pub union ifru {
    pub addr: sockaddr,
    pub dstaddr: sockaddr,
    pub broadaddr: sockaddr,
    pub netmask: sockaddr,
    pub hwaddr: sockaddr,
    pub flags: c_short,
    pub ivalue: c_int,
    pub mtu: c_int,
    pub map: ifmap,
    pub slave: [c_char; IFNAMSIZ],
    pub newname: [c_char; IFNAMSIZ],
    pub data: *mut c_void,
    pub settings: if_settings,
}

/// Mirror of the kernel's `struct ifreq`, passed to every interface ioctl.
#[repr(C)]
#[derive(Copy, Clone)]
pub struct ifreq {
    pub ifrn: ifrn,
    pub ifru: ifru,
}
// Generic interface ioctls (SIOCGIF*/SIOCSIF*), issued on the control
// socket; the raw request numbers are from <linux/sockios.h>.
ioctl!(bad read siocgifflags with 0x8913; ifreq);
ioctl!(bad write siocsifflags with 0x8914; ifreq);
ioctl!(bad read siocgifaddr with 0x8915; ifreq);
ioctl!(bad write siocsifaddr with 0x8916; ifreq);
ioctl!(bad read siocgifdstaddr with 0x8917; ifreq);
ioctl!(bad write siocsifdstaddr with 0x8918; ifreq);
ioctl!(bad read siocgifbrdaddr with 0x8919; ifreq);
ioctl!(bad write siocsifbrdaddr with 0x891a; ifreq);
ioctl!(bad read siocgifnetmask with 0x891b; ifreq);
ioctl!(bad write siocsifnetmask with 0x891c; ifreq);
ioctl!(bad read siocgifmtu with 0x8921; ifreq);
ioctl!(bad write siocsifmtu with 0x8922; ifreq);
ioctl!(bad write siocsifname with 0x8923; ifreq);
// TUN-specific ioctls ('T' group, <linux/if_tun.h>), issued on the
// /dev/net/tun file descriptor.
ioctl!(write tunsetiff with b'T', 202; c_int);
ioctl!(write tunsetpersist with b'T', 203; c_int);
ioctl!(write tunsetowner with b'T', 204; c_int);
ioctl!(write tunsetgroup with b'T', 206; c_int);
/// A Linux TUN device.  `tun` is the packet-carrying fd on /dev/net/tun;
/// `ctl` is an AF_INET datagram socket used only for the interface
/// configuration ioctls (address, netmask, MTU, flags, ...).
#[derive(Debug)]
pub struct Device {
    name: String, // interface name as reported back by the kernel
    tun: RawFd,   // fd bound to the TUN interface
    ctl: RawFd,   // fd for SIOCGIF*/SIOCSIF* ioctls
}
impl Device {
    /// Open `/dev/net/tun` and create (or attach to) a TUN interface.
    ///
    /// An empty `name` lets the kernel pick the next free `tunN` name;
    /// the name actually assigned is read back from the ioctl request
    /// and stored on the returned `Device`.
    pub fn new(name: &str) -> Result<Self, Error> {
        // An interior NUL byte used to panic in `unwrap`; report it as
        // an invalid-input error instead (the no-op `.clone()` on the
        // &str is also gone).
        let name = CString::new(name)
            .map_err(|_| Error::new(ErrorKind::InvalidInput, "name contains a NUL byte"))?;
        if name.as_bytes_with_nul().len() > IFNAMSIZ {
            return Err(Error::new(ErrorKind::InvalidInput, "name too long"));
        }
        let (tun, ctl, name) = unsafe {
            let tun = libc::open(b"/dev/net/tun\0".as_ptr() as *const _, libc::O_RDWR);
            if tun < 0 {
                return Err(io::Error::last_os_error());
            }
            let mut req: ifreq = mem::zeroed();
            ptr::copy_nonoverlapping(name.as_ptr() as *const c_char,
                                     req.ifrn.name.as_mut_ptr(),
                                     name.as_bytes().len());
            req.ifru.flags = IFF_TUN | IFF_NO_PI;
            if tunsetiff(tun, &mut req as *mut _ as *mut _) < 0 {
                // BUG FIX: the tun fd used to leak on this early return.
                let err = io::Error::last_os_error();
                libc::close(tun);
                return Err(err);
            }
            let ctl = libc::socket(libc::AF_INET, libc::SOCK_DGRAM, 0);
            if ctl < 0 {
                // BUG FIX: likewise close the tun fd when the control
                // socket cannot be created.
                let err = io::Error::last_os_error();
                libc::close(tun);
                return Err(err);
            }
            (tun, ctl, CStr::from_ptr(req.ifrn.name.as_ptr()).to_string_lossy().into())
        };
        Ok(Device {
            name: name,
            tun: tun,
            ctl: ctl,
        })
    }
    /// Set the owner (uid) of the device.
    pub fn user(&mut self, value: i32) -> Result<(), Error> {
        unsafe {
            if tunsetowner(self.tun, &value) < 0 {
                return Err(io::Error::last_os_error())
            }
        }
        Ok(())
    }
    /// Set the group (gid) of the device.
    pub fn group(&mut self, value: i32) -> Result<(), Error> {
        unsafe {
            if tunsetgroup(self.tun, &value) < 0 {
                return Err(io::Error::last_os_error());
            }
        }
        Ok(())
    }
    /// Interface name as reported by the kernel.
    #[inline]
    pub fn name(&self) -> &str {
        &self.name
    }
    /// Prepare a new request: a zeroed ifreq pre-filled with this
    /// interface's name.
    #[inline]
    unsafe fn request(&self) -> ifreq {
        let mut req: ifreq = mem::zeroed();
        ptr::copy_nonoverlapping(self.name.as_ptr() as *const c_char,
                                 req.ifrn.name.as_mut_ptr(),
                                 self.name.len());
        req
    }
    /// Rename the interface (SIOCSIFNAME) and update the cached name.
    pub fn set_name(&mut self, value: &str) -> Result<(), Error> {
        unsafe {
            let name = CString::new(value)?;
            if name.as_bytes_with_nul().len() > IFNAMSIZ {
                return Err(Error::new(ErrorKind::InvalidInput, "name too long"));
            }
            let mut req = self.request();
            ptr::copy_nonoverlapping(name.as_ptr() as *const c_char,
                                     req.ifru.newname.as_mut_ptr(),
                                     value.len());
            if siocsifname(self.ctl, &req) < 0 {
                return Err(io::Error::last_os_error());
            }
            self.name = value.into();
            Ok(())
        }
    }
    /// IPv4 address of the interface (SIOCGIFADDR).
    pub fn address(&self) -> Result<Ipv4Addr, Error> {
        unsafe {
            let mut req = self.request();
            if siocgifaddr(self.ctl, &mut req) < 0 {
                return Err(io::Error::last_os_error());
            }
            SockAddr::new(&req.ifru.addr).map(Into::into)
        }
    }
    /// Set the IPv4 address of the interface (SIOCSIFADDR).
    pub fn set_address<T: Into<Ipv4Addr>>(&mut self, value: T) -> Result<(), Error> {
        unsafe {
            let mut req = self.request();
            req.ifru.addr = SockAddr::from(value.into()).into();
            if siocsifaddr(self.ctl, &req) < 0 {
                return Err(io::Error::last_os_error());
            }
            Ok(())
        }
    }
    /// Point-to-point destination address (SIOCGIFDSTADDR).
    pub fn destination(&self) -> Result<Ipv4Addr, Error> {
        unsafe {
            let mut req = self.request();
            if siocgifdstaddr(self.ctl, &mut req) < 0 {
                return Err(io::Error::last_os_error());
            }
            SockAddr::new(&req.ifru.dstaddr).map(Into::into)
        }
    }
    /// Set the point-to-point destination address (SIOCSIFDSTADDR).
    pub fn set_destination<T: Into<Ipv4Addr>>(&mut self, value: T) -> Result<(), Error> {
        unsafe {
            let mut req = self.request();
            req.ifru.dstaddr = SockAddr::from(value.into()).into();
            if siocsifdstaddr(self.ctl, &req) < 0 {
                return Err(io::Error::last_os_error());
            }
            Ok(())
        }
    }
    /// Broadcast address (SIOCGIFBRDADDR).
    pub fn broadcast(&self) -> Result<Ipv4Addr, Error> {
        unsafe {
            let mut req = self.request();
            if siocgifbrdaddr(self.ctl, &mut req) < 0 {
                return Err(io::Error::last_os_error());
            }
            SockAddr::new(&req.ifru.broadaddr).map(Into::into)
        }
    }
    /// Set the broadcast address (SIOCSIFBRDADDR).
    pub fn set_broadcast<T: Into<Ipv4Addr>>(&mut self, value: T) -> Result<(), Error> {
        unsafe {
            let mut req = self.request();
            req.ifru.broadaddr = SockAddr::from(value.into()).into();
            if siocsifbrdaddr(self.ctl, &req) < 0 {
                return Err(io::Error::last_os_error());
            }
            Ok(())
        }
    }
    /// Network mask (SIOCGIFNETMASK).
    pub fn netmask(&self) -> Result<Ipv4Addr, Error> {
        unsafe {
            let mut req = self.request();
            if siocgifnetmask(self.ctl, &mut req) < 0 {
                return Err(io::Error::last_os_error());
            }
            SockAddr::new(&req.ifru.netmask).map(Into::into)
        }
    }
    /// Set the network mask (SIOCSIFNETMASK).
    pub fn set_netmask<T: Into<Ipv4Addr>>(&mut self, value: T) -> Result<(), Error> {
        unsafe {
            let mut req = self.request();
            req.ifru.netmask = SockAddr::from(value.into()).into();
            if siocsifnetmask(self.ctl, &req) < 0 {
                return Err(io::Error::last_os_error());
            }
            Ok(())
        }
    }
    /// MTU of the interface (SIOCGIFMTU).
    pub fn mtu(&self) -> Result<i32, Error> {
        unsafe {
            let mut req = self.request();
            if siocgifmtu(self.ctl, &mut req) < 0 {
                return Err(io::Error::last_os_error());
            }
            Ok(req.ifru.mtu)
        }
    }
    /// Set the MTU (SIOCSIFMTU); rejects values below the IPv4 minimum.
    pub fn set_mtu(&mut self, value: i32) -> Result<(), Error> {
        // Minimum MTU required of all links supporting IPv4. See RFC 791 § 3.1.
        pub const IPV4_MIN_MTU: i32 = 576;
        // Minimum MTU required of all links supporting IPv6. See RFC 8200 § 5.
        // pub const IPV6_MIN_MTU: i32 = 1280;
        if value < IPV4_MIN_MTU {
            return Err(Error::new(ErrorKind::InvalidInput, "MTU is too small"));
        }
        unsafe {
            let mut req = self.request();
            req.ifru.mtu = value;
            if siocsifmtu(self.ctl, &req) < 0 {
                return Err(io::Error::last_os_error());
            }
            Ok(())
        }
    }
    /// Bring the interface up (setting IFF_UP | IFF_RUNNING) or down
    /// (clearing IFF_UP), via read-modify-write of the flag word.
    pub fn enabled(&mut self, value: bool) -> Result<(), Error> {
        unsafe {
            let mut req = self.request();
            if siocgifflags(self.ctl, &mut req) < 0 {
                return Err(io::Error::last_os_error());
            }
            if value {
                req.ifru.flags |= IFF_UP | IFF_RUNNING;
            }
            else {
                req.ifru.flags &= !IFF_UP;
            }
            if siocsifflags(self.ctl, &mut req) < 0 {
                return Err(io::Error::last_os_error());
            }
            Ok(())
        }
    }
}
impl AsRawFd for Device {
    /// Borrow the packet-carrying tun fd (for poll/epoll registration).
    fn as_raw_fd(&self) -> RawFd {
        self.tun
    }
}
impl IntoRawFd for Device {
fn into_raw_fd(self) -> RawFd {
self.tun
}
}
impl Drop for Device {
    /// Close both file descriptors; negative values are treated as
    /// "already released" and skipped.
    fn drop(&mut self) {
        unsafe {
            if self.ctl >= 0 {
                libc::close(self.ctl);
            }
            if self.tun >= 0 {
                libc::close(self.tun);
            }
        }
    }
}
|
{
"pile_set_name": "Github"
}
|
#!/bin/sh /etc/rc.common
# Copyright (C) 2012 OpenWrt.org
START=97
USE_PROCD=1
EXTRA_COMMANDS="dslstat lucistat"
EXTRA_HELP="	dslstat	Get DSL status information
	lucistat  Get status information in lua friendly format"
. /lib/functions/lantiq_dsl.sh
#
# ITU-T G.997.1 (06/2012) - Section 7.3.1.1.1 (xTU transmission system enabling (XTSE))
# ITU-T G.997.1 Amendment 2 (04/2013) - Section 2.1 - (Vectoring mode enable (VECTORMODE_ENABLE))
#
# Each xtse_xdsl_* value below is a string of eight XTSE octets
# (hex, "_"-separated) selecting which xDSL standards to enable.
#
# G.992.1 Annex A
# G.992.2 Annex A
# G.992.3 Annex A / L-US1 / L_US-2 / M
# G.992.5 Annex A / M
# G.993.2 Annex A/B/C
# G.993.5 Annex A/B/C
xtse_xdsl_a="05_01_04_00_4C_01_04_07"
# G.992.1 Annex B
# G.992.3 Annex B
# G.992.5 Annex B
# G.993.2 Annex A/B/C
# G.993.5 Annex A/B/C
xtse_xdsl_b="10_00_10_00_00_04_00_07"
# G.992.1 Annex B
# G.992.3 Annex B
# G.992.3 Annex J
# G.992.5 Annex B
# G.992.5 Annex J
# G.993.2 Annex A/B/C
# G.993.5 Annex A/B/C
xtse_xdsl_j="10_00_10_40_00_04_01_07"
# G.992.1 Annex B
xtse_xdsl_bdmt="10_00_00_00_00_00_00_00"
# G.992.3 Annex B
xtse_xdsl_b2="00_00_10_00_00_00_00_00"
# G.992.5 Annex B
xtse_xdsl_b2p="00_00_00_00_00_04_00_00"
# ANSI T1.413
xtse_xdsl_at1="01_00_00_00_00_00_00_00"
# G.992.2 Annex A
xtse_xdsl_alite="00_01_00_00_00_00_00_00"
# G.992.1 Annex A
xtse_xdsl_admt="04_00_00_00_00_00_00_00"
# G.992.3 Annex A
xtse_xdsl_a2="00_00_04_00_00_00_00_00"
# G.992.5 Annex A
xtse_xdsl_a2p="00_00_00_00_00_01_00_00"
# G.992.3 Annex L
xtse_xdsl_l="00_00_00_00_0C_00_00_00"
# G.992.3 Annex M
# G.992.5 Annex M
xtse_xdsl_m="00_00_00_00_40_00_04_00"
# G.992.3 Annex M
xtse_xdsl_m2="00_00_00_00_40_00_00_00"
# G.992.5 Annex M
xtse_xdsl_m2p="00_00_00_00_00_00_04_00"
#
# ITU-T G.994.1 (06/2012) - Table 2 (Mandatory carrier sets)
#
# tone_* values are handshake carrier-set bitmasks; matching pairs are fed
# to lowlevel_cfg by start_service.
#
# A43
tone_adsl_a="0x142" # A43C + J43 + A43
tone_vdsl_a="0x142" # A43C + J43 + A43
# A43 + V43
tone_adsl_av="0x142" # A43C + J43 + A43
tone_vdsl_av="0x146" # A43C + J43 + A43 + V43
# B43
tone_adsl_b="0x81" # B43 + B43c
tone_vdsl_b="0x1" # B43
# B43 + V43
tone_adsl_bv="0x81" # B43 + B43c
tone_vdsl_bv="0x5" # B43 + V43
# Write the ADSL autoboot script consumed by vdsl_cpe_control; it is used to
# apply the downstream SNR margin offset passed as $1.
autoboot_script() {
	cat > /tmp/dsl.scr <<EOF
[WaitForConfiguration]={
locs 0 $1
}
[WaitForLinkActivate]={
}
[WaitForRestart]={
}
[Common]={
}
EOF
}
# Generate /tmp/lowlevel.cfg for the VRX DSL driver.
#   $1: handshake tone group for ADSL (hex), written to nHsToneGroup_A
#   $2: handshake tone group for VDSL (hex), written to nHsToneGroup_V
# The emitted file's own comment lines document each field.
lowlevel_cfg() {
echo "# VRX Low Level Configuration File
#
# Parameters must be separated by tabs or spaces.
# Empty lines and comments will be ignored.
#
# nFilter
#
# NA = -1
# OFF = 0
# ISDN = 1
# POTS = 2
# POTS_2 = 3
# POTS_3 = 4
#
# (dec)
-1
# nHsToneGroupMode nHsToneGroup_A nHsToneGroup_V nHsToneGroup_AV
#
# NA = -1 NA = -1 see see
# AUTO = 0 VDSL2_B43 = 0x0001 nHsToneGroup_A nHsToneGroup_A
# MANUAL = 1 VDSL2_A43 = 0x0002
# VDSL2_V43 = 0x0004
# VDSL1_V43P = 0x0008
# VDSL1_V43I = 0x0010
# ADSL1_C43 = 0x0020
# ADSL2_J43 = 0x0040
# ADSL2_B43C = 0x0080
# ADSL2_A43C = 0x0100
#
# (dec) (hex) (hex) (hex)
1 $1 $2 0x0
# nBaseAddr nIrqNum
#
# (hex) (dec)
0x1e116000 63
# nUtopiaPhyAdr nUtopiaBusWidth nPosPhyParity
# default(16b) = 0 NA = -1
# 8-bit = 1 ODD = 0
# 16-bit = 2
#
#
# (hex) (dec) (dec)
0xFF 0 0
# bNtrEnable
#
# (dec)
0" > /tmp/lowlevel.cfg
}
# procd hook: reload this service whenever the "network" uci config changes.
service_triggers() {
procd_add_reload_trigger network
}
# Read the DSL settings from the "network" uci config, pick a matching modem
# firmware, emit the low-level/autoboot helper files, and launch
# /sbin/vdsl_cpe_control as a procd-managed instance.
start_service() {
	local annex
	local firmware
	local tone
	local tone_adsl
	local tone_vdsl
	local xtse
	local xfer_mode
	local line_mode
	local tc_layer
	local mode
	local lowlevel
	local snr
	# fix: autoboot was assigned below without ever being declared,
	# silently leaking into the global scope
	local autoboot
	config_load network
	config_get tone dsl tone
	config_get annex dsl annex
	config_get firmware dsl firmware
	config_get xfer_mode dsl xfer_mode
	config_get line_mode dsl line_mode
	config_get snr dsl ds_snr_offset
	# Translate the annex keyword into its XTSE capability octets.
	eval "xtse=\"\${xtse_xdsl_$annex}\""
	case "${xfer_mode}" in
	atm)
		tc_layer="-T1:0x1:0x1_1:0x1:0x1"
		;;
	ptm)
		tc_layer="-T2:0x1:0x1_2:0x1:0x1"
		;;
	esac
	case "${line_mode}" in
	adsl)
		mode="-M1"
		# mask out VDSL bits when ADSL is requested
		xtse="${xtse%_*}_00"
		;;
	vdsl)
		mode="-M2"
		# mask out ADSL bits when VDSL is requested
		xtse="00_00_00_00_00_00_00_${xtse##*_}"
		;;
	esac
	# Boards with annex-select GPIOs: steer the line to the right front end.
	local annexgpio="/sys/class/gpio/annex"
	if [ -d "${annexgpio}a" ] && [ -d "${annexgpio}b" ]; then
		case "${annex}" in
		a*|l*|m*)
			echo 1 > "${annexgpio}a/value"
			echo 0 > "${annexgpio}b/value"
			;;
		b*|j*)
			echo 0 > "${annexgpio}a/value"
			echo 1 > "${annexgpio}b/value"
			;;
		esac
	fi
	if [ -z "${firmware}" ]; then
		# search for the firmware provided by dsl-vrx200-firmware-xdsl-*
		if grep -qE "system type.*: (VR9|xRX200)" /proc/cpuinfo; then
			case "${annex}" in
			a*|l*|m*)
				if [ -f "/lib/firmware/lantiq-vrx200-a.bin" ]; then
					firmware="/lib/firmware/lantiq-vrx200-a.bin"
				elif [ -f "/tmp/lantiq-vrx200-a.bin" ]; then
					firmware="/tmp/lantiq-vrx200-a.bin"
				elif [ -f "/lib/firmware/lantiq-vrx200-b.bin" ] && [ -f "/lib/firmware/lantiq-vrx200-b-to-a.bspatch" ]; then
					# reconstruct the annex A blob from the B blob plus a binary patch
					bspatch /lib/firmware/lantiq-vrx200-b.bin \
						/tmp/lantiq-vrx200-a.bin \
						/lib/firmware/lantiq-vrx200-b-to-a.bspatch
					firmware="/tmp/lantiq-vrx200-a.bin"
				else
					echo "firmware for annex a not found"
					return 1
				fi
				;;
			b*|j*)
				if [ -f "/lib/firmware/vr9_dsl_fw_annex_b.bin" ]; then
					firmware="/lib/firmware/vr9_dsl_fw_annex_b.bin"
				elif [ -f "/lib/firmware/lantiq-vrx200-b.bin" ]; then
					firmware="/lib/firmware/lantiq-vrx200-b.bin"
				elif [ -f "/tmp/lantiq-vrx200-b.bin" ]; then
					firmware="/tmp/lantiq-vrx200-b.bin"
				elif [ -f "/lib/firmware/lantiq-vrx200-a.bin" ] && [ -f "/lib/firmware/lantiq-vrx200-a-to-b.bspatch" ]; then
					bspatch /lib/firmware/lantiq-vrx200-a.bin \
						/tmp/lantiq-vrx200-b.bin \
						/lib/firmware/lantiq-vrx200-a-to-b.bspatch
					firmware="/tmp/lantiq-vrx200-b.bin"
				else
					echo "firmware for annex b not found"
					return 1
				fi
				;;
			*)
				echo "annex type not supported use a or b"
				return 1
				;;
			esac
		fi
	fi
	[ -z "${firmware}" ] && firmware=/lib/firmware/vdsl.bin
	[ -f "${firmware}" ] || {
		# fix: quote the message so the path is not word-split/globbed
		echo "failed to find ${firmware}"
		return 1
	}
	eval "tone_adsl=\"\${tone_adsl_$tone}\""
	eval "tone_vdsl=\"\${tone_vdsl_$tone}\""
	[ -n "${tone_adsl}" ] && [ -n "${tone_vdsl}" ] && {
		lowlevel_cfg "${tone_adsl}" "${tone_vdsl}"
		lowlevel="-l /tmp/lowlevel.cfg"
	}
	[ -z "${snr}" ] || {
		# for SNR offset setting
		autoboot_script "$snr"
		autoboot="-a /tmp/dsl.scr -A /tmp/dsl.scr"
	}
	procd_open_instance
	procd_set_param command /sbin/vdsl_cpe_control \
		-i$xtse \
		-n /sbin/dsl_notify.sh \
		-f ${firmware} \
		$lowlevel \
		${mode} \
		${tc_layer} \
		$autoboot
	procd_close_instance
}
# Ask the running DSL control daemon to quit and report the link as down.
stop_service() {
# do not use dsl_cmd so we do not block when the pipe is locked up by some other process
echo quit > /tmp/pipe/dsl_cpe0_cmd
DSL_NOTIFICATION_TYPE="DSL_INTERFACE_STATUS" \
DSL_INTERFACE_STATUS="DOWN" \
/sbin/dsl_notify.sh
}
|
{
"pile_set_name": "Github"
}
|
// Copyright (c) 2018 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System.ComponentModel;
namespace Nethermind.TxPool
{
    /// <summary>
    /// Static counters tracking transaction-pool traffic. Each counter
    /// carries a <see cref="DescriptionAttribute"/> with its human-readable
    /// meaning; presumably these are surfaced by a metrics exporter — confirm
    /// against the consumer before relying on the attribute text.
    /// </summary>
    public static class Metrics
    {
        [Description("Number of pending transactions broadcasted to peers.")]
        public static long PendingTransactionsSent { get; set; }
        [Description("Number of pending transactions received from peers.")]
        public static long PendingTransactionsReceived { get; set; }
        [Description("Number of pending transactions received that were ignored.")]
        public static long PendingTransactionsDiscarded { get; set; }
        [Description("Number of known pending transactions.")]
        public static long PendingTransactionsKnown { get; set; }
    }
}
|
{
"pile_set_name": "Github"
}
|
// Mantid Repository : https://github.com/mantidproject/mantid
//
// Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
// NScD Oak Ridge National Laboratory, European Spallation Source,
// Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
// SPDX - License - Identifier: GPL - 3.0 +
#pragma once
#include <cxxtest/TestSuite.h>
#include "MantidDataObjects/MDHistoWorkspace.h"
#include "MantidMDAlgorithms/CompactMD.h"
#include "MantidTestHelpers/MDEventsTestHelper.h"
using Mantid::MDAlgorithms::CompactMD;
using namespace Mantid::API;
//==================
// Functional Tests
//==================
// Functional tests for the CompactMD algorithm: the output workspace should
// be the input cropped to the smallest bounding box containing all non-zero
// signal, with bin widths preserved.
class CompactMDTest : public CxxTest::TestSuite {
public:
  // This pair of boilerplate methods prevent the suite being created statically
  // This means the constructor isn't called when running other tests
  static CompactMDTest *createSuite() { return new CompactMDTest(); }
  static void destroySuite(CompactMDTest *suite) { delete suite; }
  void test_Init() {
    CompactMD alg;
    TSM_ASSERT_THROWS_NOTHING("Instance of CompactMD threw: ",
                              alg.initialize());
    TSM_ASSERT("Instance of CompactMD was not initialised: ",
               alg.isInitialized());
  }
  void
  test_all_non_zero_signals_are_kept_with_data_concentrated_in_the_centre() {
    /*
     *testing the effectiveness of CompactMD when the data looks like this:
     *------------------
     * Input structure:
     *------------------
     *      -------------
     *     | | |///| | |
     * ---------------------
     * -5-4-3-2-1 0 1 2 3 4 5
     *---------------------------
     * Expected output structure:
     *----------------------------
     * should trim until the first non-zero value.
     *  -----
     * |///|
     *  -----
     * -1 0 1
     */
    using namespace Mantid::DataObjects;
    const size_t numDims = 1;
    const double signal = 0.0;
    const double errorSquared = 1.3;
    size_t numBins[static_cast<int>(numDims)] = {5};
    Mantid::coord_t min[static_cast<int>(numDims)] = {-5};
    Mantid::coord_t max[static_cast<int>(numDims)] = {5};
    const std::string name("test");
    auto inWS = MDEventsTestHelper::makeFakeMDHistoWorkspaceGeneral(
        numDims, signal, errorSquared, numBins, min, max, name);
    inWS->setSignalAt(2, 1.0); // set middle bin signal to one
    CompactMD alg;
    alg.setChild(true);
    alg.setRethrows(true);
    alg.initialize();
    alg.setProperty("InputWorkspace", inWS);
    alg.setProperty("OutputWorkspace", "out");
    alg.execute();
    // output workspace should be cropped so extents ~ [-1,1]
    IMDHistoWorkspace_sptr outputWorkspace = alg.getProperty("OutputWorkspace");
    TSM_ASSERT_EQUALS(
        "Should have a signal of 1.0: ", outputWorkspace->getSignalAt(0), 1);
    TSM_ASSERT_EQUALS("Minimum should be cropped to -1: ",
                      outputWorkspace->getDimension(0)->getMinimum(), -1.0);
    TSM_ASSERT_EQUALS("Maximum should be cropped to 1: ",
                      outputWorkspace->getDimension(0)->getMaximum(), 1.0);
    TSM_ASSERT_EQUALS("Number of Bins should be 1 : ",
                      outputWorkspace->getDimension(0)->getNBins(), 1.0);
    TSM_ASSERT_EQUALS("Bin width should be consistent: ",
                      outputWorkspace->getDimension(0)->getBinWidth(),
                      inWS->getDimension(0)->getBinWidth());
  }
  void test_all_non_zero_signals_are_kept_with_data_in_each_corner() {
    /*
     *testing the effectiveness of CompactMD when the data looks like this:
     *-----------------------------------
     * Input structure: 2D HistoWorkspace
     *-----------------------------------
     *  ------------- -3
     * |/a/| |/b/| -2
     *  ------------- -1
     * | | | | 0
     *  ------------- 1
     * |/c/| |/d/| 2
     *  ------------- 3
     * -3-2-1 0 1 2 3
     *----------------------------
     * Expected output structure:
     *----------------------------
     * should not trim the workspace at all.
     *  ------------- -3
     * |/a/| |/b/| -2
     *  ------------- -1
     * | | | | 0
     *  ------------- 1
     * |/c/| |/d/| 2
     *  ------------- 3
     * -3-2-1 0 1 2 3
     */
    using namespace Mantid::DataObjects;
    const size_t numDims = 2;
    const double signal = 0.0;
    const double errorSquared = 1.2;
    size_t numBins[static_cast<int>(numDims)] = {3, 3};
    Mantid::coord_t min[static_cast<int>(numDims)] = {-3, -3};
    Mantid::coord_t max[static_cast<int>(numDims)] = {3, 3};
    const std::string name("test");
    auto inWS = MDEventsTestHelper::makeFakeMDHistoWorkspaceGeneral(
        numDims, signal, errorSquared, numBins, min, max, name);
    inWS->setSignalAt(0, 1.0); // cell a
    inWS->setSignalAt(2, 1.0); // cell b
    inWS->setSignalAt(6, 1.0); // cell c
    inWS->setSignalAt(8, 1.0); // cell d
    CompactMD alg;
    alg.setChild(true);
    alg.setRethrows(true);
    alg.initialize();
    alg.setProperty("InputWorkspace", inWS);
    alg.setProperty("OutputWorkspace", "out");
    alg.execute();
    IMDHistoWorkspace_sptr outputWorkspace = alg.getProperty("OutputWorkspace");
    TSM_ASSERT_EQUALS(
        "Should have a signal of 1.0: ", outputWorkspace->getSignalAt(0), 1);
    TSM_ASSERT_EQUALS(
        "Should have a signal of 1.0: ", outputWorkspace->getSignalAt(2), 1);
    TSM_ASSERT_EQUALS(
        "Should have a signal of 1.0: ", outputWorkspace->getSignalAt(6), 1);
    TSM_ASSERT_EQUALS(
        "Should have a signal of 1.0: ", outputWorkspace->getSignalAt(8), 1);
    TSM_ASSERT_EQUALS("Minimum for dim 0 should be consistent: ",
                      outputWorkspace->getDimension(0)->getMinimum(),
                      inWS->getDimension(0)->getMinimum());
    TSM_ASSERT_EQUALS("Maximum for dim 0 should be consistent: ",
                      outputWorkspace->getDimension(0)->getMaximum(),
                      inWS->getDimension(0)->getMaximum());
    TSM_ASSERT_EQUALS("Minimum for dim 1 should be consistent:",
                      outputWorkspace->getDimension(1)->getMinimum(),
                      inWS->getDimension(1)->getMinimum());
    TSM_ASSERT_EQUALS("Maximum for dim 1 should be consistent: ",
                      outputWorkspace->getDimension(1)->getMaximum(),
                      inWS->getDimension(1)->getMaximum());
    TSM_ASSERT_EQUALS("Number of Bins for dim 0 should be consistent : ",
                      outputWorkspace->getDimension(0)->getNBins(),
                      inWS->getDimension(0)->getNBins());
    TSM_ASSERT_EQUALS("Number of Bins for dim 1 should be consistent : ",
                      outputWorkspace->getDimension(1)->getNBins(),
                      inWS->getDimension(1)->getNBins());
    TSM_ASSERT_EQUALS("Bin width for dim 0 should be consistent: ",
                      outputWorkspace->getDimension(0)->getBinWidth(),
                      inWS->getDimension(0)->getBinWidth());
    TSM_ASSERT_EQUALS("Bin width for dim 1 should be consistent: ",
                      outputWorkspace->getDimension(1)->getBinWidth(),
                      inWS->getDimension(1)->getBinWidth());
  }
  void
  test_all_non_zero_signals_are_kept_when_data_is_concentrated_in_one_half_of_the_workspace() {
    /*
     *testing the effectiveness of CompactMD when the data looks like this:
     *------------------
     * Input structure:
     *------------------
     *  -------------
     * |///| | |
     *  -------------
     * -3-2-1 0 1 2 3
     *---------------------------
     * Expected output structure:
     *----------------------------
     * should trim until the first non-zero value.
     *  -----
     * |///|
     *  -----
     * -3-2-1
     */
    using namespace Mantid::DataObjects;
    const size_t numDims = 1;
    const double signal = 0.0;
    const double errorSquared = 1.3;
    size_t numBins[static_cast<int>(numDims)] = {3};
    Mantid::coord_t min[static_cast<int>(numDims)] = {-3};
    Mantid::coord_t max[static_cast<int>(numDims)] = {3};
    const std::string name("test");
    auto inWS = MDEventsTestHelper::makeFakeMDHistoWorkspaceGeneral(
        numDims, signal, errorSquared, numBins, min, max, name);
    // fix: index 0 is the LEFT-most bin ([-3,-1]); the old comment said
    // "right-most", contradicting the assertions below
    inWS->setSignalAt(0, 1.0); // set left-most bin signal to one
    CompactMD alg;
    alg.setChild(true);
    alg.setRethrows(true);
    alg.initialize();
    alg.setProperty("InputWorkspace", inWS);
    alg.setProperty("OutputWorkspace", "out");
    TS_ASSERT_THROWS_NOTHING(alg.execute());
    IMDHistoWorkspace_sptr outputWorkspace = alg.getProperty("OutputWorkspace");
    TS_ASSERT(outputWorkspace);
    TSM_ASSERT_EQUALS(
        "Should have a signal of 1.0: ", outputWorkspace->getSignalAt(0), 1);
    // fix: assertion messages now match the asserted values (-3 / -1); they
    // previously claimed "cut to 1" and "still be 3"
    TSM_ASSERT_EQUALS("Minimum should remain -3: ",
                      outputWorkspace->getDimension(0)->getMinimum(), -3.0);
    TSM_ASSERT_EQUALS("Maximum should be cropped to -1: ",
                      outputWorkspace->getDimension(0)->getMaximum(), -1.0);
    TSM_ASSERT_EQUALS("Number of Bins should be 1 : ",
                      outputWorkspace->getDimension(0)->getNBins(), 1);
    TSM_ASSERT_EQUALS("Bin width should be consistent: ",
                      outputWorkspace->getDimension(0)->getBinWidth(),
                      inWS->getDimension(0)->getBinWidth());
  }
  void test_compact_md_does_not_throw_when_loading_empty_workspace() {
    using namespace Mantid::DataObjects;
    const size_t numDims = 1;
    const double signal = 0.0;
    const double errorSquared = 1.3;
    size_t numBins[static_cast<int>(numDims)] = {3};
    Mantid::coord_t min[static_cast<int>(numDims)] = {-3};
    Mantid::coord_t max[static_cast<int>(numDims)] = {3};
    const std::string name("test");
    auto inWS = MDEventsTestHelper::makeFakeMDHistoWorkspaceGeneral(
        numDims, signal, errorSquared, numBins, min, max, name);
    CompactMD alg;
    alg.setChild(true);
    alg.setRethrows(true);
    alg.initialize();
    alg.setProperty("InputWorkspace", inWS);
    alg.setProperty("OutputWorkspace", "out");
    TS_ASSERT_THROWS_NOTHING(alg.execute());
  }
};
//===================
// Performance Tests
//===================
using namespace Mantid::DataObjects;
// Performance test: run CompactMD over a moderately large 4D histogram
// workspace and only check that an output is produced.
class CompactMDTestPerformance : public CxxTest::TestSuite {
private:
  // Workspace rebuilt by setUp() before each test.
  MDHistoWorkspace_sptr m_ws;
public:
  // This pair of boilerplate methods prevent the suite being created statically
  // This means the constructor isn't called when running other tests
  static CompactMDTestPerformance *createSuite() {
    return new CompactMDTestPerformance();
  }
  static void destroySuite(CompactMDTestPerformance *suite) { delete suite; }
  void setUp() override {
    // Create a 4D workspace.
    const size_t numDims = 4;
    const double signal = 0.0;
    const double errorSquared = 1.2;
    size_t numBins[static_cast<int>(numDims)] = {10, 20, 10, 20};
    Mantid::coord_t min[static_cast<int>(numDims)] = {-5, -10, -5, -10};
    Mantid::coord_t max[static_cast<int>(numDims)] = {5, 10, 5, 10};
    const std::string name("test");
    m_ws = MDEventsTestHelper::makeFakeMDHistoWorkspaceGeneral(
        numDims, signal, errorSquared, numBins, min, max, name);
    // setting signals like this for variety: every even linear index gets
    // signal 1.0, the rest stay at zero
    auto iter = m_ws->createIterator();
    do {
      auto index = iter->getLinearIndex();
      if (index % 2 == 0) {
        m_ws->setSignalAt(index, 1.0);
      }
    } while (iter->next());
  }
  void test_execute_4d() {
    CompactMD alg;
    alg.setChild(true);
    alg.setRethrows(true);
    alg.initialize();
    alg.setProperty("InputWorkspace", m_ws);
    alg.setProperty("OutputWorkspace", "out");
    alg.execute();
    IMDHistoWorkspace_sptr outWS = alg.getProperty("OutputWorkspace");
    TS_ASSERT(outWS);
  }
};
|
{
"pile_set_name": "Github"
}
|
<?php
namespace Illuminate\Foundation\Testing\Concerns;
use Illuminate\Support\Str;
use Illuminate\Http\Request;
use Illuminate\Foundation\Testing\TestResponse;
use Illuminate\Contracts\Http\Kernel as HttpKernel;
use Symfony\Component\HttpFoundation\Request as SymfonyRequest;
use Symfony\Component\HttpFoundation\File\UploadedFile as SymfonyUploadedFile;
/**
 * Fluent helpers for issuing HTTP requests against the application under
 * test: per-verb entry points (get/post/put/patch/delete and their *Json
 * variants) that funnel into call(), which drives the HTTP kernel directly
 * and wraps the result in a TestResponse.
 */
trait MakesHttpRequests
{
    /**
     * Additional headers for the request.
     *
     * @var array
     */
    protected $defaultHeaders = [];

    /**
     * Additional server variables for the request.
     *
     * @var array
     */
    protected $serverVariables = [];

    /**
     * Indicates whether redirects should be followed.
     *
     * @var bool
     */
    protected $followRedirects = false;

    /**
     * Define additional headers to be sent with the request.
     *
     * @param array $headers
     * @return $this
     */
    public function withHeaders(array $headers)
    {
        $this->defaultHeaders = array_merge($this->defaultHeaders, $headers);

        return $this;
    }

    /**
     * Add a header to be sent with the request.
     *
     * @param string $name
     * @param string $value
     * @return $this
     */
    public function withHeader(string $name, string $value)
    {
        $this->defaultHeaders[$name] = $value;

        return $this;
    }

    /**
     * Flush all the configured headers.
     *
     * @return $this
     */
    public function flushHeaders()
    {
        $this->defaultHeaders = [];

        return $this;
    }

    /**
     * Define a set of server variables to be sent with the requests.
     *
     * @param array $server
     * @return $this
     */
    public function withServerVariables(array $server)
    {
        $this->serverVariables = $server;

        return $this;
    }

    /**
     * Disable middleware for the test.
     *
     * @param string|array $middleware
     * @return $this
     */
    public function withoutMiddleware($middleware = null)
    {
        if (is_null($middleware)) {
            $this->app->instance('middleware.disable', true);

            return $this;
        }

        // Replace each named middleware with an anonymous pass-through.
        foreach ((array) $middleware as $abstract) {
            $this->app->instance($abstract, new class {
                public function handle($request, $next)
                {
                    return $next($request);
                }
            });
        }

        return $this;
    }

    /**
     * Enable the given middleware for the test.
     *
     * @param string|array $middleware
     * @return $this
     */
    public function withMiddleware($middleware = null)
    {
        if (is_null($middleware)) {
            unset($this->app['middleware.disable']);

            return $this;
        }

        foreach ((array) $middleware as $abstract) {
            unset($this->app[$abstract]);
        }

        return $this;
    }

    /**
     * Automatically follow any redirects returned from the response.
     *
     * @return $this
     */
    public function followingRedirects()
    {
        $this->followRedirects = true;

        return $this;
    }

    /**
     * Set the referer header to simulate a previous request.
     *
     * @param string $url
     * @return $this
     */
    public function from(string $url)
    {
        return $this->withHeader('referer', $url);
    }

    /**
     * Visit the given URI with a GET request.
     *
     * @param string $uri
     * @param array $headers
     * @return \Illuminate\Foundation\Testing\TestResponse
     */
    public function get($uri, array $headers = [])
    {
        $server = $this->transformHeadersToServerVars($headers);

        return $this->call('GET', $uri, [], [], [], $server);
    }

    /**
     * Visit the given URI with a GET request, expecting a JSON response.
     *
     * @param string $uri
     * @param array $headers
     * @return \Illuminate\Foundation\Testing\TestResponse
     */
    public function getJson($uri, array $headers = [])
    {
        return $this->json('GET', $uri, [], $headers);
    }

    /**
     * Visit the given URI with a POST request.
     *
     * @param string $uri
     * @param array $data
     * @param array $headers
     * @return \Illuminate\Foundation\Testing\TestResponse
     */
    public function post($uri, array $data = [], array $headers = [])
    {
        $server = $this->transformHeadersToServerVars($headers);

        return $this->call('POST', $uri, $data, [], [], $server);
    }

    /**
     * Visit the given URI with a POST request, expecting a JSON response.
     *
     * @param string $uri
     * @param array $data
     * @param array $headers
     * @return \Illuminate\Foundation\Testing\TestResponse
     */
    public function postJson($uri, array $data = [], array $headers = [])
    {
        return $this->json('POST', $uri, $data, $headers);
    }

    /**
     * Visit the given URI with a PUT request.
     *
     * @param string $uri
     * @param array $data
     * @param array $headers
     * @return \Illuminate\Foundation\Testing\TestResponse
     */
    public function put($uri, array $data = [], array $headers = [])
    {
        $server = $this->transformHeadersToServerVars($headers);

        return $this->call('PUT', $uri, $data, [], [], $server);
    }

    /**
     * Visit the given URI with a PUT request, expecting a JSON response.
     *
     * @param string $uri
     * @param array $data
     * @param array $headers
     * @return \Illuminate\Foundation\Testing\TestResponse
     */
    public function putJson($uri, array $data = [], array $headers = [])
    {
        return $this->json('PUT', $uri, $data, $headers);
    }

    /**
     * Visit the given URI with a PATCH request.
     *
     * @param string $uri
     * @param array $data
     * @param array $headers
     * @return \Illuminate\Foundation\Testing\TestResponse
     */
    public function patch($uri, array $data = [], array $headers = [])
    {
        $server = $this->transformHeadersToServerVars($headers);

        return $this->call('PATCH', $uri, $data, [], [], $server);
    }

    /**
     * Visit the given URI with a PATCH request, expecting a JSON response.
     *
     * @param string $uri
     * @param array $data
     * @param array $headers
     * @return \Illuminate\Foundation\Testing\TestResponse
     */
    public function patchJson($uri, array $data = [], array $headers = [])
    {
        return $this->json('PATCH', $uri, $data, $headers);
    }

    /**
     * Visit the given URI with a DELETE request.
     *
     * @param string $uri
     * @param array $data
     * @param array $headers
     * @return \Illuminate\Foundation\Testing\TestResponse
     */
    public function delete($uri, array $data = [], array $headers = [])
    {
        $server = $this->transformHeadersToServerVars($headers);

        return $this->call('DELETE', $uri, $data, [], [], $server);
    }

    /**
     * Visit the given URI with a DELETE request, expecting a JSON response.
     *
     * @param string $uri
     * @param array $data
     * @param array $headers
     * @return \Illuminate\Foundation\Testing\TestResponse
     */
    public function deleteJson($uri, array $data = [], array $headers = [])
    {
        return $this->json('DELETE', $uri, $data, $headers);
    }

    /**
     * Call the given URI with a JSON request.
     *
     * @param string $method
     * @param string $uri
     * @param array $data
     * @param array $headers
     * @return \Illuminate\Foundation\Testing\TestResponse
     */
    public function json($method, $uri, array $data = [], array $headers = [])
    {
        // Uploaded files are stripped out of the payload and sent as files.
        $files = $this->extractFilesFromDataArray($data);

        $content = json_encode($data);

        // CONTENT_LENGTH is computed from the encoded body; caller-supplied
        // headers take precedence over these defaults.
        $headers = array_merge([
            'CONTENT_LENGTH' => mb_strlen($content, '8bit'),
            'CONTENT_TYPE' => 'application/json',
            'Accept' => 'application/json',
        ], $headers);

        return $this->call(
            $method, $uri, [], [], $files, $this->transformHeadersToServerVars($headers), $content
        );
    }

    /**
     * Call the given URI and return the Response.
     *
     * @param string $method
     * @param string $uri
     * @param array $parameters
     * @param array $cookies
     * @param array $files
     * @param array $server
     * @param string $content
     * @return \Illuminate\Foundation\Testing\TestResponse
     */
    public function call($method, $uri, $parameters = [], $cookies = [], $files = [], $server = [], $content = null)
    {
        $kernel = $this->app->make(HttpKernel::class);

        $files = array_merge($files, $this->extractFilesFromDataArray($parameters));

        $symfonyRequest = SymfonyRequest::create(
            $this->prepareUrlForRequest($uri), $method, $parameters,
            $cookies, $files, array_replace($this->serverVariables, $server), $content
        );

        $response = $kernel->handle(
            $request = Request::createFromBase($symfonyRequest)
        );

        if ($this->followRedirects) {
            $response = $this->followRedirects($response);
        }

        $kernel->terminate($request, $response);

        return $this->createTestResponse($response);
    }

    /**
     * Turn the given URI into a fully qualified URL.
     *
     * @param string $uri
     * @return string
     */
    protected function prepareUrlForRequest($uri)
    {
        if (Str::startsWith($uri, '/')) {
            $uri = substr($uri, 1);
        }

        if (! Str::startsWith($uri, 'http')) {
            $uri = config('app.url').'/'.$uri;
        }

        return trim($uri, '/');
    }

    /**
     * Transform headers array to array of $_SERVER vars with HTTP_* format.
     *
     * @param array $headers
     * @return array
     */
    protected function transformHeadersToServerVars(array $headers)
    {
        return collect(array_merge($this->defaultHeaders, $headers))->mapWithKeys(function ($value, $name) {
            $name = strtr(strtoupper($name), '-', '_');

            return [$this->formatServerHeaderKey($name) => $value];
        })->all();
    }

    /**
     * Format the header name for the server array.
     *
     * @param string $name
     * @return string
     */
    protected function formatServerHeaderKey($name)
    {
        if (! Str::startsWith($name, 'HTTP_') && $name != 'CONTENT_TYPE' && $name != 'REMOTE_ADDR') {
            return 'HTTP_'.$name;
        }

        return $name;
    }

    /**
     * Extract the file uploads from the given data array.
     *
     * @param array $data
     * @return array
     */
    protected function extractFilesFromDataArray(&$data)
    {
        $files = [];

        foreach ($data as $key => $value) {
            if ($value instanceof SymfonyUploadedFile) {
                $files[$key] = $value;

                unset($data[$key]);
            }

            if (is_array($value)) {
                // Recurse by reference: nested file entries are removed from
                // $value, and the pruned array is written back into $data.
                $files[$key] = $this->extractFilesFromDataArray($value);

                $data[$key] = $value;
            }
        }

        return $files;
    }

    /**
     * Follow a redirect chain until a non-redirect is received.
     *
     * @param \Illuminate\Http\Response $response
     * @return \Illuminate\Http\Response
     */
    protected function followRedirects($response)
    {
        while ($response->isRedirect()) {
            $response = $this->get($response->headers->get('Location'));
        }

        // Reset the flag so following redirects is opt-in per request.
        $this->followRedirects = false;

        return $response;
    }

    /**
     * Create the test response instance from the given response.
     *
     * @param \Illuminate\Http\Response $response
     * @return \Illuminate\Foundation\Testing\TestResponse
     */
    protected function createTestResponse($response)
    {
        return TestResponse::fromBaseResponse($response);
    }
}
|
{
"pile_set_name": "Github"
}
|
# Building `sys/unix`
The sys/unix package provides access to the raw system call interface of the
underlying operating system. See: https://godoc.org/golang.org/x/sys/unix
Porting Go to a new architecture/OS combination or adding syscalls, types, or
constants to an existing architecture/OS pair requires some manual effort;
however, there are tools that automate much of the process.
## Build Systems
There are currently two ways we generate the necessary files. We are currently
migrating the build system to use containers so the builds are reproducible.
This is being done on an OS-by-OS basis. Please update this documentation as
components of the build system change.
### Old Build System (currently for `GOOS != "linux"`)
The old build system generates the Go files based on the C header files
present on your system. This means that files
for a given GOOS/GOARCH pair must be generated on a system with that OS and
architecture. This also means that the generated code can differ from system
to system, based on differences in the header files.
To avoid this, if you are using the old build system, only generate the Go
files on an installation with unmodified header files. It is also important to
keep track of which version of the OS the files were generated from (ex.
Darwin 14 vs Darwin 15). This makes it easier to track the progress of changes
and have each OS upgrade correspond to a single change.
To build the files for your current OS and architecture, make sure GOOS and
GOARCH are set correctly and run `mkall.sh`. This will generate the files for
your specific system. Running `mkall.sh -n` shows the commands that will be run.
Requirements: bash, go
### New Build System (currently for `GOOS == "linux"`)
The new build system uses a Docker container to generate the go files directly
from source checkouts of the kernel and various system libraries. This means
that on any platform that supports Docker, all the files using the new build
system can be generated at once, and generated files will not change based on
what the person running the scripts has installed on their computer.
The OS specific files for the new build system are located in the `${GOOS}`
directory, and the build is coordinated by the `${GOOS}/mkall.go` program. When
the kernel or system library updates, modify the Dockerfile at
`${GOOS}/Dockerfile` to checkout the new release of the source.
To build all the files under the new build system, you must be on an amd64/Linux
system and have your GOOS and GOARCH set accordingly. Running `mkall.sh` will
then generate all of the files for all of the GOOS/GOARCH pairs in the new build
system. Running `mkall.sh -n` shows the commands that will be run.
Requirements: bash, go, docker
## Component files
This section describes the various files used in the code generation process.
It also contains instructions on how to modify these files to add a new
architecture/OS or to add additional syscalls, types, or constants. Note that
if you are using the new build system, the scripts/programs cannot be called normally.
They must be called from within the docker container.
### asm files
The hand-written assembly file at `asm_${GOOS}_${GOARCH}.s` implements system
call dispatch. There are three entry points:
```
func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr)
func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr)
func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr)
```
The first and second are the standard ones; they differ only in how many
arguments can be passed to the kernel. The third is for low-level use by the
ForkExec wrapper. Unlike the first two, it does not call into the scheduler to
let it know that a system call is running.
When porting Go to a new architecture/OS, this file must be implemented for
each GOOS/GOARCH pair.
### mksysnum
Mksysnum is a Go program located at `${GOOS}/mksysnum.go` (or `mksysnum_${GOOS}.go`
for the old system). This program takes in a list of header files containing the
syscall number declarations and parses them to produce the corresponding list of
Go numeric constants. See `zsysnum_${GOOS}_${GOARCH}.go` for the generated
constants.
Adding new syscall numbers is mostly done by running the build on a sufficiently
new installation of the target OS (or updating the source checkouts for the
new build system). However, depending on the OS, you may need to update the
parsing in mksysnum.
### mksyscall.go
The `syscall.go`, `syscall_${GOOS}.go`, `syscall_${GOOS}_${GOARCH}.go` are
hand-written Go files which implement system calls (for unix, the specific OS,
or the specific OS/Architecture pair respectively) that need special handling
and list `//sys` comments giving prototypes for ones that can be generated.
The mksyscall.go program takes the `//sys` and `//sysnb` comments and converts
them into syscalls. This requires the name of the prototype in the comment to
match a syscall number in the `zsysnum_${GOOS}_${GOARCH}.go` file. The function
prototype can be exported (capitalized) or not.
Adding a new syscall often just requires adding a new `//sys` function prototype
with the desired arguments and a capitalized name so it is exported. However, if
you want the interface to the syscall to be different, often one will make an
unexported `//sys` prototype, and then write a custom wrapper in
`syscall_${GOOS}.go`.
### types files
For each OS, there is a hand-written Go file at `${GOOS}/types.go` (or
`types_${GOOS}.go` on the old system). This file includes standard C headers and
creates Go type aliases to the corresponding C types. The file is then fed
through godef to get the Go compatible definitions. Finally, the generated code
is fed through mkpost.go to format the code correctly and remove any hidden or
private identifiers. This cleaned-up code is written to
`ztypes_${GOOS}_${GOARCH}.go`.
The hardest part about preparing this file is figuring out which headers to
include and which symbols need to be `#define`d to get the actual data
structures that pass through to the kernel system calls. Some C libraries
present alternate versions for binary compatibility and translate them on the
way in and out of system calls, but there is almost always a `#define` that can
get the real ones.
See `types_darwin.go` and `linux/types.go` for examples.
To add a new type, add in the necessary include statement at the top of the
file (if it is not already there) and add in a type alias line. Note that if
your type is significantly different on different architectures, you may need
some `#if/#elif` macros in your include statements.
### mkerrors.sh
This script is used to generate the system's various constants. This doesn't
just include the error numbers and error strings, but also the signal numbers
and a wide variety of miscellaneous constants. The constants come from the list
of include files in the `includes_${uname}` variable. A regex then picks out
the desired `#define` statements, and generates the corresponding Go constants.
The error numbers and strings are generated from `#include <errno.h>`, and the
signal numbers and strings are generated from `#include <signal.h>`. All of
these constants are written to `zerrors_${GOOS}_${GOARCH}.go` via a C program,
`_errors.c`, which prints out all the constants.
To add a constant, add the header that includes it to the appropriate variable.
Then, edit the regex (if necessary) to match the desired constant. Avoid making
the regex too broad to avoid matching unintended constants.
## Generated files
### `zerrors_${GOOS}_${GOARCH}.go`
A file containing all of the system's generated error numbers, error strings,
signal numbers, and constants. Generated by `mkerrors.sh` (see above).
### `zsyscall_${GOOS}_${GOARCH}.go`
A file containing all the generated syscalls for a specific GOOS and GOARCH.
Generated by `mksyscall.go` (see above).
### `zsysnum_${GOOS}_${GOARCH}.go`
A list of numeric constants for all the syscall numbers of the specific GOOS
and GOARCH. Generated by mksysnum (see above).
### `ztypes_${GOOS}_${GOARCH}.go`
A file containing Go types for passing into (or returning from) syscalls.
Generated by godefs and the types file (see above).
|
{
"pile_set_name": "Github"
}
|
---
title: StorSimple 8000 シリーズ デバイスへの Update 5.1 のインストール | Microsoft Docs
description: StorSimple 8000 シリーズ デバイスに StorSimple 8000 シリーズの Update 5.1 をインストールする方法について説明します。
services: storsimple
documentationcenter: NA
author: twooley
ms.assetid: ''
ms.service: storsimple
ms.devlang: NA
ms.topic: how-to
ms.tgt_pltfrm: NA
ms.workload: TBD
ms.date: 03/05/2020
ms.author: twooley
ms.openlocfilehash: f9cc5181d6cc29ee4b3c2373dbbc91d6290fbe6e
ms.sourcegitcommit: 9c3cfbe2bee467d0e6966c2bfdeddbe039cad029
ms.translationtype: HT
ms.contentlocale: ja-JP
ms.lasthandoff: 08/24/2020
ms.locfileid: "88782772"
---
# <a name="install-update-51-on-your-storsimple-device"></a>StorSimple デバイスへの Update 5.1 のインストール
## <a name="overview"></a>概要
このチュートリアルでは、Update 5.1 より前のソフトウェア バージョンを実行している StorSimple デバイスに、Azure portal 経由で Update 5.1 をインストールする方法について説明します。 <!--The hotfix method is used when you are trying to install Update 5.1 on a device running pre-Update 3 versions. The hotfix method is also used when a gateway is configured on a network interface other than DATA 0 of the StorSimple device and you are trying to update from a pre-Update 1 software version.-->
Update 5.1 には、中断なしのセキュリティ更新プログラムが含まれています。 中断なしまたは通常の更新プログラムは、Azure portal を使用して適用できます <!--or by the hotfix method-->。
> [!IMPORTANT]
>
> * Update 5.1 は必須の更新プログラムであり、すぐにインストールする必要があります。 詳細については、[Update 5.1 リリース ノート](storsimple-update51-release-notes.md)に関する記事を参照してください。
> * インストールの前に、ハードウェアの状態とネットワーク接続の点からデバイスの正常性を判断するための手動と自動の一連の事前チェックが行われます。 これらの事前チェックは、Azure portal から更新プログラムを適用する場合にのみ実行されます。
> * 修正プログラムによる方法を使用してインストールする場合は、[Microsoft サポート](mailto:support@microsoft.com)にお問い合わせください。
<!--
> * We strongly recommend that when updating a device running versions prior to Update 3, you install the updates using hotfix method. If you encounter any issues, [log a support ticket](storsimple-8000-contact-microsoft-support.md).
> * We recommend that you install the software and other regular updates via the Azure portal. You should only go to the Windows PowerShell interface of the device (to install updates) if the pre-update gateway check fails in the portal. Depending upon the version you are updating from, the updates may take 4 hours (or greater) to install. The maintenance mode updates must be installed through the Windows PowerShell interface of the device. As maintenance mode updates are disruptive updates, these result in a down time for your device.
> * If running the optional StorSimple Snapshot Manager, ensure that you have upgraded your Snapshot Manager version to Update 5.1 prior to updating the device.
-->
[!INCLUDE [storsimple-preparing-for-update](../../includes/storsimple-preparing-for-updates.md)]
## <a name="install-update-51-through-the-azure-portal"></a>Azure portal を使用して Update 5.1 をインストールする
デバイスを [Update 5.1](storsimple-update51-release-notes.md) に更新するには、次の手順を実行します。
> [!NOTE]
> Microsoft はデバイスから追加の診断情報を取得します。 その結果、Microsoft の運用チームが問題のあるデバイスを識別したときに、デバイスから情報を収集して問題を診断する能力が向上します。
#### <a name="to-install-an-update-from-the-azure-portal"></a>Azure ポータルから更新プログラムをインストールするには
1. StorSimple サービス ページでデバイスを選択します。

2. **[デバイスの設定]** > **[デバイスの更新プログラム]** の順に移動します。
![[デバイスの更新プログラム] をクリック](./media/storsimple-8000-install-update-51/update2.png)
3. 新しい更新プログラムが利用できる場合は、通知が表示されます。 または、 **[デバイスの更新プログラム]** ブレードで **[更新プログラムのスキャン]** をクリックします。 利用可能な更新プログラムをスキャンするジョブが作成されます。 ジョブが正常に完了すると、その旨が通知されます。
![[デバイスの更新プログラム] をクリック](./media/storsimple-8000-install-update-51/update3.png)
4. 更新プログラムをデバイスに適用する前に、リリース ノートを確認することをお勧めします。 **[更新プログラムのインストール]** をクリックすると、更新プログラムが適用されます。 **[定期更新プログラムの確認]** ブレードで、更新プログラムを適用する前に完了する必要のある前提条件を確認します。 デバイスを更新する準備ができたことを示すチェック ボックスをオンにし、 **[インストール]** をクリックします。
![[デバイスの更新プログラム] をクリック](./media/storsimple-8000-install-update-51/update4.png)
5. 一連の前提条件のチェックが開始されます。 これらのチェックは次のとおりです。
* **コントローラーの正常性チェック** では、両方のデバイス コントローラーが正常であり、オンラインであることを確認します。
* **ハードウェア コンポーネントの正常性チェック** では、StorSimple デバイスのすべてのハードウェア コンポーネントが正常であることを確認します。
* **DATA 0 チェック** では、デバイスで DATA 0 が有効であることを確認します。 このインターフェイスが有効でない場合は、有効にしてから再試行する必要があります。
すべてのチェックが正常に完了した場合にのみ、更新プログラムがダウンロードされてインストールされます。 チェックが実行中のときは通知されます。 事前チェックに失敗した場合、失敗の理由が表示されます。 それらの問題を解決してから操作をやり直してください。 これらの問題に自分で対処できない場合、Microsoft サポートに連絡することが必要になる場合があります。
7. 事前チェックが正常に完了したら、更新ジョブが作成されます。 更新ジョブが正常に作成されると、通知されます。

その後、更新プログラムがデバイスに適用されます。
9. 更新の完了には数時間かかります。 更新ジョブを選択し、 **[詳細]** をクリックすると、ジョブの詳細をいつでも表示できます。

**[デバイスの設定]、[ジョブ]** の順に移動して、更新ジョブの進行状況を監視することもできます。 **[ジョブ]** ブレードで、更新の進行状況を確認できます。

10. ジョブが完了したら、 **[デバイスの設定]、[デバイスの更新プログラム]** の順に移動します。 ソフトウェアのバージョンが更新されています。
デバイスで **StorSimple 8000 Series Update 5.1 (6.3.9600.17885)** が実行されていることを確認します。 **[最終更新日]** が変更されています。
<!-- 5.1 - KB 4542887-->
<!--You will now see that the Maintenance mode updates are available (this message might continue to be displayed for up to 24 hours after you install the updates). The steps to install maintenance mode update are detailed in the next section.
[!INCLUDE [storsimple-8000-install-maintenance-mode-updates](../../includes/storsimple-8000-install-maintenance-mode-updates.md)]
## Install Update 5.1 as a hotfix
The software versions that can be upgraded using the hotfix method are:
* Update 0.1, 0.2, 0.3
* Update 1, 1.1, 1.2
* Update 2, 2.1, 2.2
* Update 3, 3.1
* Update 4
* Update 5
> [!NOTE]
> The recommended method to install Update 5.1 is through the Azure portal when trying to update from Update 3 and later version. When updating a device running versions prior to Update 3, use this procedure. You can also use this procedure if you fail the gateway check when trying to install the updates through the Azure portal. The check fails when you have a gateway assigned to a non-DATA 0 network interface and your device is running a software version earlier than Update 1.
The hotfix method involves the following three steps:
1. Download the hotfixes from the Microsoft Update Catalog.
2. Install and verify the regular mode hotfixes.
3. Install and verify the maintenance mode hotfix.
#### Download updates for your device
You must download and install the following hotfixes in the prescribed order and the suggested folders:
| Order | KB | Description | Update type | Install time |Install in folder|
| --- | --- | --- | --- | --- | --- |
| 1. |KB4037264 |Software update<br> Download both _HcsSoftwareUpdate.exe_ and _CisMSDAgent.exe_ |Regular <br></br>Non-disruptive |~ 25 mins |FirstOrderUpdate|
If updating from a device running Update 4, you only need to install the OS cumulative updates as second order updates.
| Order | KB | Description | Update type | Install time |Install in folder|
| --- | --- | --- | --- | --- | --- |
| 2A. |KB4025336 |OS cumulative updates package <br> Download Windows Server 2012 R2 version |Regular <br></br>Non-disruptive |- |SecondOrderUpdate|
If installing from a device running Update 3 or earlier, install the following in addition to the cumulative updates.
| Order | KB | Description | Update type | Install time |Install in folder|
| --- | --- | --- | --- | --- | --- |
| 2B. |KB4011841 <br> KB4011842 |LSI driver and firmware updates <br> USM firmware update (version 3.38) |Regular <br></br>Non-disruptive |~ 3 hrs <br> (includes 2A. + 2B. + 2C.)|SecondOrderUpdate|
| 2C. |KB3139398 <br> KB3142030 <br> KB3108381 <br> KB3153704 <br> KB3174644 <br> KB3139914 |OS security updates package <br> Download Windows Server 2012 R2 version |Regular <br></br>Non-disruptive |- |SecondOrderUpdate|
| 2D. |KB3146621 <br> KB3103616 <br> KB3121261 <br> KB3123538 |OS updates package <br> Download Windows Server 2012 R2 version |Regular <br></br>Non-disruptive |- |SecondOrderUpdate|
You may also need to install disk firmware updates on top of all the updates shown in the preceding tables. You can verify whether you need the disk firmware updates by running the `Get-HcsFirmwareVersion` cmdlet. If you are running these firmware versions: `XMGJ`, `XGEG`, `KZ50`, `F6C2`, `VR08`, `N003`, `0107`, then you do not need to install these updates.
| Order | KB | Description | Update type | Install time | Install in folder|
| --- | --- | --- | --- | --- | --- |
| 3. |KB4037263 |Disk firmware |Maintenance <br></br>Disruptive |~ 30 mins | ThirdOrderUpdate |
<br></br>
> [!IMPORTANT]
> * If updating from Update 4, the total install time is close to 4 hours.
> * Before using this procedure to apply the update, make sure that both the device controllers are online and all the hardware components are healthy.
Perform the following steps to download and install the hotfixes.
[!INCLUDE [storsimple-install-update5-hotfix](../../includes/storsimple-install-update5-hotfix.md)]
-->
<!--
[!INCLUDE [storsimple-8000-install-troubleshooting](../../includes/storsimple-8000-install-troubleshooting.md)]
-->
## <a name="next-steps"></a>次のステップ
詳しくは、[Update 5.1 リリース](storsimple-update51-release-notes.md)に関するページをご覧ください。
|
{
"pile_set_name": "Github"
}
|
#include "create_empty_directed_graph_with_graph_name.h"
#include "get_graph_name.h"
#include "set_graph_name.h"
#include <boost/test/unit_test.hpp>
// A name stored on a graph with set_graph_name must be read back
// unchanged by get_graph_name.
BOOST_AUTO_TEST_CASE(test_get_graph_name)
{
  auto graph = create_empty_directed_graph_with_graph_name();
  const std::string expected{"Dex"};
  set_graph_name(expected, graph);
  BOOST_CHECK(get_graph_name(graph) == expected);
}
|
{
"pile_set_name": "Github"
}
|
#!/bin/sh -e
#
# rc.local
#
# Executed at the end of each multiuser runlevel. It must "exit 0" on
# success, or return any other value on error. Enable or disable this
# script by toggling its execution bits. By default it does nothing.
#
## For Circlean, this stock rc.local has been modified so that the
## grooming process starts once the system has finished booting.

# Announce completion, wait briefly, then power the machine off.
# Installed as a trap so it also fires when the script exits.
finish(){
	echo "GROOMER: Copy over, sleeping 15s before shutdown."
	sleep 15
	echo "GROOMER: rc.local done, shutting down."
	/sbin/shutdown -P -h now
}

echo "GROOMER: end of boot, running rc.local."

# Start grooming only when both disks (sda and sdb) are present.
if [ -e /dev/sda ] && [ -e /dev/sdb ]; then
	trap finish EXIT TERM INT
	cd /opt/groomer
	/usr/sbin/led &
	./init.sh
fi

exit 0
|
{
"pile_set_name": "Github"
}
|
#ifndef PYTHONIC_NUMPY_LINALG_MATRIX_POWER_HPP
#define PYTHONIC_NUMPY_LINALG_MATRIX_POWER_HPP
#include "pythonic/include/numpy/linalg/matrix_power.hpp"
#include "pythonic/numpy/array.hpp"
#include "pythonic/numpy/asarray.hpp"
#include "pythonic/numpy/identity.hpp"
#include "pythonic/numpy/dot.hpp"
#include "pythonic/builtins/NotImplementedError.hpp"
PYTHONIC_NS_BEGIN
namespace numpy
{
namespace linalg
{
namespace details
{
// Raise a square matrix to the power n (n >= 1) by exponentiation by
// squaring, using O(log n) dot products. Callers must not pass n <= 0.
template <class E>
E fast_pow(E const &base, long n)
{
// Small exponents are computed directly.
if (n == 1)
return base;
if (n == 2)
return numpy::functor::dot{}(base, base);
if (n == 3) {
auto tmp = numpy::functor::dot{}(base, base);
return numpy::functor::dot{}(tmp, base);
}
// starting from here, we know for sure that tmp will point to newly
// allocated memory
// this is used to optimize in-place dot computation in the odd case
auto tmp = fast_pow(base, n / 2);
if (n & 1) {
// odd n: base^n = base * (base^(n/2))^2. The three-argument dot
// presumably writes the result into tmp's freshly allocated
// storage -- TODO confirm against the dot functor's contract.
auto next = numpy::functor::dot{}(tmp, tmp);
return numpy::functor::dot{}(base, next, tmp);
} else {
// even n: base^n = (base^(n/2))^2
return numpy::functor::dot{}(tmp, tmp);
}
}
}
// Compute expr ** n for a square matrix expression.
// n == 0 yields the identity matrix of matching size and dtype;
// n > 0 delegates to details::fast_pow on a concrete array copy;
// negative exponents are not implemented.
template <class E>
auto matrix_power(E const &expr, long n)
    -> decltype(numpy::functor::array{}(expr))
{
  if (n < 0)
    throw pythonic::builtins::NotImplementedError("negative power");
  if (n == 0)
    return numpy::functor::identity{}(expr.template shape<0>(),
                                      types::dtype_t<typename E::dtype>{});
  auto materialized = numpy::functor::asarray{}(expr);
  return details::fast_pow(materialized, n);
}
}
}
PYTHONIC_NS_END
#endif
|
{
"pile_set_name": "Github"
}
|
#region License
// Copyright (c) 2013, ClearCanvas Inc.
// All rights reserved.
// http://www.clearcanvas.ca
//
// This file is part of the ClearCanvas RIS/PACS open source project.
//
// The ClearCanvas RIS/PACS open source project is free software: you can
// redistribute it and/or modify it under the terms of the GNU General Public
// License as published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// The ClearCanvas RIS/PACS open source project is distributed in the hope that it
// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
// Public License for more details.
//
// You should have received a copy of the GNU General Public License along with
// the ClearCanvas RIS/PACS open source project. If not, see
// <http://www.gnu.org/licenses/>.
#endregion
using System;
using System.Collections.Generic;
using System.Text;
using ClearCanvas.Common.Serialization;
using System.Runtime.Serialization;
namespace ClearCanvas.Ris.Application.Common
{
	/// <summary>
	/// Response contract for a text query, carrying the list of matching
	/// summaries and a flag reporting whether there were too many matches.
	/// </summary>
	/// <typeparam name="TSummary">Data-contract type of each returned summary.</typeparam>
	[DataContract]
	public class TextQueryResponse<TSummary> : DataContractBase
		where TSummary : DataContractBase
	{
		public TextQueryResponse(bool tooManyMatches, List<TSummary> matches)
		{
			TooManyMatches = tooManyMatches;
			Matches = matches;
		}

		/// <summary>
		/// Summaries matching the query.
		/// </summary>
		[DataMember]
		public List<TSummary> Matches;

		/// <summary>
		/// Set when the query produced too many matches.
		/// </summary>
		[DataMember]
		public bool TooManyMatches;
	}
}
|
{
"pile_set_name": "Github"
}
|
-- Lookup table mapping a locale identifier to its localized display name.
-- The ids inserted below include script/region variants such as
-- 'az_Latn_AZ' (10 characters), so the id column must be wider than the
-- original VARCHAR(2), which would truncate or reject most of these rows.
CREATE TABLE list (id VARCHAR(32) NOT NULL, value VARCHAR(64) NOT NULL, PRIMARY KEY(id));
INSERT INTO "list" ("id", "value") VALUES ('ff', 'Fulah');
INSERT INTO "list" ("id", "value") VALUES ('ff_CM', 'Fulah (Cameroon)');
INSERT INTO "list" ("id", "value") VALUES ('ff_GN', 'Fulah (Guinea)');
INSERT INTO "list" ("id", "value") VALUES ('ff_MR', 'Fulah (Mauritania)');
INSERT INTO "list" ("id", "value") VALUES ('ff_SN', 'Fulah (Senegal)');
INSERT INTO "list" ("id", "value") VALUES ('no', 'Norwegian');
INSERT INTO "list" ("id", "value") VALUES ('no_NO', 'Norwegian (Norway)');
INSERT INTO "list" ("id", "value") VALUES ('os', 'Ossetic');
INSERT INTO "list" ("id", "value") VALUES ('os_GE', 'Ossetic (Georgia)');
INSERT INTO "list" ("id", "value") VALUES ('os_RU', 'Ossetic (Russia)');
INSERT INTO "list" ("id", "value") VALUES ('gd', 'Scottish Gaelic');
INSERT INTO "list" ("id", "value") VALUES ('gd_GB', 'Scottish Gaelic (United Kingdom)');
INSERT INTO "list" ("id", "value") VALUES ('sh', 'Serbo-Croatian');
INSERT INTO "list" ("id", "value") VALUES ('sh_BA', 'Serbo-Croatian (Bosnia & Herzegovina)');
INSERT INTO "list" ("id", "value") VALUES ('tl', 'Tagalog');
INSERT INTO "list" ("id", "value") VALUES ('tl_PH', 'Tagalog (Philippines)');
INSERT INTO "list" ("id", "value") VALUES ('yi', 'Yiddish');
INSERT INTO "list" ("id", "value") VALUES ('en', 'अंग्रेजी');
INSERT INTO "list" ("id", "value") VALUES ('en_AS', 'अंग्रेजी (अमेरिकी समोआ)');
INSERT INTO "list" ("id", "value") VALUES ('en_AU', 'अंग्रेजी (अष्ट्रेलिया)');
INSERT INTO "list" ("id", "value") VALUES ('en_IM', 'अंग्रेजी (आइज्ले अफ् म्यान)');
INSERT INTO "list" ("id", "value") VALUES ('en_AI', 'अंग्रेजी (आङ्गुइला)');
INSERT INTO "list" ("id", "value") VALUES ('en_IE', 'अंग्रेजी (आयरल्याण्ड)');
INSERT INTO "list" ("id", "value") VALUES ('en_MP', 'अंग्रेजी (उत्तरी मारिआना टापु)');
INSERT INTO "list" ("id", "value") VALUES ('en_AG', 'अंग्रेजी (एन्टिगुआ र बारबुडा)');
INSERT INTO "list" ("id", "value") VALUES ('en_ER', 'अंग्रेजी (एरित्रिया)');
INSERT INTO "list" ("id", "value") VALUES ('en_KI', 'अंग्रेजी (किरिबाटी)');
INSERT INTO "list" ("id", "value") VALUES ('en_CK', 'अंग्रेजी (कुक टापुहरु)');
INSERT INTO "list" ("id", "value") VALUES ('en_KE', 'अंग्रेजी (केन्या)');
INSERT INTO "list" ("id", "value") VALUES ('en_KY', 'अंग्रेजी (केयमान टापु)');
INSERT INTO "list" ("id", "value") VALUES ('en_CC', 'अंग्रेजी (कोकोस (किलिंग) टापुहरु)');
INSERT INTO "list" ("id", "value") VALUES ('en_CA', 'अंग्रेजी (क्यानाडा)');
INSERT INTO "list" ("id", "value") VALUES ('en_CM', 'अंग्रेजी (क्यामरून)');
INSERT INTO "list" ("id", "value") VALUES ('en_CX', 'अंग्रेजी (क्रिष्टमस टापु)');
INSERT INTO "list" ("id", "value") VALUES ('en_GM', 'अंग्रेजी (गाम्विया)');
INSERT INTO "list" ("id", "value") VALUES ('en_GG', 'अंग्रेजी (गुएर्नसे)');
INSERT INTO "list" ("id", "value") VALUES ('en_GY', 'अंग्रेजी (गुयाना)');
INSERT INTO "list" ("id", "value") VALUES ('en_GU', 'अंग्रेजी (गुवाम)');
INSERT INTO "list" ("id", "value") VALUES ('en_GD', 'अंग्रेजी (ग्रेनाडा)');
INSERT INTO "list" ("id", "value") VALUES ('en_GH', 'अंग्रेजी (घाना)');
INSERT INTO "list" ("id", "value") VALUES ('en_JM', 'अंग्रेजी (जमाइका)');
INSERT INTO "list" ("id", "value") VALUES ('en_JE', 'अंग्रेजी (जर्सी)');
INSERT INTO "list" ("id", "value") VALUES ('en_ZM', 'अंग्रेजी (जाम्बिया)');
INSERT INTO "list" ("id", "value") VALUES ('en_GI', 'अंग्रेजी (जिब्राल्टार)');
INSERT INTO "list" ("id", "value") VALUES ('en_ZW', 'अंग्रेजी (जिम्बाबे)');
INSERT INTO "list" ("id", "value") VALUES ('en_TO', 'अंग्रेजी (टोंगा)');
INSERT INTO "list" ("id", "value") VALUES ('en_DG', 'अंग्रेजी (डियगो गार्सिया)');
INSERT INTO "list" ("id", "value") VALUES ('en_DM', 'अंग्रेजी (डोमिनिका)');
INSERT INTO "list" ("id", "value") VALUES ('en_TZ', 'अंग्रेजी (तान्जानिया)');
INSERT INTO "list" ("id", "value") VALUES ('en_TV', 'अंग्रेजी (तुभालु)');
INSERT INTO "list" ("id", "value") VALUES ('en_TC', 'अंग्रेजी (तुर्क र काइकोस टापु)');
INSERT INTO "list" ("id", "value") VALUES ('en_TK', 'अंग्रेजी (तोकेलाउ)');
INSERT INTO "list" ("id", "value") VALUES ('en_TT', 'अंग्रेजी (त्रिनिडाड एण्ड टोबागो)');
INSERT INTO "list" ("id", "value") VALUES ('en_ZA', 'अंग्रेजी (दक्षिण अफ्रिका)');
INSERT INTO "list" ("id", "value") VALUES ('en_SS', 'अंग्रेजी (दक्षिणी सुडान)');
INSERT INTO "list" ("id", "value") VALUES ('en_NG', 'अंग्रेजी (नाइजेरिया)');
INSERT INTO "list" ("id", "value") VALUES ('en_NR', 'अंग्रेजी (नाउरू)');
INSERT INTO "list" ("id", "value") VALUES ('en_NA', 'अंग्रेजी (नामिबिया)');
INSERT INTO "list" ("id", "value") VALUES ('en_NU', 'अंग्रेजी (नियुइ)');
INSERT INTO "list" ("id", "value") VALUES ('en_NF', 'अंग्रेजी (नोरफोल्क टापु)');
INSERT INTO "list" ("id", "value") VALUES ('en_NZ', 'अंग्रेजी (न्युजिल्याण्ड)');
INSERT INTO "list" ("id", "value") VALUES ('en_PG', 'अंग्रेजी (पपुआ न्यू गाइनिया)');
INSERT INTO "list" ("id", "value") VALUES ('en_PW', 'अंग्रेजी (पलाउ)');
INSERT INTO "list" ("id", "value") VALUES ('en_PK', 'अंग्रेजी (पाकिस्तान)');
INSERT INTO "list" ("id", "value") VALUES ('en_PN', 'अंग्रेजी (पिटकाइर्न टापुहरु)');
INSERT INTO "list" ("id", "value") VALUES ('en_PR', 'अंग्रेजी (पुएर्टो रिको)');
INSERT INTO "list" ("id", "value") VALUES ('en_FK', 'अंग्रेजी (फकल्याण्ड टापुहरु)');
INSERT INTO "list" ("id", "value") VALUES ('en_FJ', 'अंग्रेजी (फिजी)');
INSERT INTO "list" ("id", "value") VALUES ('en_PH', 'अंग्रेजी (फिलिपिन्स)');
INSERT INTO "list" ("id", "value") VALUES ('en_BM', 'अंग्रेजी (बर्मुडा)');
INSERT INTO "list" ("id", "value") VALUES ('en_BS', 'अंग्रेजी (बहामास)');
INSERT INTO "list" ("id", "value") VALUES ('en_BB', 'अंग्रेजी (बार्बाडोस)');
INSERT INTO "list" ("id", "value") VALUES ('en_GB', 'अंग्रेजी (बेलायत)');
INSERT INTO "list" ("id", "value") VALUES ('en_VG', 'अंग्रेजी (बेलायती भर्जिन टापुहरु)');
INSERT INTO "list" ("id", "value") VALUES ('en_IO', 'अंग्रेजी (बेलायती हिन्द महासागर क्षेत्र)');
INSERT INTO "list" ("id", "value") VALUES ('en_BZ', 'अंग्रेजी (बेलिज)');
INSERT INTO "list" ("id", "value") VALUES ('en_BE', 'अंग्रेजी (बेल्जियम)');
INSERT INTO "list" ("id", "value") VALUES ('en_BW', 'अंग्रेजी (बोट्स्वाना)');
INSERT INTO "list" ("id", "value") VALUES ('en_VU', 'अंग्रेजी (भानुआतु)');
INSERT INTO "list" ("id", "value") VALUES ('en_IN', 'अंग्रेजी (भारत)');
INSERT INTO "list" ("id", "value") VALUES ('en_MO', 'अंग्रेजी (मकावो चिनिँया स्वशासित क्षेत्र)');
INSERT INTO "list" ("id", "value") VALUES ('en_MG', 'अंग्रेजी (मडागास्कर)');
INSERT INTO "list" ("id", "value") VALUES ('en_MY', 'अंग्रेजी (मलेसिया)');
INSERT INTO "list" ("id", "value") VALUES ('en_FM', 'अंग्रेजी (माइक्रोनेसिया)');
INSERT INTO "list" ("id", "value") VALUES ('en_MU', 'अंग्रेजी (माउरिटस)');
INSERT INTO "list" ("id", "value") VALUES ('en_MH', 'अंग्रेजी (मार्शल टापुहरु)');
INSERT INTO "list" ("id", "value") VALUES ('en_MW', 'अंग्रेजी (मालावी)');
INSERT INTO "list" ("id", "value") VALUES ('en_MT', 'अंग्रेजी (माल्टा)');
INSERT INTO "list" ("id", "value") VALUES ('en_MS', 'अंग्रेजी (मोन्टसेर्राट)');
INSERT INTO "list" ("id", "value") VALUES ('en_UG', 'अंग्रेजी (युगाण्डा)');
INSERT INTO "list" ("id", "value") VALUES ('en_RW', 'अंग्रेजी (रवाण्डा)');
INSERT INTO "list" ("id", "value") VALUES ('en_LR', 'अंग्रेजी (लाइबेरिया)');
INSERT INTO "list" ("id", "value") VALUES ('en_LS', 'अंग्रेजी (लेसोथो)');
INSERT INTO "list" ("id", "value") VALUES ('en_UM', 'अंग्रेजी (संयुक्त राज्य बाह्य टापुहरु)');
INSERT INTO "list" ("id", "value") VALUES ('en_VI', 'अंग्रेजी (संयुक्त राज्य भर्जिन टापुहरु)');
INSERT INTO "list" ("id", "value") VALUES ('en_US', 'अंग्रेजी (संयुक्त राज्य)');
INSERT INTO "list" ("id", "value") VALUES ('en_WS', 'अंग्रेजी (सामोआ)');
INSERT INTO "list" ("id", "value") VALUES ('en_SL', 'अंग्रेजी (सिएर्रा लिओन)');
INSERT INTO "list" ("id", "value") VALUES ('en_SG', 'अंग्रेजी (सिङ्गापुर)');
INSERT INTO "list" ("id", "value") VALUES ('en_SX', 'अंग्रेजी (सिन्ट मार्टेन)');
INSERT INTO "list" ("id", "value") VALUES ('en_SD', 'अंग्रेजी (सुडान)');
INSERT INTO "list" ("id", "value") VALUES ('en_SC', 'अंग्रेजी (सेचेलेस)');
INSERT INTO "list" ("id", "value") VALUES ('en_KN', 'अंग्रेजी (सेन्ट किट्स र नेभिस)');
INSERT INTO "list" ("id", "value") VALUES ('en_VC', 'अंग्रेजी (सेन्ट भिन्सेन्ट र ग्रेनाडिन्स)');
INSERT INTO "list" ("id", "value") VALUES ('en_LC', 'अंग्रेजी (सेन्ट लुसिया)');
INSERT INTO "list" ("id", "value") VALUES ('en_SH', 'अंग्रेजी (सेन्ट हेलेना)');
INSERT INTO "list" ("id", "value") VALUES ('en_SB', 'अंग्रेजी (सोलोमोन टापुहरु)');
INSERT INTO "list" ("id", "value") VALUES ('en_SZ', 'अंग्रेजी (स्वाजिल्याण्ड)');
INSERT INTO "list" ("id", "value") VALUES ('en_HK', 'अंग्रेजी (हङकङ चिनिया समाजवादी स्वायत्त क्षेत्र)');
INSERT INTO "list" ("id", "value") VALUES ('az', 'अजरबैजानी');
INSERT INTO "list" ("id", "value") VALUES ('az_AZ', 'अजरबैजानी (अजरबैजान)');
INSERT INTO "list" ("id", "value") VALUES ('az_Latn_AZ', 'अजरबैजानी (ल्याटिन, अजरबैजान)');
INSERT INTO "list" ("id", "value") VALUES ('az_Latn', 'अजरबैजानी (ल्याटिन)');
INSERT INTO "list" ("id", "value") VALUES ('az_Cyrl_AZ', 'अजरबैजानी (सिरिलिक, अजरबैजान)');
INSERT INTO "list" ("id", "value") VALUES ('az_Cyrl', 'अजरबैजानी (सिरिलिक)');
INSERT INTO "list" ("id", "value") VALUES ('af', 'अफ्रिकान्स');
INSERT INTO "list" ("id", "value") VALUES ('af_ZA', 'अफ्रिकान्स (दक्षिण अफ्रिका)');
INSERT INTO "list" ("id", "value") VALUES ('af_NA', 'अफ्रिकान्स (नामिबिया)');
INSERT INTO "list" ("id", "value") VALUES ('am', 'अम्हारिक');
INSERT INTO "list" ("id", "value") VALUES ('am_ET', 'अम्हारिक (इथियोपिया)');
INSERT INTO "list" ("id", "value") VALUES ('ar', 'अरबी');
INSERT INTO "list" ("id", "value") VALUES ('ar_DZ', 'अरबी (अल्जेरिया)');
INSERT INTO "list" ("id", "value") VALUES ('ar_IL', 'अरबी (इजरायल)');
INSERT INTO "list" ("id", "value") VALUES ('ar_EG', 'अरबी (इजिप्ट)');
INSERT INTO "list" ("id", "value") VALUES ('ar_IQ', 'अरबी (इराक)');
INSERT INTO "list" ("id", "value") VALUES ('ar_ER', 'अरबी (एरित्रिया)');
INSERT INTO "list" ("id", "value") VALUES ('ar_OM', 'अरबी (ओमन)');
INSERT INTO "list" ("id", "value") VALUES ('ar_QA', 'अरबी (कतार)');
INSERT INTO "list" ("id", "value") VALUES ('ar_KW', 'अरबी (कुवेत)');
INSERT INTO "list" ("id", "value") VALUES ('ar_KM', 'अरबी (कोमोरोस)');
INSERT INTO "list" ("id", "value") VALUES ('ar_TD', 'अरबी (चाड)');
INSERT INTO "list" ("id", "value") VALUES ('ar_JO', 'अरबी (जोर्डन)');
INSERT INTO "list" ("id", "value") VALUES ('ar_TN', 'अरबी (ट्युनिसिया)');
INSERT INTO "list" ("id", "value") VALUES ('ar_DJ', 'अरबी (डिजिबुटी)');
INSERT INTO "list" ("id", "value") VALUES ('ar_SS', 'अरबी (दक्षिणी सुडान)');
INSERT INTO "list" ("id", "value") VALUES ('ar_EH', 'अरबी (पश्चिमी साहारा)');
INSERT INTO "list" ("id", "value") VALUES ('ar_PS', 'अरबी (प्यालेस्टनी भू-भागहरु)');
INSERT INTO "list" ("id", "value") VALUES ('ar_BH', 'अरबी (बहराइन)');
INSERT INTO "list" ("id", "value") VALUES ('ar_MR', 'अरबी (माउरिटानिया)');
INSERT INTO "list" ("id", "value") VALUES ('ar_MA', 'अरबी (मोरोक्को)');
INSERT INTO "list" ("id", "value") VALUES ('ar_YE', 'अरबी (येमेन)');
INSERT INTO "list" ("id", "value") VALUES ('ar_LY', 'अरबी (लिबिया)');
INSERT INTO "list" ("id", "value") VALUES ('ar_LB', 'अरबी (लेबनन)');
INSERT INTO "list" ("id", "value") VALUES ('ar_AE', 'अरबी (संयुक्त अरब इमिराट्स)');
INSERT INTO "list" ("id", "value") VALUES ('ar_SA', 'अरबी (साउदी अरब)');
INSERT INTO "list" ("id", "value") VALUES ('ar_SY', 'अरबी (सिरिया)');
INSERT INTO "list" ("id", "value") VALUES ('ar_SD', 'अरबी (सुडान)');
INSERT INTO "list" ("id", "value") VALUES ('ar_SO', 'अरबी (सोमालिया)');
INSERT INTO "list" ("id", "value") VALUES ('sq', 'अल्बेनियन');
INSERT INTO "list" ("id", "value") VALUES ('sq_AL', 'अल्बेनियन (अल्बानिया)');
INSERT INTO "list" ("id", "value") VALUES ('sq_XK', 'अल्बेनियन (कोसोवो)');
INSERT INTO "list" ("id", "value") VALUES ('sq_MK', 'अल्बेनियन (म्याकेडोनिया)');
INSERT INTO "list" ("id", "value") VALUES ('ga', 'आइरिश');
INSERT INTO "list" ("id", "value") VALUES ('ga_IE', 'आइरिश (आयरल्याण्ड)');
INSERT INTO "list" ("id", "value") VALUES ('is', 'आइसल्यान्डिक');
INSERT INTO "list" ("id", "value") VALUES ('is_IS', 'आइसल्यान्डिक (आइस्ल्याण्ड)');
INSERT INTO "list" ("id", "value") VALUES ('ak', 'आकान');
INSERT INTO "list" ("id", "value") VALUES ('ak_GH', 'आकान (घाना)');
INSERT INTO "list" ("id", "value") VALUES ('hy', 'आर्मेनियाली');
INSERT INTO "list" ("id", "value") VALUES ('hy_AM', 'आर्मेनियाली (आर्मेनिया)');
INSERT INTO "list" ("id", "value") VALUES ('as', 'आसामी');
INSERT INTO "list" ("id", "value") VALUES ('as_IN', 'आसामी (भारत)');
INSERT INTO "list" ("id", "value") VALUES ('ig', 'इग्बो');
INSERT INTO "list" ("id", "value") VALUES ('ig_NG', 'इग्बो (नाइजेरिया)');
INSERT INTO "list" ("id", "value") VALUES ('it', 'इटालियन');
INSERT INTO "list" ("id", "value") VALUES ('it_IT', 'इटालियन (इटाली)');
INSERT INTO "list" ("id", "value") VALUES ('it_SM', 'इटालियन (सान् मारिनो)');
INSERT INTO "list" ("id", "value") VALUES ('it_CH', 'इटालियन (स्विजरल्याण्ड)');
INSERT INTO "list" ("id", "value") VALUES ('id', 'इन्डोनेसियाली');
INSERT INTO "list" ("id", "value") VALUES ('id_ID', 'इन्डोनेसियाली (इन्डोनेशिया)');
INSERT INTO "list" ("id", "value") VALUES ('ee', 'इवि');
INSERT INTO "list" ("id", "value") VALUES ('ee_GH', 'इवि (घाना)');
INSERT INTO "list" ("id", "value") VALUES ('ee_TG', 'इवि (टोगो)');
INSERT INTO "list" ("id", "value") VALUES ('et', 'इस्टोनियाली');
INSERT INTO "list" ("id", "value") VALUES ('et_EE', 'इस्टोनियाली (इस्टोनिया)');
INSERT INTO "list" ("id", "value") VALUES ('ug', 'उइघुर');
INSERT INTO "list" ("id", "value") VALUES ('ug_Arab_CN', 'उइघुर (अरबी, चीन)');
INSERT INTO "list" ("id", "value") VALUES ('ug_Arab', 'उइघुर (अरबी)');
INSERT INTO "list" ("id", "value") VALUES ('ug_CN', 'उइघुर (चीन)');
INSERT INTO "list" ("id", "value") VALUES ('uz', 'उज्बेकी');
INSERT INTO "list" ("id", "value") VALUES ('uz_AF', 'उज्बेकी (अफगानिस्तान)');
INSERT INTO "list" ("id", "value") VALUES ('uz_Arab_AF', 'उज्बेकी (अरबी, अफगानिस्तान)');
INSERT INTO "list" ("id", "value") VALUES ('uz_Arab', 'उज्बेकी (अरबी)');
INSERT INTO "list" ("id", "value") VALUES ('uz_UZ', 'उज्बेकी (उज्बेकिस्तान)');
INSERT INTO "list" ("id", "value") VALUES ('uz_Latn_UZ', 'उज्बेकी (ल्याटिन, उज्बेकिस्तान)');
INSERT INTO "list" ("id", "value") VALUES ('uz_Latn', 'उज्बेकी (ल्याटिन)');
INSERT INTO "list" ("id", "value") VALUES ('uz_Cyrl_UZ', 'उज्बेकी (सिरिलिक, उज्बेकिस्तान)');
INSERT INTO "list" ("id", "value") VALUES ('uz_Cyrl', 'उज्बेकी (सिरिलिक)');
INSERT INTO "list" ("id", "value") VALUES ('nd', 'उत्तर नेडेबेले');
INSERT INTO "list" ("id", "value") VALUES ('nd_ZW', 'उत्तर नेडेबेले (जिम्बाबे)');
INSERT INTO "list" ("id", "value") VALUES ('se', 'उत्तरी सामी');
INSERT INTO "list" ("id", "value") VALUES ('se_NO', 'उत्तरी सामी (नर्वे)');
INSERT INTO "list" ("id", "value") VALUES ('se_FI', 'उत्तरी सामी (फिन्ल्याण्ड)');
INSERT INTO "list" ("id", "value") VALUES ('se_SE', 'उत्तरी सामी (स्विडेन)');
INSERT INTO "list" ("id", "value") VALUES ('ur', 'उर्दु');
INSERT INTO "list" ("id", "value") VALUES ('ur_PK', 'उर्दु (पाकिस्तान)');
INSERT INTO "list" ("id", "value") VALUES ('ur_IN', 'उर्दु (भारत)');
INSERT INTO "list" ("id", "value") VALUES ('eo', 'एस्पेरान्तो');
INSERT INTO "list" ("id", "value") VALUES ('or', 'ओरिया');
INSERT INTO "list" ("id", "value") VALUES ('or_IN', 'ओरिया (भारत)');
INSERT INTO "list" ("id", "value") VALUES ('om', 'ओरोमो');
INSERT INTO "list" ("id", "value") VALUES ('om_ET', 'ओरोमो (इथियोपिया)');
INSERT INTO "list" ("id", "value") VALUES ('om_KE', 'ओरोमो (केन्या)');
INSERT INTO "list" ("id", "value") VALUES ('kn', 'कन्नाडा');
INSERT INTO "list" ("id", "value") VALUES ('kn_IN', 'कन्नाडा (भारत)');
INSERT INTO "list" ("id", "value") VALUES ('kk', 'काजाख');
INSERT INTO "list" ("id", "value") VALUES ('kk_KZ', 'काजाख (काजाकस्तान)');
INSERT INTO "list" ("id", "value") VALUES ('kk_Cyrl_KZ', 'काजाख (सिरिलिक, काजाकस्तान)');
INSERT INTO "list" ("id", "value") VALUES ('kk_Cyrl', 'काजाख (सिरिलिक)');
INSERT INTO "list" ("id", "value") VALUES ('kl', 'कालालिसुट');
INSERT INTO "list" ("id", "value") VALUES ('kl_GL', 'कालालिसुट (ग्रिनल्याण्ड)');
INSERT INTO "list" ("id", "value") VALUES ('ks', 'काश्मीरी');
INSERT INTO "list" ("id", "value") VALUES ('ks_Arab_IN', 'काश्मीरी (अरबी, भारत)');
INSERT INTO "list" ("id", "value") VALUES ('ks_Arab', 'काश्मीरी (अरबी)');
INSERT INTO "list" ("id", "value") VALUES ('ks_IN', 'काश्मीरी (भारत)');
INSERT INTO "list" ("id", "value") VALUES ('ki', 'किकुयु');
INSERT INTO "list" ("id", "value") VALUES ('ki_KE', 'किकुयु (केन्या)');
INSERT INTO "list" ("id", "value") VALUES ('rw', 'किन्यारवान्डा');
INSERT INTO "list" ("id", "value") VALUES ('rw_RW', 'किन्यारवान्डा (रवाण्डा)');
INSERT INTO "list" ("id", "value") VALUES ('ky', 'किर्गिज');
INSERT INTO "list" ("id", "value") VALUES ('ky_KG', 'किर्गिज (किर्गिस्थान)');
INSERT INTO "list" ("id", "value") VALUES ('ky_Cyrl_KG', 'किर्गिज (सिरिलिक, किर्गिस्थान)');
INSERT INTO "list" ("id", "value") VALUES ('ky_Cyrl', 'किर्गिज (सिरिलिक)');
INSERT INTO "list" ("id", "value") VALUES ('ko', 'कोरियाली');
INSERT INTO "list" ("id", "value") VALUES ('ko_KP', 'कोरियाली (उत्तर कोरिया)');
INSERT INTO "list" ("id", "value") VALUES ('ko_KR', 'कोरियाली (दक्षिण कोरिया)');
INSERT INTO "list" ("id", "value") VALUES ('kw', 'कोर्निश');
INSERT INTO "list" ("id", "value") VALUES ('kw_GB', 'कोर्निश (बेलायत)');
INSERT INTO "list" ("id", "value") VALUES ('ca', 'क्याटालन');
INSERT INTO "list" ("id", "value") VALUES ('ca_AD', 'क्याटालन (अन्डोर्रा)');
INSERT INTO "list" ("id", "value") VALUES ('ca_IT', 'क्याटालन (इटाली)');
INSERT INTO "list" ("id", "value") VALUES ('ca_FR', 'क्याटालन (फ्रान्स)');
INSERT INTO "list" ("id", "value") VALUES ('ca_ES', 'क्याटालन (स्पेन)');
INSERT INTO "list" ("id", "value") VALUES ('hr', 'क्रोएशियाली');
INSERT INTO "list" ("id", "value") VALUES ('hr_HR', 'क्रोएशियाली (क्रोएशिया)');
INSERT INTO "list" ("id", "value") VALUES ('hr_BA', 'क्रोएशियाली (बोस्निया एण्ड हर्जगोभिनिया)');
INSERT INTO "list" ("id", "value") VALUES ('qu', 'क्वेचुवा');
INSERT INTO "list" ("id", "value") VALUES ('qu_EC', 'क्वेचुवा (इक्वडेर)');
INSERT INTO "list" ("id", "value") VALUES ('qu_PE', 'क्वेचुवा (पेरू)');
INSERT INTO "list" ("id", "value") VALUES ('qu_BO', 'क्वेचुवा (बोलिभिया)');
INSERT INTO "list" ("id", "value") VALUES ('km', 'खमेर');
INSERT INTO "list" ("id", "value") VALUES ('km_KH', 'खमेर (कम्बोडिया)');
INSERT INTO "list" ("id", "value") VALUES ('gl', 'गलिसियाली');
INSERT INTO "list" ("id", "value") VALUES ('gl_ES', 'गलिसियाली (स्पेन)');
INSERT INTO "list" ("id", "value") VALUES ('lg', 'गान्डा');
INSERT INTO "list" ("id", "value") VALUES ('lg_UG', 'गान्डा (युगाण्डा)');
INSERT INTO "list" ("id", "value") VALUES ('gu', 'गुजराती');
INSERT INTO "list" ("id", "value") VALUES ('gu_IN', 'गुजराती (भारत)');
INSERT INTO "list" ("id", "value") VALUES ('el', 'ग्रीक');
INSERT INTO "list" ("id", "value") VALUES ('el_GR', 'ग्रीक (ग्रिस)');
INSERT INTO "list" ("id", "value") VALUES ('el_CY', 'ग्रीक (साइप्रस)');
INSERT INTO "list" ("id", "value") VALUES ('zh', 'चिनियाँ');
INSERT INTO "list" ("id", "value") VALUES ('zh_CN', 'चिनियाँ (चीन)');
INSERT INTO "list" ("id", "value") VALUES ('zh_TW', 'चिनियाँ (ताइवान)');
INSERT INTO "list" ("id", "value") VALUES ('zh_Hant_TW', 'चिनियाँ (परम्परागत चिनी, ताइवान)');
INSERT INTO "list" ("id", "value") VALUES ('zh_Hant_MO', 'चिनियाँ (परम्परागत चिनी, मकावो चिनिँया स्वशासित क्षेत्र)');
INSERT INTO "list" ("id", "value") VALUES ('zh_Hant_HK', 'चिनियाँ (परम्परागत चिनी, हङकङ चिनिया समाजवादी स्वायत्त क्षेत्र)');
INSERT INTO "list" ("id", "value") VALUES ('zh_Hant', 'चिनियाँ (परम्परागत चिनी)');
INSERT INTO "list" ("id", "value") VALUES ('zh_MO', 'चिनियाँ (मकावो चिनिँया स्वशासित क्षेत्र)');
INSERT INTO "list" ("id", "value") VALUES ('zh_Hans_CN', 'चिनियाँ (सरलिकृत चिनी, चीन)');
INSERT INTO "list" ("id", "value") VALUES ('zh_Hans_MO', 'चिनियाँ (सरलिकृत चिनी, मकावो चिनिँया स्वशासित क्षेत्र)');
INSERT INTO "list" ("id", "value") VALUES ('zh_Hans_SG', 'चिनियाँ (सरलिकृत चिनी, सिङ्गापुर)');
INSERT INTO "list" ("id", "value") VALUES ('zh_Hans_HK', 'चिनियाँ (सरलिकृत चिनी, हङकङ चिनिया समाजवादी स्वायत्त क्षेत्र)');
INSERT INTO "list" ("id", "value") VALUES ('zh_Hans', 'चिनियाँ (सरलिकृत चिनी)');
INSERT INTO "list" ("id", "value") VALUES ('zh_SG', 'चिनियाँ (सिङ्गापुर)');
INSERT INTO "list" ("id", "value") VALUES ('zh_HK', 'चिनियाँ (हङकङ चिनिया समाजवादी स्वायत्त क्षेत्र)');
INSERT INTO "list" ("id", "value") VALUES ('cs', 'चेक');
INSERT INTO "list" ("id", "value") VALUES ('cs_CZ', 'चेक (चेक गणतन्त्र)');
INSERT INTO "list" ("id", "value") VALUES ('ka', 'जर्जियाली');
INSERT INTO "list" ("id", "value") VALUES ('ka_GE', 'जर्जियाली (जर्जिया)');
INSERT INTO "list" ("id", "value") VALUES ('de', 'जर्मन');
INSERT INTO "list" ("id", "value") VALUES ('de_AT', 'जर्मन (अष्ट्रिया)');
INSERT INTO "list" ("id", "value") VALUES ('de_DE', 'जर्मन (जर्मनी)');
INSERT INTO "list" ("id", "value") VALUES ('de_BE', 'जर्मन (बेल्जियम)');
INSERT INTO "list" ("id", "value") VALUES ('de_LU', 'जर्मन (लक्जेमबर्ग)');
INSERT INTO "list" ("id", "value") VALUES ('de_LI', 'जर्मन (लिएखटेन्स्टाइन)');
INSERT INTO "list" ("id", "value") VALUES ('de_CH', 'जर्मन (स्विजरल्याण्ड)');
INSERT INTO "list" ("id", "value") VALUES ('ja', 'जापानी');
INSERT INTO "list" ("id", "value") VALUES ('ja_JP', 'जापानी (जापान)');
INSERT INTO "list" ("id", "value") VALUES ('zu', 'जुलु');
INSERT INTO "list" ("id", "value") VALUES ('zu_ZA', 'जुलु (दक्षिण अफ्रिका)');
INSERT INTO "list" ("id", "value") VALUES ('dz', 'जोङ्खा');
INSERT INTO "list" ("id", "value") VALUES ('dz_BT', 'जोङ्खा (भुटान)');
INSERT INTO "list" ("id", "value") VALUES ('tr', 'टर्किश');
INSERT INTO "list" ("id", "value") VALUES ('tr_TR', 'टर्किश (टर्की)');
INSERT INTO "list" ("id", "value") VALUES ('tr_CY', 'टर्किश (साइप्रस)');
INSERT INTO "list" ("id", "value") VALUES ('to', 'टोङ्गन');
INSERT INTO "list" ("id", "value") VALUES ('to_TO', 'टोङ्गन (टोंगा)');
INSERT INTO "list" ("id", "value") VALUES ('nl', 'डच');
INSERT INTO "list" ("id", "value") VALUES ('nl_AW', 'डच (आरूबा)');
INSERT INTO "list" ("id", "value") VALUES ('nl_CW', 'डच (कुराकाओ)');
INSERT INTO "list" ("id", "value") VALUES ('nl_BQ', 'डच (क्यारिवियन नेदरल्याण्ड्स)');
INSERT INTO "list" ("id", "value") VALUES ('nl_NL', 'डच (नेदरल्याण्ड्स)');
INSERT INTO "list" ("id", "value") VALUES ('nl_BE', 'डच (बेल्जियम)');
INSERT INTO "list" ("id", "value") VALUES ('nl_SX', 'डच (सिन्ट मार्टेन)');
INSERT INTO "list" ("id", "value") VALUES ('nl_SR', 'डच (सुरिनेम)');
INSERT INTO "list" ("id", "value") VALUES ('da', 'डेनिश');
INSERT INTO "list" ("id", "value") VALUES ('da_GL', 'डेनिश (ग्रिनल्याण्ड)');
INSERT INTO "list" ("id", "value") VALUES ('da_DK', 'डेनिश (डेनमार्क)');
INSERT INTO "list" ("id", "value") VALUES ('ta', 'तामिल');
INSERT INTO "list" ("id", "value") VALUES ('ta_IN', 'तामिल (भारत)');
INSERT INTO "list" ("id", "value") VALUES ('ta_MY', 'तामिल (मलेसिया)');
INSERT INTO "list" ("id", "value") VALUES ('ta_LK', 'तामिल (श्रीलङ्का)');
INSERT INTO "list" ("id", "value") VALUES ('ta_SG', 'तामिल (सिङ्गापुर)');
INSERT INTO "list" ("id", "value") VALUES ('ti', 'तिग्रीन्या');
INSERT INTO "list" ("id", "value") VALUES ('ti_ET', 'तिग्रीन्या (इथियोपिया)');
INSERT INTO "list" ("id", "value") VALUES ('ti_ER', 'तिग्रीन्या (एरित्रिया)');
INSERT INTO "list" ("id", "value") VALUES ('bo', 'तिब्बती');
INSERT INTO "list" ("id", "value") VALUES ('bo_CN', 'तिब्बती (चीन)');
INSERT INTO "list" ("id", "value") VALUES ('bo_IN', 'तिब्बती (भारत)');
INSERT INTO "list" ("id", "value") VALUES ('te', 'तेलुगु');
INSERT INTO "list" ("id", "value") VALUES ('te_IN', 'तेलुगु (भारत)');
INSERT INTO "list" ("id", "value") VALUES ('th', 'थाई');
INSERT INTO "list" ("id", "value") VALUES ('th_TH', 'थाई (थाइल्याण्ड)');
INSERT INTO "list" ("id", "value") VALUES ('nn', 'नर्वेली नाइनोर्स्क');
INSERT INTO "list" ("id", "value") VALUES ('nn_NO', 'नर्वेली नाइनोर्स्क (नर्वे)');
INSERT INTO "list" ("id", "value") VALUES ('nb', 'नर्वेली बोकमाल');
INSERT INTO "list" ("id", "value") VALUES ('nb_NO', 'नर्वेली बोकमाल (नर्वे)');
INSERT INTO "list" ("id", "value") VALUES ('nb_SJ', 'नर्वेली बोकमाल (सभाल्बार्ड र जान मायेन)');
INSERT INTO "list" ("id", "value") VALUES ('ne', 'नेपाली');
INSERT INTO "list" ("id", "value") VALUES ('ne_NP', 'नेपाली (नेपाल)');
INSERT INTO "list" ("id", "value") VALUES ('ne_IN', 'नेपाली (भारत)');
INSERT INTO "list" ("id", "value") VALUES ('pa', 'पंजाबी');
INSERT INTO "list" ("id", "value") VALUES ('pa_Arab_PK', 'पंजाबी (अरबी, पाकिस्तान)');
INSERT INTO "list" ("id", "value") VALUES ('pa_Arab', 'पंजाबी (अरबी)');
INSERT INTO "list" ("id", "value") VALUES ('pa_Guru_IN', 'पंजाबी (गुरूमुखी, भारत)');
INSERT INTO "list" ("id", "value") VALUES ('pa_Guru', 'पंजाबी (गुरूमुखी)');
INSERT INTO "list" ("id", "value") VALUES ('pa_PK', 'पंजाबी (पाकिस्तान)');
INSERT INTO "list" ("id", "value") VALUES ('pa_IN', 'पंजाबी (भारत)');
INSERT INTO "list" ("id", "value") VALUES ('ps', 'पाश्तो');
INSERT INTO "list" ("id", "value") VALUES ('ps_AF', 'पाश्तो (अफगानिस्तान)');
INSERT INTO "list" ("id", "value") VALUES ('pt', 'पोर्तुगी');
INSERT INTO "list" ("id", "value") VALUES ('pt_AO', 'पोर्तुगी (अङ्गोला)');
INSERT INTO "list" ("id", "value") VALUES ('pt_CV', 'पोर्तुगी (केप भर्डे)');
INSERT INTO "list" ("id", "value") VALUES ('pt_GW', 'पोर्तुगी (गिनी-बिसाउ)');
INSERT INTO "list" ("id", "value") VALUES ('pt_TL', 'पोर्तुगी (टिमोर-लेस्टे)');
INSERT INTO "list" ("id", "value") VALUES ('pt_PT', 'पोर्तुगी (पोर्चुगल)');
INSERT INTO "list" ("id", "value") VALUES ('pt_BR', 'पोर्तुगी (ब्राजिल)');
INSERT INTO "list" ("id", "value") VALUES ('pt_MO', 'पोर्तुगी (मकावो चिनिँया स्वशासित क्षेत्र)');
INSERT INTO "list" ("id", "value") VALUES ('pt_MZ', 'पोर्तुगी (मोजाम्बिक)');
INSERT INTO "list" ("id", "value") VALUES ('pt_ST', 'पोर्तुगी (साओ टोमे र प्रिन्सिप)');
INSERT INTO "list" ("id", "value") VALUES ('pl', 'पोलिश');
INSERT INTO "list" ("id", "value") VALUES ('pl_PL', 'पोलिश (पोल्याण्ड)');
INSERT INTO "list" ("id", "value") VALUES ('fa', 'फारसी');
INSERT INTO "list" ("id", "value") VALUES ('fa_AF', 'फारसी (अफगानिस्तान)');
INSERT INTO "list" ("id", "value") VALUES ('fa_IR', 'फारसी (इरान)');
INSERT INTO "list" ("id", "value") VALUES ('fo', 'फारोज');
INSERT INTO "list" ("id", "value") VALUES ('fo_FO', 'फारोज (फारोर टापुहरु)');
INSERT INTO "list" ("id", "value") VALUES ('fi', 'फिनिश');
INSERT INTO "list" ("id", "value") VALUES ('fi_FI', 'फिनिश (फिन्ल्याण्ड)');
INSERT INTO "list" ("id", "value") VALUES ('fr', 'फ्रान्सेली');
INSERT INTO "list" ("id", "value") VALUES ('fr_DZ', 'फ्रान्सेली (अल्जेरिया)');
INSERT INTO "list" ("id", "value") VALUES ('fr_CI', 'फ्रान्सेली (आइभरी कोस्ट)');
INSERT INTO "list" ("id", "value") VALUES ('fr_CF', 'फ्रान्सेली (केन्द्रीय अफ्रिकी गणतन्त्र)');
INSERT INTO "list" ("id", "value") VALUES ('fr_CG', 'फ्रान्सेली (कोङ्गो - ब्राज्जाभिल्ले)');
INSERT INTO "list" ("id", "value") VALUES ('fr_CD', 'फ्रान्सेली (कोङ्गो-किन्शासा)');
INSERT INTO "list" ("id", "value") VALUES ('fr_KM', 'फ्रान्सेली (कोमोरोस)');
INSERT INTO "list" ("id", "value") VALUES ('fr_CA', 'फ्रान्सेली (क्यानाडा)');
INSERT INTO "list" ("id", "value") VALUES ('fr_CM', 'फ्रान्सेली (क्यामरून)');
INSERT INTO "list" ("id", "value") VALUES ('fr_GA', 'फ्रान्सेली (गावोन)');
INSERT INTO "list" ("id", "value") VALUES ('fr_GN', 'फ्रान्सेली (गिनी)');
INSERT INTO "list" ("id", "value") VALUES ('fr_GP', 'फ्रान्सेली (ग्वाडेलुप)');
INSERT INTO "list" ("id", "value") VALUES ('fr_TD', 'फ्रान्सेली (चाड)');
INSERT INTO "list" ("id", "value") VALUES ('fr_TG', 'फ्रान्सेली (टोगो)');
INSERT INTO "list" ("id", "value") VALUES ('fr_TN', 'फ्रान्सेली (ट्युनिसिया)');
INSERT INTO "list" ("id", "value") VALUES ('fr_DJ', 'फ्रान्सेली (डिजिबुटी)');
INSERT INTO "list" ("id", "value") VALUES ('fr_NC', 'फ्रान्सेली (नयाँ कालेडोनिया)');
INSERT INTO "list" ("id", "value") VALUES ('fr_NE', 'फ्रान्सेली (नाइजर)');
INSERT INTO "list" ("id", "value") VALUES ('fr_FR', 'फ्रान्सेली (फ्रान्स)');
INSERT INTO "list" ("id", "value") VALUES ('fr_GF', 'फ्रान्सेली (फ्रान्सेली गायना)');
INSERT INTO "list" ("id", "value") VALUES ('fr_PF', 'फ्रान्सेली (फ्रान्सेली पोलिनेसिया)');
INSERT INTO "list" ("id", "value") VALUES ('fr_BF', 'फ्रान्सेली (बर्किना फासो)');
INSERT INTO "list" ("id", "value") VALUES ('fr_BI', 'फ्रान्सेली (बुरूण्डी)');
INSERT INTO "list" ("id", "value") VALUES ('fr_BJ', 'फ्रान्सेली (बेनिन)');
INSERT INTO "list" ("id", "value") VALUES ('fr_BE', 'फ्रान्सेली (बेल्जियम)');
INSERT INTO "list" ("id", "value") VALUES ('fr_VU', 'फ्रान्सेली (भानुआतु)');
INSERT INTO "list" ("id", "value") VALUES ('fr_GQ', 'फ्रान्सेली (भू-मध्यीय गिनी)');
INSERT INTO "list" ("id", "value") VALUES ('fr_MG', 'फ्रान्सेली (मडागास्कर)');
INSERT INTO "list" ("id", "value") VALUES ('fr_MU', 'फ्रान्सेली (माउरिटस)');
INSERT INTO "list" ("id", "value") VALUES ('fr_MR', 'फ्रान्सेली (माउरिटानिया)');
INSERT INTO "list" ("id", "value") VALUES ('fr_YT', 'फ्रान्सेली (मायोट्ट)');
INSERT INTO "list" ("id", "value") VALUES ('fr_MQ', 'फ्रान्सेली (मार्टिनिक)');
INSERT INTO "list" ("id", "value") VALUES ('fr_ML', 'फ्रान्सेली (माली)');
INSERT INTO "list" ("id", "value") VALUES ('fr_MC', 'फ्रान्सेली (मोनाको)');
INSERT INTO "list" ("id", "value") VALUES ('fr_MA', 'फ्रान्सेली (मोरोक्को)');
INSERT INTO "list" ("id", "value") VALUES ('fr_RW', 'फ्रान्सेली (रवाण्डा)');
INSERT INTO "list" ("id", "value") VALUES ('fr_RE', 'फ्रान्सेली (रियुनियन)');
INSERT INTO "list" ("id", "value") VALUES ('fr_LU', 'फ्रान्सेली (लक्जेमबर्ग)');
INSERT INTO "list" ("id", "value") VALUES ('fr_WF', 'फ्रान्सेली (वालिस र फुटुना)');
INSERT INTO "list" ("id", "value") VALUES ('fr_SY', 'फ्रान्सेली (सिरिया)');
INSERT INTO "list" ("id", "value") VALUES ('fr_SC', 'फ्रान्सेली (सेचेलेस)');
INSERT INTO "list" ("id", "value") VALUES ('fr_SN', 'फ्रान्सेली (सेनेगाल)');
INSERT INTO "list" ("id", "value") VALUES ('fr_PM', 'फ्रान्सेली (सेन्ट पिर्रे र मिक्केलोन)');
INSERT INTO "list" ("id", "value") VALUES ('fr_BL', 'फ्रान्सेली (सेन्ट बार्थालेमी)');
INSERT INTO "list" ("id", "value") VALUES ('fr_MF', 'फ्रान्सेली (सेन्ट मार्टिन)');
INSERT INTO "list" ("id", "value") VALUES ('fr_CH', 'फ्रान्सेली (स्विजरल्याण्ड)');
INSERT INTO "list" ("id", "value") VALUES ('fr_HT', 'फ्रान्सेली (हैटी)');
INSERT INTO "list" ("id", "value") VALUES ('fy', 'फ्रिजीयन');
INSERT INTO "list" ("id", "value") VALUES ('fy_NL', 'फ्रिजीयन (नेदरल्याण्ड्स)');
INSERT INTO "list" ("id", "value") VALUES ('bn', 'बंगाली');
INSERT INTO "list" ("id", "value") VALUES ('bn_BD', 'बंगाली (बङ्गलादेश)');
INSERT INTO "list" ("id", "value") VALUES ('bn_IN', 'बंगाली (भारत)');
INSERT INTO "list" ("id", "value") VALUES ('bm', 'बाम्बारा');
INSERT INTO "list" ("id", "value") VALUES ('bm_Latn_ML', 'बाम्बारा (ल्याटिन, माली)');
INSERT INTO "list" ("id", "value") VALUES ('bm_Latn', 'बाम्बारा (ल्याटिन)');
INSERT INTO "list" ("id", "value") VALUES ('eu', 'बास्क');
INSERT INTO "list" ("id", "value") VALUES ('eu_ES', 'बास्क (स्पेन)');
INSERT INTO "list" ("id", "value") VALUES ('bg', 'बुल्गेरियाली');
INSERT INTO "list" ("id", "value") VALUES ('bg_BG', 'बुल्गेरियाली (बुल्गेरिया)');
INSERT INTO "list" ("id", "value") VALUES ('bs', 'बोस्नियाली');
INSERT INTO "list" ("id", "value") VALUES ('bs_BA', 'बोस्नियाली (बोस्निया एण्ड हर्जगोभिनिया)');
INSERT INTO "list" ("id", "value") VALUES ('bs_Latn_BA', 'बोस्नियाली (ल्याटिन, बोस्निया एण्ड हर्जगोभिनिया)');
INSERT INTO "list" ("id", "value") VALUES ('bs_Latn', 'बोस्नियाली (ल्याटिन)');
INSERT INTO "list" ("id", "value") VALUES ('bs_Cyrl_BA', 'बोस्नियाली (सिरिलिक, बोस्निया एण्ड हर्जगोभिनिया)');
INSERT INTO "list" ("id", "value") VALUES ('bs_Cyrl', 'बोस्नियाली (सिरिलिक)');
INSERT INTO "list" ("id", "value") VALUES ('br', 'ब्रेटन');
INSERT INTO "list" ("id", "value") VALUES ('br_FR', 'ब्रेटन (फ्रान्स)');
INSERT INTO "list" ("id", "value") VALUES ('vi', 'भियतनामी');
INSERT INTO "list" ("id", "value") VALUES ('vi_VN', 'भियतनामी (भिएतनाम)');
INSERT INTO "list" ("id", "value") VALUES ('mn', 'मंगोल');
INSERT INTO "list" ("id", "value") VALUES ('mn_MN', 'मंगोल (मङ्गोलिया)');
INSERT INTO "list" ("id", "value") VALUES ('mn_Cyrl_MN', 'मंगोल (सिरिलिक, मङ्गोलिया)');
INSERT INTO "list" ("id", "value") VALUES ('mn_Cyrl', 'मंगोल (सिरिलिक)');
INSERT INTO "list" ("id", "value") VALUES ('mr', 'मराठी');
INSERT INTO "list" ("id", "value") VALUES ('mr_IN', 'मराठी (भारत)');
INSERT INTO "list" ("id", "value") VALUES ('ml', 'मलयालम');
INSERT INTO "list" ("id", "value") VALUES ('ml_IN', 'मलयालम (भारत)');
INSERT INTO "list" ("id", "value") VALUES ('mg', 'मलागासी');
INSERT INTO "list" ("id", "value") VALUES ('mg_MG', 'मलागासी (मडागास्कर)');
INSERT INTO "list" ("id", "value") VALUES ('ms', 'मलाया');
INSERT INTO "list" ("id", "value") VALUES ('ms_BN', 'मलाया (ब्रुनाइ)');
INSERT INTO "list" ("id", "value") VALUES ('ms_MY', 'मलाया (मलेसिया)');
INSERT INTO "list" ("id", "value") VALUES ('ms_Latn_BN', 'मलाया (ल्याटिन, ब्रुनाइ)');
INSERT INTO "list" ("id", "value") VALUES ('ms_Latn_MY', 'मलाया (ल्याटिन, मलेसिया)');
INSERT INTO "list" ("id", "value") VALUES ('ms_Latn_SG', 'मलाया (ल्याटिन, सिङ्गापुर)');
INSERT INTO "list" ("id", "value") VALUES ('ms_Latn', 'मलाया (ल्याटिन)');
INSERT INTO "list" ("id", "value") VALUES ('ms_SG', 'मलाया (सिङ्गापुर)');
INSERT INTO "list" ("id", "value") VALUES ('gv', 'मान्क्स');
INSERT INTO "list" ("id", "value") VALUES ('gv_IM', 'मान्क्स (आइज्ले अफ् म्यान)');
INSERT INTO "list" ("id", "value") VALUES ('mt', 'माल्टिज');
INSERT INTO "list" ("id", "value") VALUES ('mt_MT', 'माल्टिज (माल्टा)');
INSERT INTO "list" ("id", "value") VALUES ('mk', 'म्याकेडोनियन');
INSERT INTO "list" ("id", "value") VALUES ('mk_MK', 'म्याकेडोनियन (म्याकेडोनिया)');
INSERT INTO "list" ("id", "value") VALUES ('uk', 'युक्रेनी');
INSERT INTO "list" ("id", "value") VALUES ('uk_UA', 'युक्रेनी (युक्रेन)');
INSERT INTO "list" ("id", "value") VALUES ('yo', 'योरूवा');
INSERT INTO "list" ("id", "value") VALUES ('yo_NG', 'योरूवा (नाइजेरिया)');
INSERT INTO "list" ("id", "value") VALUES ('yo_BJ', 'योरूवा (बेनिन)');
INSERT INTO "list" ("id", "value") VALUES ('rn', 'रूण्डी');
INSERT INTO "list" ("id", "value") VALUES ('rn_BI', 'रूण्डी (बुरूण्डी)');
INSERT INTO "list" ("id", "value") VALUES ('ru', 'रूसी');
INSERT INTO "list" ("id", "value") VALUES ('ru_KZ', 'रूसी (काजाकस्तान)');
INSERT INTO "list" ("id", "value") VALUES ('ru_KG', 'रूसी (किर्गिस्थान)');
INSERT INTO "list" ("id", "value") VALUES ('ru_BY', 'रूसी (बेलारूस)');
INSERT INTO "list" ("id", "value") VALUES ('ru_MD', 'रूसी (माल्डोभा)');
INSERT INTO "list" ("id", "value") VALUES ('ru_UA', 'रूसी (युक्रेन)');
INSERT INTO "list" ("id", "value") VALUES ('ru_RU', 'रूसी (रूस)');
INSERT INTO "list" ("id", "value") VALUES ('ro', 'रोमानियाली');
INSERT INTO "list" ("id", "value") VALUES ('ro_MD', 'रोमानियाली (माल्डोभा)');
INSERT INTO "list" ("id", "value") VALUES ('ro_RO', 'रोमानियाली (रोमानिया)');
INSERT INTO "list" ("id", "value") VALUES ('rm', 'रोमानिश');
INSERT INTO "list" ("id", "value") VALUES ('rm_CH', 'रोमानिश (स्विजरल्याण्ड)');
INSERT INTO "list" ("id", "value") VALUES ('lb', 'लक्जेम्बर्गिस');
INSERT INTO "list" ("id", "value") VALUES ('lb_LU', 'लक्जेम्बर्गिस (लक्जेमबर्ग)');
INSERT INTO "list" ("id", "value") VALUES ('lo', 'लाओ');
INSERT INTO "list" ("id", "value") VALUES ('lo_LA', 'लाओ (लाओस)');
INSERT INTO "list" ("id", "value") VALUES ('lv', 'लात्भियाली');
INSERT INTO "list" ("id", "value") VALUES ('lv_LV', 'लात्भियाली (लाट्भिया)');
INSERT INTO "list" ("id", "value") VALUES ('ln', 'लिंगाला');
INSERT INTO "list" ("id", "value") VALUES ('ln_AO', 'लिंगाला (अङ्गोला)');
INSERT INTO "list" ("id", "value") VALUES ('ln_CF', 'लिंगाला (केन्द्रीय अफ्रिकी गणतन्त्र)');
INSERT INTO "list" ("id", "value") VALUES ('ln_CG', 'लिंगाला (कोङ्गो - ब्राज्जाभिल्ले)');
INSERT INTO "list" ("id", "value") VALUES ('ln_CD', 'लिंगाला (कोङ्गो-किन्शासा)');
INSERT INTO "list" ("id", "value") VALUES ('lt', 'लिथुआनियाली');
INSERT INTO "list" ("id", "value") VALUES ('lt_LT', 'लिथुआनियाली (लिथुअनिया)');
INSERT INTO "list" ("id", "value") VALUES ('lu', 'लुबा-काताङ्गा');
INSERT INTO "list" ("id", "value") VALUES ('lu_CD', 'लुबा-काताङ्गा (कोङ्गो-किन्शासा)');
INSERT INTO "list" ("id", "value") VALUES ('my', 'वर्मेली');
INSERT INTO "list" ("id", "value") VALUES ('my_MM', 'वर्मेली (म्यान्मार (बर्मा))');
INSERT INTO "list" ("id", "value") VALUES ('be', 'वेलारूसी');
INSERT INTO "list" ("id", "value") VALUES ('be_BY', 'वेलारूसी (बेलारूस)');
INSERT INTO "list" ("id", "value") VALUES ('cy', 'वेल्श');
INSERT INTO "list" ("id", "value") VALUES ('cy_GB', 'वेल्श (बेलायत)');
INSERT INTO "list" ("id", "value") VALUES ('sn', 'शोना');
INSERT INTO "list" ("id", "value") VALUES ('sn_ZW', 'शोना (जिम्बाबे)');
INSERT INTO "list" ("id", "value") VALUES ('sr', 'सर्बियाली');
INSERT INTO "list" ("id", "value") VALUES ('sr_XK', 'सर्बियाली (कोसोवो)');
INSERT INTO "list" ("id", "value") VALUES ('sr_BA', 'सर्बियाली (बोस्निया एण्ड हर्जगोभिनिया)');
INSERT INTO "list" ("id", "value") VALUES ('sr_ME', 'सर्बियाली (मोन्टेनेग्रो)');
INSERT INTO "list" ("id", "value") VALUES ('sr_Latn_XK', 'सर्बियाली (ल्याटिन, कोसोवो)');
INSERT INTO "list" ("id", "value") VALUES ('sr_Latn_BA', 'सर्बियाली (ल्याटिन, बोस्निया एण्ड हर्जगोभिनिया)');
INSERT INTO "list" ("id", "value") VALUES ('sr_Latn_ME', 'सर्बियाली (ल्याटिन, मोन्टेनेग्रो)');
INSERT INTO "list" ("id", "value") VALUES ('sr_Latn_RS', 'सर्बियाली (ल्याटिन, सर्बिया)');
INSERT INTO "list" ("id", "value") VALUES ('sr_Latn', 'सर्बियाली (ल्याटिन)');
INSERT INTO "list" ("id", "value") VALUES ('sr_RS', 'सर्बियाली (सर्बिया)');
INSERT INTO "list" ("id", "value") VALUES ('sr_Cyrl_XK', 'सर्बियाली (सिरिलिक, कोसोवो)');
INSERT INTO "list" ("id", "value") VALUES ('sr_Cyrl_BA', 'सर्बियाली (सिरिलिक, बोस्निया एण्ड हर्जगोभिनिया)');
INSERT INTO "list" ("id", "value") VALUES ('sr_Cyrl_ME', 'सर्बियाली (सिरिलिक, मोन्टेनेग्रो)');
INSERT INTO "list" ("id", "value") VALUES ('sr_Cyrl_RS', 'सर्बियाली (सिरिलिक, सर्बिया)');
INSERT INTO "list" ("id", "value") VALUES ('sr_Cyrl', 'सर्बियाली (सिरिलिक)');
INSERT INTO "list" ("id", "value") VALUES ('sg', 'साङ्गो');
INSERT INTO "list" ("id", "value") VALUES ('sg_CF', 'साङ्गो (केन्द्रीय अफ्रिकी गणतन्त्र)');
INSERT INTO "list" ("id", "value") VALUES ('ii', 'सिचुआन यि');
INSERT INTO "list" ("id", "value") VALUES ('ii_CN', 'सिचुआन यि (चीन)');
INSERT INTO "list" ("id", "value") VALUES ('si', 'सिन्हाला');
INSERT INTO "list" ("id", "value") VALUES ('si_LK', 'सिन्हाला (श्रीलङ्का)');
INSERT INTO "list" ("id", "value") VALUES ('so', 'सोमाली');
INSERT INTO "list" ("id", "value") VALUES ('so_ET', 'सोमाली (इथियोपिया)');
INSERT INTO "list" ("id", "value") VALUES ('so_KE', 'सोमाली (केन्या)');
INSERT INTO "list" ("id", "value") VALUES ('so_DJ', 'सोमाली (डिजिबुटी)');
INSERT INTO "list" ("id", "value") VALUES ('so_SO', 'सोमाली (सोमालिया)');
INSERT INTO "list" ("id", "value") VALUES ('es', 'स्पेनिस');
INSERT INTO "list" ("id", "value") VALUES ('es_AR', 'स्पेनिस (अर्जेन्टिना)');
INSERT INTO "list" ("id", "value") VALUES ('es_EC', 'स्पेनिस (इक्वडेर)');
INSERT INTO "list" ("id", "value") VALUES ('es_UY', 'स्पेनिस (उरूग्वे)');
INSERT INTO "list" ("id", "value") VALUES ('es_SV', 'स्पेनिस (एल् साल्भाडोर)');
INSERT INTO "list" ("id", "value") VALUES ('es_CO', 'स्पेनिस (कोलोम्बिया)');
INSERT INTO "list" ("id", "value") VALUES ('es_CR', 'स्पेनिस (कोष्टारिका)');
INSERT INTO "list" ("id", "value") VALUES ('es_IC', 'स्पेनिस (क्यानारी टापुहरू)');
INSERT INTO "list" ("id", "value") VALUES ('es_CU', 'स्पेनिस (क्युबा)');
INSERT INTO "list" ("id", "value") VALUES ('es_GT', 'स्पेनिस (ग्वाटेमाला)');
INSERT INTO "list" ("id", "value") VALUES ('es_CL', 'स्पेनिस (चिली)');
INSERT INTO "list" ("id", "value") VALUES ('es_DO', 'स्पेनिस (डोमिनिकन गणतन्त्र)');
INSERT INTO "list" ("id", "value") VALUES ('es_NI', 'स्पेनिस (निकारागुवा)');
INSERT INTO "list" ("id", "value") VALUES ('es_PA', 'स्पेनिस (पनामा)');
INSERT INTO "list" ("id", "value") VALUES ('es_PR', 'स्पेनिस (पुएर्टो रिको)');
INSERT INTO "list" ("id", "value") VALUES ('es_PE', 'स्पेनिस (पेरू)');
INSERT INTO "list" ("id", "value") VALUES ('es_PY', 'स्पेनिस (प्याराग्वे)');
INSERT INTO "list" ("id", "value") VALUES ('es_PH', 'स्पेनिस (फिलिपिन्स)');
INSERT INTO "list" ("id", "value") VALUES ('es_BO', 'स्पेनिस (बोलिभिया)');
INSERT INTO "list" ("id", "value") VALUES ('es_GQ', 'स्पेनिस (भू-मध्यीय गिनी)');
INSERT INTO "list" ("id", "value") VALUES ('es_VE', 'स्पेनिस (भेनेजुएला)');
INSERT INTO "list" ("id", "value") VALUES ('es_MX', 'स्पेनिस (मेक्सिको)');
INSERT INTO "list" ("id", "value") VALUES ('es_US', 'स्पेनिस (संयुक्त राज्य)');
INSERT INTO "list" ("id", "value") VALUES ('es_EA', 'स्पेनिस (सिउटा र मेलिला)');
INSERT INTO "list" ("id", "value") VALUES ('es_ES', 'स्पेनिस (स्पेन)');
INSERT INTO "list" ("id", "value") VALUES ('es_HN', 'स्पेनिस (हन्डुरास)');
INSERT INTO "list" ("id", "value") VALUES ('sk', 'स्लोभाकियाली');
INSERT INTO "list" ("id", "value") VALUES ('sk_SK', 'स्लोभाकियाली (स्लोभाकिया)');
INSERT INTO "list" ("id", "value") VALUES ('sl', 'स्लोभेनियाली');
INSERT INTO "list" ("id", "value") VALUES ('sl_SI', 'स्लोभेनियाली (स्लोभेनिया)');
INSERT INTO "list" ("id", "value") VALUES ('sw', 'स्वाहिली');
INSERT INTO "list" ("id", "value") VALUES ('sw_KE', 'स्वाहिली (केन्या)');
INSERT INTO "list" ("id", "value") VALUES ('sw_TZ', 'स्वाहिली (तान्जानिया)');
INSERT INTO "list" ("id", "value") VALUES ('sw_UG', 'स्वाहिली (युगाण्डा)');
INSERT INTO "list" ("id", "value") VALUES ('sv', 'स्विडिश');
INSERT INTO "list" ("id", "value") VALUES ('sv_AX', 'स्विडिश (अलान्ड टापुहरु)');
INSERT INTO "list" ("id", "value") VALUES ('sv_FI', 'स्विडिश (फिन्ल्याण्ड)');
INSERT INTO "list" ("id", "value") VALUES ('sv_SE', 'स्विडिश (स्विडेन)');
INSERT INTO "list" ("id", "value") VALUES ('hu', 'हंग्रीयाली');
INSERT INTO "list" ("id", "value") VALUES ('hu_HU', 'हंग्रीयाली (हङ्गेरी)');
INSERT INTO "list" ("id", "value") VALUES ('ha', 'हाउसा');
INSERT INTO "list" ("id", "value") VALUES ('ha_GH', 'हाउसा (घाना)');
INSERT INTO "list" ("id", "value") VALUES ('ha_NE', 'हाउसा (नाइजर)');
INSERT INTO "list" ("id", "value") VALUES ('ha_NG', 'हाउसा (नाइजेरिया)');
INSERT INTO "list" ("id", "value") VALUES ('ha_Latn_GH', 'हाउसा (ल्याटिन, घाना)');
INSERT INTO "list" ("id", "value") VALUES ('ha_Latn_NE', 'हाउसा (ल्याटिन, नाइजर)');
INSERT INTO "list" ("id", "value") VALUES ('ha_Latn_NG', 'हाउसा (ल्याटिन, नाइजेरिया)');
INSERT INTO "list" ("id", "value") VALUES ('ha_Latn', 'हाउसा (ल्याटिन)');
INSERT INTO "list" ("id", "value") VALUES ('hi', 'हिन्दी');
INSERT INTO "list" ("id", "value") VALUES ('hi_IN', 'हिन्दी (भारत)');
INSERT INTO "list" ("id", "value") VALUES ('he', 'हिब्रु');
INSERT INTO "list" ("id", "value") VALUES ('he_IL', 'हिब्रु (इजरायल)');
|
{
"pile_set_name": "Github"
}
|
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<symbolBean>
<value>470uH</value>
<rotation>0</rotation>
<xLoc>0</xLoc>
<yLoc>0</yLoc>
<index>739</index>
<color>
<red>0</red>
<green>0</green>
<blue>0</blue>
</color>
<layer>0</layer>
<name>L1</name>
<category>Passiv</category>
<width>69</width>
<height>80</height>
<lines>
<xLoc>0</xLoc>
<yLoc>0</yLoc>
<index>0</index>
<color>
<red>0</red>
<green>0</green>
<blue>0</blue>
</color>
<layer>0</layer>
<start>
<x>30</x>
<y>64</y>
</start>
<end>
<x>30</x>
<y>60</y>
</end>
<startType>STRAIGHT</startType>
<endType>STRAIGHT</endType>
<thickness>2.0</thickness>
<dashPhase>0.0</dashPhase>
<endCap>2</endCap>
<lineJoin>0</lineJoin>
<miterLimit>10.0</miterLimit>
</lines>
<lines>
<xLoc>0</xLoc>
<yLoc>0</yLoc>
<index>0</index>
<color>
<red>0</red>
<green>0</green>
<blue>0</blue>
</color>
<layer>0</layer>
<start>
<x>30</x>
<y>56</y>
</start>
<end>
<x>30</x>
<y>52</y>
</end>
<startType>STRAIGHT</startType>
<endType>STRAIGHT</endType>
<thickness>2.0</thickness>
<dashPhase>0.0</dashPhase>
<endCap>2</endCap>
<lineJoin>0</lineJoin>
<miterLimit>10.0</miterLimit>
</lines>
<lines>
<xLoc>0</xLoc>
<yLoc>0</yLoc>
<index>0</index>
<color>
<red>0</red>
<green>0</green>
<blue>0</blue>
</color>
<layer>0</layer>
<start>
<x>30</x>
<y>48</y>
</start>
<end>
<x>30</x>
<y>44</y>
</end>
<startType>STRAIGHT</startType>
<endType>STRAIGHT</endType>
<thickness>2.0</thickness>
<dashPhase>0.0</dashPhase>
<endCap>2</endCap>
<lineJoin>0</lineJoin>
<miterLimit>10.0</miterLimit>
</lines>
<lines>
<xLoc>0</xLoc>
<yLoc>0</yLoc>
<index>0</index>
<color>
<red>0</red>
<green>0</green>
<blue>0</blue>
</color>
<layer>0</layer>
<start>
<x>30</x>
<y>24</y>
</start>
<end>
<x>30</x>
<y>28</y>
</end>
<startType>STRAIGHT</startType>
<endType>STRAIGHT</endType>
<thickness>2.0</thickness>
<dashPhase>0.0</dashPhase>
<endCap>2</endCap>
<lineJoin>0</lineJoin>
<miterLimit>10.0</miterLimit>
</lines>
<lines>
<xLoc>0</xLoc>
<yLoc>0</yLoc>
<index>0</index>
<color>
<red>0</red>
<green>0</green>
<blue>0</blue>
</color>
<layer>0</layer>
<start>
<x>30</x>
<y>32</y>
</start>
<end>
<x>30</x>
<y>36</y>
</end>
<startType>STRAIGHT</startType>
<endType>STRAIGHT</endType>
<thickness>2.0</thickness>
<dashPhase>0.0</dashPhase>
<endCap>2</endCap>
<lineJoin>0</lineJoin>
<miterLimit>10.0</miterLimit>
</lines>
<lines>
<xLoc>0</xLoc>
<yLoc>0</yLoc>
<index>0</index>
<color>
<red>0</red>
<green>0</green>
<blue>0</blue>
</color>
<layer>0</layer>
<start>
<x>24</x>
<y>64</y>
</start>
<end>
<x>24</x>
<y>80</y>
</end>
<startType>STRAIGHT</startType>
<endType>STRAIGHT</endType>
<thickness>2.0</thickness>
<dashPhase>0.0</dashPhase>
<endCap>2</endCap>
<lineJoin>0</lineJoin>
<miterLimit>10.0</miterLimit>
</lines>
<lines>
<xLoc>0</xLoc>
<yLoc>0</yLoc>
<index>0</index>
<color>
<red>0</red>
<green>0</green>
<blue>0</blue>
</color>
<layer>0</layer>
<start>
<x>24</x>
<y>16</y>
</start>
<end>
<x>24</x>
<y>0</y>
</end>
<startType>STRAIGHT</startType>
<endType>STRAIGHT</endType>
<thickness>2.0</thickness>
<dashPhase>0.0</dashPhase>
<endCap>2</endCap>
<lineJoin>0</lineJoin>
<miterLimit>10.0</miterLimit>
</lines>
<lines>
<xLoc>0</xLoc>
<yLoc>0</yLoc>
<index>0</index>
<color>
<red>0</red>
<green>0</green>
<blue>0</blue>
</color>
<layer>0</layer>
<start>
<x>30</x>
<y>16</y>
</start>
<end>
<x>30</x>
<y>20</y>
</end>
<startType>STRAIGHT</startType>
<endType>STRAIGHT</endType>
<thickness>2.0</thickness>
<dashPhase>0.0</dashPhase>
<endCap>2</endCap>
<lineJoin>0</lineJoin>
<miterLimit>10.0</miterLimit>
</lines>
<ellipses>
<xLoc>16</xLoc>
<yLoc>52</yLoc>
<index>0</index>
<color>
<red>0</red>
<green>0</green>
<blue>0</blue>
</color>
<layer>0</layer>
<width>12</width>
<height>12</height>
<thickness>2.0</thickness>
<dashPhase>0.0</dashPhase>
<endCap>2</endCap>
<lineJoin>0</lineJoin>
<miterLimit>10.0</miterLimit>
<style>OUTLINED</style>
<startAngle>90</startAngle>
<rotation>180</rotation>
</ellipses>
<ellipses>
<xLoc>16</xLoc>
<yLoc>16</yLoc>
<index>0</index>
<color>
<red>0</red>
<green>0</green>
<blue>0</blue>
</color>
<layer>0</layer>
<width>12</width>
<height>12</height>
<thickness>2.0</thickness>
<dashPhase>0.0</dashPhase>
<endCap>2</endCap>
<lineJoin>0</lineJoin>
<miterLimit>10.0</miterLimit>
<style>OUTLINED</style>
<startAngle>90</startAngle>
<rotation>180</rotation>
</ellipses>
<ellipses>
<xLoc>16</xLoc>
<yLoc>28</yLoc>
<index>0</index>
<color>
<red>0</red>
<green>0</green>
<blue>0</blue>
</color>
<layer>0</layer>
<width>12</width>
<height>12</height>
<thickness>2.0</thickness>
<dashPhase>0.0</dashPhase>
<endCap>2</endCap>
<lineJoin>0</lineJoin>
<miterLimit>10.0</miterLimit>
<style>OUTLINED</style>
<startAngle>90</startAngle>
<rotation>180</rotation>
</ellipses>
<ellipses>
<xLoc>16</xLoc>
<yLoc>40</yLoc>
<index>0</index>
<color>
<red>0</red>
<green>0</green>
<blue>0</blue>
</color>
<layer>0</layer>
<width>12</width>
<height>12</height>
<thickness>2.0</thickness>
<dashPhase>0.0</dashPhase>
<endCap>2</endCap>
<lineJoin>0</lineJoin>
<miterLimit>10.0</miterLimit>
<style>OUTLINED</style>
<startAngle>90</startAngle>
<rotation>180</rotation>
</ellipses>
<textparts>
<xLoc>0</xLoc>
<yLoc>52</yLoc>
<index>0</index>
<color>
<red>0</red>
<green>0</green>
<blue>0</blue>
</color>
<layer>0</layer>
<width>0</width>
<height>0</height>
<rotation>-90</rotation>
<text>#Name</text>
<font>Arial</font>
<fontsize>12</fontsize>
<type>NAME</type>
</textparts>
<textparts>
<xLoc>31</xLoc>
<yLoc>57</yLoc>
<index>0</index>
<color>
<red>0</red>
<green>0</green>
<blue>0</blue>
</color>
<layer>0</layer>
<width>0</width>
<height>0</height>
<rotation>-90</rotation>
<text>#Value</text>
<font>Arial</font>
<fontsize>12</fontsize>
<type>VALUE</type>
</textparts>
<mirrorVertical>false</mirrorVertical>
<mirrorHorizontal>false</mirrorHorizontal>
<macroMode>true</macroMode>
<connections>
<pin>
<x>24</x>
<y>0</y>
<num>1</num>
<name>1</name>
<orientation>BOTTOM_RIGHT</orientation>
</pin>
<pin>
<x>24</x>
<y>80</y>
<num>2</num>
<name>2</name>
<orientation>BOTTOM_RIGHT</orientation>
</pin>
</connections>
<customModel>false</customModel>
<type>RESISTOR</type>
<filename>L1.xml</filename>
</symbolBean>
|
{
"pile_set_name": "Github"
}
|
// RUN: %clang_cc1 -std=c++1z %s -verify -fcxx-exceptions
// Parser test for C++17 structured bindings (decomposition declarations).
// Diagnostics are matched by -verify against the expected-* comments below,
// so line placement of those comments is significant.
struct S { int a, b, c; };
// A simple-declaration can be a decomposition declaration.
namespace SimpleDecl {
auto [a_x, b_x, c_x] = S();
void f(S s) {
auto [a, b, c] = S();
{
// Decompositions are also valid in for-init, if-init and switch-init
// positions (C++17 init-statements).
for (auto [a, b, c] = S();;) {}
if (auto [a, b, c] = S(); true) {}
switch (auto [a, b, c] = S(); 0) { case 0:; }
}
}
}
// A for-range-declaration can be a decomposition declaration.
namespace ForRangeDecl {
extern S arr[10];
void h() {
// Each element of arr is decomposed into its three members per iteration.
for (auto [a, b, c] : arr) {
}
}
}
// Other kinds of declaration cannot.
namespace OtherDecl {
// A parameter-declaration is not a simple-declaration.
// This parses as an array declaration.
void f(auto [a, b, c]); // expected-error {{'auto' not allowed in function prototype}} expected-error {{'a'}}
void g() {
// A condition is allowed as a Clang extension.
// See commentary in test/Parser/decomposed-condition.cpp
for (; auto [a, b, c] = S(); ) {} // expected-warning {{ISO C++17 does not permit structured binding declaration in a condition}} expected-error {{value of type 'S' is not contextually convertible to 'bool'}}
if (auto [a, b, c] = S()) {} // expected-warning {{ISO C++17 does not permit structured binding declaration in a condition}} expected-error {{value of type 'S' is not contextually convertible to 'bool'}}
if (int n; auto [a, b, c] = S()) {} // expected-warning {{ISO C++17 does not permit structured binding declaration in a condition}} expected-error {{value of type 'S' is not contextually convertible to 'bool'}}
switch (auto [a, b, c] = S()) {} // expected-warning {{ISO C++17 does not permit structured binding declaration in a condition}} expected-error {{statement requires expression of integer type ('S' invalid)}}
switch (int n; auto [a, b, c] = S()) {} // expected-warning {{ISO C++17 does not permit structured binding declaration in a condition}} expected-error {{statement requires expression of integer type ('S' invalid)}}
while (auto [a, b, c] = S()) {} // expected-warning {{ISO C++17 does not permit structured binding declaration in a condition}} expected-error {{value of type 'S' is not contextually convertible to 'bool'}}
// An exception-declaration is not a simple-declaration.
try {}
catch (auto [a, b, c]) {} // expected-error {{'auto' not allowed in exception declaration}} expected-error {{'a'}}
}
// A member-declaration is not a simple-declaration.
class A {
auto [a, b, c] = S(); // expected-error {{not permitted in this context}}
static auto [a, b, c] = S(); // expected-error {{not permitted in this context}}
};
}
namespace GoodSpecifiers {
void f() {
int n[1];
// cv-qualifiers combined with a ref-qualifier are accepted on a
// structured binding declaration.
const volatile auto &[a] = n;
}
}
// Specifiers that are rejected (or accepted) on a structured binding.
namespace BadSpecifiers {
typedef int I1[1];
I1 n;
struct S { int n; } s;
void f() {
// storage-class-specifiers
static auto &[a] = n; // expected-error {{cannot be declared 'static'}}
thread_local auto &[b] = n; // expected-error {{cannot be declared 'thread_local'}}
extern auto &[c] = n; // expected-error {{cannot be declared 'extern'}} expected-error {{cannot have an initializer}}
struct S {
mutable auto &[d] = n; // expected-error {{not permitted in this context}}
// function-specifiers
virtual auto &[e] = n; // expected-error {{not permitted in this context}}
explicit auto &[f] = n; // expected-error {{not permitted in this context}}
// misc decl-specifiers
friend auto &[g] = n; // expected-error {{'auto' not allowed}} expected-error {{friends can only be classes or functions}}
};
typedef auto &[h] = n; // expected-error {{cannot be declared 'typedef'}}
constexpr auto &[i] = n; // expected-error {{cannot be declared 'constexpr'}}
static constexpr thread_local auto &[j] = n; // expected-error {{cannot be declared with 'static thread_local constexpr' specifiers}}
}
inline auto &[k] = n; // expected-error {{cannot be declared 'inline'}}
const int K = 5;
void g() {
// defining-type-specifiers other than cv-qualifiers and 'auto'
S [a] = s; // expected-error {{cannot be declared with type 'BadSpecifiers::S'}}
decltype(auto) [b] = s; // expected-error {{cannot be declared with type 'decltype(auto)'}}
auto ([c]) = s; // expected-error {{cannot be declared with parentheses}}
// FIXME: This error is not very good.
auto [d]() = s; // expected-error {{expected ';'}} expected-error {{expected expression}}
auto [e][1] = s; // expected-error {{expected ';'}} expected-error {{requires an initializer}}
// FIXME: This should fire the 'misplaced array declarator' diagnostic.
int [K] arr = {0}; // expected-error {{expected ';'}} expected-error {{cannot be declared with type 'int'}} expected-error {{decomposition declaration '[K]' requires an initializer}}
int [5] arr = {0}; // expected-error {{place the brackets after the name}}
auto *[f] = s; // expected-error {{cannot be declared with type 'auto *'}} expected-error {{incompatible initializer}}
auto S::*[g] = s; // expected-error {{cannot be declared with type 'auto BadSpecifiers::S::*'}} expected-error {{incompatible initializer}}
// ref-qualifiers are OK.
auto &&[ok_1] = S();
auto &[ok_2] = s;
// attributes are OK.
[[]] auto [ok_3] = s;
alignas(S) auto [ok_4] = s;
// ... but not after the identifier or declarator.
// FIXME: These errors are not very good.
auto [bad_attr_1 [[]]] = s; // expected-error {{attribute list cannot appear here}} expected-error 2{{}}
auto [bad_attr_2] [[]] = s; // expected-error {{expected ';'}} expected-error {{}}
}
}
namespace MultiDeclarator {
struct S { int n; };
void f(S s) {
// A structured binding must be the only declarator in its declaration,
// regardless of where it appears in the declarator list.
auto [a] = s, [b] = s; // expected-error {{must be the only declaration}}
auto [c] = s, d = s; // expected-error {{must be the only declaration}}
auto e = s, [f] = s; // expected-error {{must be the only declaration}}
auto g = s, h = s, i = s, [j] = s; // expected-error {{must be the only declaration}}
}
}
namespace Template {
int n[3];
// FIXME: There's no actual rule against this...
template<typename T> auto [a, b, c] = n; // expected-error {{decomposition declaration template not supported}}
}
namespace Init {
void f() {
int arr[1];
struct S { int n; };
// A structured binding requires an initializer; a paren-initializer must
// contain exactly one expression.
auto &[bad1]; // expected-error {{decomposition declaration '[bad1]' requires an initializer}}
const auto &[bad2](S{}, S{}); // expected-error {{initializer for variable '[bad2]' with type 'const auto &' contains multiple expressions}}
const auto &[bad3](); // expected-error {{expected expression}}
auto &[good1] = arr;
auto &&[good2] = S{};
const auto &[good3](S{});
S [goodish3] = { 4 }; // expected-error {{cannot be declared with type 'S'}}
S [goodish4] { 4 }; // expected-error {{cannot be declared with type 'S'}}
}
}
|
{
"pile_set_name": "Github"
}
|
// Variables
//
// Copy settings from this file into the provided `_custom.scss` to override
// the Bootstrap defaults without modifying key, versioned files.
// Table of Contents
//
// Colors
// Options
// Spacing
// Body
// Links
// Grid breakpoints
// Grid containers
// Grid columns
// Fonts
// Components
// Tables
// Buttons
// Forms
// Dropdowns
// Z-index master list
// Navbar
// Navs
// Pagination
// Jumbotron
// Form states and alerts
// Cards
// Tooltips
// Popovers
// Badges
// Modals
// Alerts
// Progress bars
// List group
// Image thumbnails
// Figures
// Breadcrumbs
// Carousel
// Close
// Code
// Warn when the values of `$map` are not in strictly ascending order or are
// mutually incomparable (mixed units). `$map-name` only labels the map in
// the warning text.
@mixin _assert-ascending($map, $map-name) {
  $last-key: null;
  $last-value: null;
  @each $k, $v in $map {
    // The first entry has nothing to compare against.
    @if $last-value != null {
      @if not comparable($last-value, $v) {
        @warn "Potentially invalid value for #{$map-name}: This map must be in ascending order, but key '#{$k}' has value #{$v} whose unit makes it incomparable to #{$last-value}, the value of the previous key '#{$last-key}' !";
      } @else if $last-value >= $v {
        @warn "Invalid value for #{$map-name}: This map must be in ascending order, but key '#{$k}' has value #{$v} which isn't greater than #{$last-value}, the value of the previous key '#{$last-key}' !";
      }
    }
    $last-key: $k;
    $last-value: $v;
  }
}
// Replace every occurrence of `$search` in `$string` with `$replace`.
// @author Hugo Giraudel
// @param {String} $string - Initial string
// @param {String} $search - Substring to replace
// @param {String} $replace ('') - New value
// @return {String} - Updated string
@function str-replace($string, $search, $replace: "") {
  $pos: str-index($string, $search);
  // Guard clause: no remaining occurrence, nothing to do.
  @if $pos == null {
    @return $string;
  }
  // Keep the prefix, substitute the match, then recurse on the remainder.
  $before: str-slice($string, 1, $pos - 1);
  $after: str-slice($string, $pos + str-length($search));
  @return $before + $replace + str-replace($after, $search, $replace);
}
// Warn unless the first value of a breakpoint map is 0, as the grid system
// requires. The warning previously hard-coded `$grid-breakpoints` even
// though any map can be passed; `$map-name` generalizes the message and
// defaults to "$grid-breakpoints" so existing single-argument callers emit
// byte-identical output.
@mixin _assert-starts-at-zero($map, $map-name: "$grid-breakpoints") {
  // An empty map trivially passes (and `nth` on an empty list would error).
  @if length($map) > 0 {
    $first-value: nth(map-values($map), 1);
    @if $first-value != 0 {
      @warn "First breakpoint in `#{$map-name}` must start at 0, but starts at #{$first-value}.";
    }
  }
}
// General variable structure
//
// Variable format should follow the `$component-modifier-state-property` order.
// Colors
//
// Grayscale and brand colors for use across Bootstrap.
// Start with assigning color names to specific hex values.
$white: #fff !default;
$black: #000 !default;
$red: #d9534f !default;
$orange: #f0ad4e !default;
$yellow: #ffd500 !default;
$green: #5cb85c !default;
$blue: #0275d8 !default;
$teal: #5bc0de !default;
$pink: #ff5b77 !default;
$purple: #613d7c !default;
// Create grayscale
$gray-dark: #292b2c !default;
$gray: #464a4c !default;
$gray-light: #636c72 !default;
$gray-lighter: #eceeef !default;
$gray-lightest: #f7f7f9 !default;
// Reassign color vars to semantic color scheme
$brand-primary: $blue !default;
$brand-success: $green !default;
$brand-info: $teal !default;
$brand-warning: $orange !default;
$brand-danger: $red !default;
$brand-inverse: $gray-dark !default;
// Options
//
// Quickly modify global styling by enabling or disabling optional features.
$enable-rounded: true !default;
$enable-shadows: false !default;
$enable-gradients: false !default;
$enable-transitions: true !default;
$enable-hover-media-query: false !default;
$enable-grid-classes: true !default;
$enable-print-styles: true !default;
// Spacing
//
// Control the default styling of most Bootstrap elements by modifying these
// variables. Mostly focused on spacing.
// You can add more entries to the $spacers map, should you need more variation.
$spacer: 1rem !default;
$spacer-x: $spacer !default;
$spacer-y: $spacer !default;
$spacers: (
0: (
x: 0,
y: 0
),
1: (
x: ($spacer-x * .25),
y: ($spacer-y * .25)
),
2: (
x: ($spacer-x * .5),
y: ($spacer-y * .5)
),
3: (
x: $spacer-x,
y: $spacer-y
),
4: (
x: ($spacer-x * 1.5),
y: ($spacer-y * 1.5)
),
5: (
x: ($spacer-x * 3),
y: ($spacer-y * 3)
)
) !default;
$border-width: 1px !default;
// This variable affects the `.h-*` and `.w-*` classes.
$sizes: (
25: 25%,
50: 50%,
75: 75%,
100: 100%
) !default;
// Body
//
// Settings for the `<body>` element.
$body-bg: $white !default;
$body-color: $gray-dark !default;
$inverse-bg: $gray-dark !default;
$inverse-color: $gray-lighter !default;
// Links
//
// Style anchor elements.
$link-color: $brand-primary !default;
$link-decoration: none !default;
$link-hover-color: darken($link-color, 15%) !default;
$link-hover-decoration: underline !default;
// Grid breakpoints
//
// Define the minimum dimensions at which your layout will change,
// adapting to different screen sizes, for use in media queries.
$grid-breakpoints: (
xs: 0,
sm: 576px,
md: 768px,
lg: 992px,
xl: 1200px
) !default;
@include _assert-ascending($grid-breakpoints, "$grid-breakpoints");
@include _assert-starts-at-zero($grid-breakpoints);
// Grid containers
//
// Define the maximum width of `.container` for different screen sizes.
$container-max-widths: (
sm: 540px,
md: 720px,
lg: 960px,
xl: 1140px
) !default;
@include _assert-ascending($container-max-widths, "$container-max-widths");
// Grid columns
//
// Set the number of columns and specify the width of the gutters.
$grid-columns: 12 !default;
$grid-gutter-width-base: 30px !default;
$grid-gutter-widths: (
xs: $grid-gutter-width-base,
sm: $grid-gutter-width-base,
md: $grid-gutter-width-base,
lg: $grid-gutter-width-base,
xl: $grid-gutter-width-base
) !default;
// Fonts
//
// Font, line-height, and color for body text, headings, and more.
$font-family-sans-serif: -apple-system, system-ui, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif !default;
$font-family-serif: Georgia, "Times New Roman", Times, serif !default;
$font-family-monospace: Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace !default;
$font-family-base: $font-family-sans-serif !default;
$font-size-base: 1rem !default; // Assumes the browser default, typically `16px`
$font-size-lg: 1.25rem !default;
$font-size-sm: .875rem !default;
$font-size-xs: .75rem !default;
$font-weight-normal: normal !default;
$font-weight-bold: bold !default;
$font-weight-base: $font-weight-normal !default;
$line-height-base: 1.5 !default;
$font-size-h1: 2.5rem !default;
$font-size-h2: 2rem !default;
$font-size-h3: 1.75rem !default;
$font-size-h4: 1.5rem !default;
$font-size-h5: 1.25rem !default;
$font-size-h6: 1rem !default;
$headings-margin-bottom: ($spacer / 2) !default;
$headings-font-family: inherit !default;
$headings-font-weight: 500 !default;
$headings-line-height: 1.1 !default;
$headings-color: inherit !default;
$display1-size: 6rem !default;
$display2-size: 5.5rem !default;
$display3-size: 4.5rem !default;
$display4-size: 3.5rem !default;
$display1-weight: 300 !default;
$display2-weight: 300 !default;
$display3-weight: 300 !default;
$display4-weight: 300 !default;
$display-line-height: $headings-line-height !default;
$lead-font-size: 1.25rem !default;
$lead-font-weight: 300 !default;
$small-font-size: 80% !default;
$text-muted: $gray-light !default;
$abbr-border-color: $gray-light !default;
$blockquote-small-color: $gray-light !default;
$blockquote-font-size: ($font-size-base * 1.25) !default;
$blockquote-border-color: $gray-lighter !default;
$blockquote-border-width: .25rem !default;
$hr-border-color: rgba($black,.1) !default;
$hr-border-width: $border-width !default;
$mark-padding: .2em !default;
$dt-font-weight: $font-weight-bold !default;
$kbd-box-shadow: inset 0 -.1rem 0 rgba($black,.25) !default;
$nested-kbd-font-weight: $font-weight-bold !default;
$list-inline-padding: 5px !default;
// Components
//
// Define common padding and border radius sizes and more.
$line-height-lg: (4 / 3) !default;
$line-height-sm: 1.5 !default;
$border-radius: .25rem !default;
$border-radius-lg: .3rem !default;
$border-radius-sm: .2rem !default;
$component-active-color: $white !default;
$component-active-bg: $brand-primary !default;
$caret-width: .3em !default;
$transition-base: all .2s ease-in-out !default;
$transition-fade: opacity .15s linear !default;
$transition-collapse: height .35s ease !default;
// Tables
//
// Customizes the `.table` component with basic values, each used across all table variations.
$table-cell-padding: .75rem !default;
$table-sm-cell-padding: .3rem !default;
$table-bg: transparent !default;
$table-inverse-bg: $gray-dark !default;
$table-inverse-color: $body-bg !default;
$table-bg-accent: rgba($black,.05) !default;
$table-bg-hover: rgba($black,.075) !default;
$table-bg-active: $table-bg-hover !default;
$table-head-bg: $gray-lighter !default;
$table-head-color: $gray !default;
$table-border-width: $border-width !default;
$table-border-color: $gray-lighter !default;
// Buttons
//
// For each of Bootstrap's buttons, define text, background and border color.
$btn-padding-x: 1rem !default;
$btn-padding-y: .5rem !default;
$btn-line-height: 1.25 !default;
$btn-font-weight: $font-weight-normal !default;
$btn-box-shadow: inset 0 1px 0 rgba($white,.15), 0 1px 1px rgba($black,.075) !default;
$btn-focus-box-shadow: 0 0 0 2px rgba($brand-primary, .25) !default;
$btn-active-box-shadow: inset 0 3px 5px rgba($black,.125) !default;
$btn-primary-color: $white !default;
$btn-primary-bg: $brand-primary !default;
$btn-primary-border: $btn-primary-bg !default;
$btn-secondary-color: $gray-dark !default;
$btn-secondary-bg: $white !default;
$btn-secondary-border: #ccc !default;
$btn-info-color: $white !default;
$btn-info-bg: $brand-info !default;
$btn-info-border: $btn-info-bg !default;
$btn-success-color: $white !default;
$btn-success-bg: $brand-success !default;
$btn-success-border: $btn-success-bg !default;
$btn-warning-color: $white !default;
$btn-warning-bg: $brand-warning !default;
$btn-warning-border: $btn-warning-bg !default;
$btn-danger-color: $white !default;
$btn-danger-bg: $brand-danger !default;
$btn-danger-border: $btn-danger-bg !default;
$btn-link-disabled-color: $gray-light !default;
$btn-padding-x-sm: .5rem !default;
$btn-padding-y-sm: .25rem !default;
$btn-padding-x-lg: 1.5rem !default;
$btn-padding-y-lg: .75rem !default;
$btn-block-spacing-y: .5rem !default;
$btn-toolbar-margin: .5rem !default;
// Allows for customizing button radius independently from global border radius
$btn-border-radius: $border-radius !default;
$btn-border-radius-lg: $border-radius-lg !default;
$btn-border-radius-sm: $border-radius-sm !default;
$btn-transition: all .2s ease-in-out !default;
// Forms
$input-padding-x: .75rem !default;
$input-padding-y: .5rem !default;
$input-line-height: 1.25 !default;
$input-bg: $white !default;
$input-bg-disabled: $gray-lighter !default;
$input-color: $gray !default;
$input-border-color: rgba($black,.15) !default;
$input-btn-border-width: $border-width !default; // For form controls and buttons
$input-box-shadow: inset 0 1px 1px rgba($black,.075) !default;
$input-border-radius: $border-radius !default;
$input-border-radius-lg: $border-radius-lg !default;
$input-border-radius-sm: $border-radius-sm !default;
$input-bg-focus: $input-bg !default;
$input-border-focus: lighten($brand-primary, 25%) !default;
$input-box-shadow-focus: $input-box-shadow, rgba($input-border-focus, .6) !default;
$input-color-focus: $input-color !default;
$input-color-placeholder: $gray-light !default;
$input-padding-x-sm: .5rem !default;
$input-padding-y-sm: .25rem !default;
$input-padding-x-lg: 1.5rem !default;
$input-padding-y-lg: .75rem !default;
$input-height: (($font-size-base * $input-line-height) + ($input-padding-y * 2)) !default;
$input-height-lg: (($font-size-lg * $line-height-lg) + ($input-padding-y-lg * 2)) !default;
$input-height-sm: (($font-size-sm * $line-height-sm) + ($input-padding-y-sm * 2)) !default;
$input-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s !default;
$form-text-margin-top: .25rem !default;
$form-feedback-margin-top: $form-text-margin-top !default;
$form-check-margin-bottom: .5rem !default;
$form-check-input-gutter: 1.25rem !default;
$form-check-input-margin-y: .25rem !default;
$form-check-input-margin-x: .25rem !default;
$form-check-inline-margin-x: .75rem !default;
$form-group-margin-bottom: $spacer-y !default;
$input-group-addon-bg: $gray-lighter !default;
$input-group-addon-border-color: $input-border-color !default;
$cursor-disabled: not-allowed !default;
$custom-control-gutter: 1.5rem !default;
$custom-control-spacer-x: 1rem !default;
$custom-control-spacer-y: .25rem !default;
$custom-control-indicator-size: 1rem !default;
$custom-control-indicator-margin-y: (($line-height-base * 1rem) - $custom-control-indicator-size) / -2 !default;
$custom-control-indicator-bg: #ddd !default;
$custom-control-indicator-bg-size: 50% 50% !default;
$custom-control-indicator-box-shadow: inset 0 .25rem .25rem rgba($black,.1) !default;
$custom-control-disabled-cursor: $cursor-disabled !default;
$custom-control-disabled-indicator-bg: $gray-lighter !default;
$custom-control-disabled-description-color: $gray-light !default;
$custom-control-checked-indicator-color: $white !default;
$custom-control-checked-indicator-bg: $brand-primary !default;
$custom-control-checked-indicator-box-shadow: none !default;
$custom-control-focus-indicator-box-shadow: 0 0 0 1px $body-bg, 0 0 0 3px $brand-primary !default;
$custom-control-active-indicator-color: $white !default;
$custom-control-active-indicator-bg: lighten($brand-primary, 35%) !default;
$custom-control-active-indicator-box-shadow: none !default;
$custom-checkbox-radius: $border-radius !default;
$custom-checkbox-checked-icon: str-replace(url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3E%3Cpath fill='#{$custom-control-checked-indicator-color}' d='M6.564.75l-3.59 3.612-1.538-1.55L0 4.26 2.974 7.25 8 2.193z'/%3E%3C/svg%3E"), "#", "%23") !default;
$custom-checkbox-indeterminate-bg: $brand-primary !default;
$custom-checkbox-indeterminate-indicator-color: $custom-control-checked-indicator-color !default;
$custom-checkbox-indeterminate-icon: str-replace(url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 4 4'%3E%3Cpath stroke='#{$custom-checkbox-indeterminate-indicator-color}' d='M0 2h4'/%3E%3C/svg%3E"), "#", "%23") !default;
$custom-checkbox-indeterminate-box-shadow: none !default;
$custom-radio-radius: 50% !default;
$custom-radio-checked-icon: str-replace(url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3E%3Ccircle r='3' fill='#{$custom-control-checked-indicator-color}'/%3E%3C/svg%3E"), "#", "%23") !default;
$custom-select-padding-x: .75rem !default;
$custom-select-padding-y: .375rem !default;
$custom-select-indicator-padding: 1rem !default; // Extra padding to account for the presence of the background-image based indicator
$custom-select-line-height: $input-line-height !default;
$custom-select-color: $input-color !default;
$custom-select-disabled-color: $gray-light !default;
$custom-select-bg: $white !default;
$custom-select-disabled-bg: $gray-lighter !default;
$custom-select-bg-size: 8px 10px !default; // In pixels because image dimensions
$custom-select-indicator-color: #333 !default;
$custom-select-indicator: str-replace(url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 4 5'%3E%3Cpath fill='#{$custom-select-indicator-color}' d='M2 0L0 2h4zm0 5L0 3h4z'/%3E%3C/svg%3E"), "#", "%23") !default;
$custom-select-border-width: $input-btn-border-width !default;
$custom-select-border-color: $input-border-color !default;
$custom-select-border-radius: $border-radius !default;
$custom-select-focus-border-color: lighten($brand-primary, 25%) !default;
$custom-select-focus-box-shadow: inset 0 1px 2px rgba($black, .075), 0 0 5px rgba($custom-select-focus-border-color, .5) !default;
$custom-select-sm-padding-y: .2rem !default;
$custom-select-sm-font-size: 75% !default;
$custom-file-height: 2.5rem !default;
$custom-file-width: 14rem !default;
$custom-file-focus-box-shadow: 0 0 0 .075rem $white, 0 0 0 .2rem $brand-primary !default;
$custom-file-padding-x: .5rem !default;
$custom-file-padding-y: 1rem !default;
$custom-file-line-height: 1.5 !default;
$custom-file-color: $gray !default;
$custom-file-bg: $white !default;
$custom-file-border-width: $border-width !default;
$custom-file-border-color: $input-border-color !default;
$custom-file-border-radius: $border-radius !default;
$custom-file-box-shadow: inset 0 .2rem .4rem rgba($black,.05) !default;
$custom-file-button-color: $custom-file-color !default;
$custom-file-button-bg: $gray-lighter !default;
$custom-file-text: (
placeholder: (
en: "Choose file..."
),
button-label: (
en: "Browse"
)
) !default;
// Form validation icons
$form-icon-success-color: $brand-success !default;
$form-icon-success: str-replace(url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3E%3Cpath fill='#{$form-icon-success-color}' d='M2.3 6.73L.6 4.53c-.4-1.04.46-1.4 1.1-.8l1.1 1.4 3.4-3.8c.6-.63 1.6-.27 1.2.7l-4 4.6c-.43.5-.8.4-1.1.1z'/%3E%3C/svg%3E"), "#", "%23") !default;
$form-icon-warning-color: $brand-warning !default;
$form-icon-warning: str-replace(url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3E%3Cpath fill='#{$form-icon-warning-color}' d='M4.4 5.324h-.8v-2.46h.8zm0 1.42h-.8V5.89h.8zM3.76.63L.04 7.075c-.115.2.016.425.26.426h7.397c.242 0 .372-.226.258-.426C6.726 4.924 5.47 2.79 4.253.63c-.113-.174-.39-.174-.494 0z'/%3E%3C/svg%3E"), "#", "%23") !default;
$form-icon-danger-color: $brand-danger !default;
$form-icon-danger: str-replace(url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='#{$form-icon-danger-color}' viewBox='-2 -2 7 7'%3E%3Cpath stroke='%23d9534f' d='M0 0l3 3m0-3L0 3'/%3E%3Ccircle r='.5'/%3E%3Ccircle cx='3' r='.5'/%3E%3Ccircle cy='3' r='.5'/%3E%3Ccircle cx='3' cy='3' r='.5'/%3E%3C/svg%3E"), "#", "%23") !default;
// Dropdowns
//
// Dropdown menu container and contents.
$dropdown-min-width: 10rem !default;
$dropdown-padding-y: .5rem !default;
$dropdown-margin-top: .125rem !default;
$dropdown-bg: $white !default;
$dropdown-border-color: rgba($black,.15) !default;
$dropdown-border-width: $border-width !default;
$dropdown-divider-bg: $gray-lighter !default;
$dropdown-box-shadow: 0 .5rem 1rem rgba($black,.175) !default;
$dropdown-link-color: $gray-dark !default;
$dropdown-link-hover-color: darken($gray-dark, 5%) !default;
$dropdown-link-hover-bg: $gray-lightest !default;
$dropdown-link-active-color: $component-active-color !default;
$dropdown-link-active-bg: $component-active-bg !default;
$dropdown-link-disabled-color: $gray-light !default;
$dropdown-item-padding-x: 1.5rem !default;
$dropdown-header-color: $gray-light !default;
// Z-index master list
//
// Warning: Avoid customizing these values. They're used for a bird's eye view
// of components dependent on the z-axis and are designed to all work together.
$zindex-dropdown-backdrop: 990 !default;
$zindex-navbar: 1000 !default;
$zindex-dropdown: 1000 !default;
$zindex-fixed: 1030 !default;
$zindex-sticky: 1030 !default;
$zindex-modal-backdrop: 1040 !default;
$zindex-modal: 1050 !default;
$zindex-popover: 1060 !default;
$zindex-tooltip: 1070 !default;
// Navbar
$navbar-border-radius: $border-radius !default;
$navbar-padding-x: $spacer !default;
$navbar-padding-y: ($spacer / 2) !default;
$navbar-brand-padding-y: .25rem !default;
$navbar-toggler-padding-x: .75rem !default;
$navbar-toggler-padding-y: .25rem !default;
$navbar-toggler-font-size: $font-size-lg !default;
$navbar-toggler-border-radius: $btn-border-radius !default;
$navbar-inverse-color: rgba($white,.5) !default;
$navbar-inverse-hover-color: rgba($white,.75) !default;
$navbar-inverse-active-color: rgba($white,1) !default;
$navbar-inverse-disabled-color: rgba($white,.25) !default;
$navbar-inverse-toggler-bg: str-replace(url("data:image/svg+xml;charset=utf8,%3Csvg viewBox='0 0 32 32' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath stroke='#{$navbar-inverse-color}' stroke-width='2' stroke-linecap='round' stroke-miterlimit='10' d='M4 8h24M4 16h24M4 24h24'/%3E%3C/svg%3E"), "#", "%23") !default;
$navbar-inverse-toggler-border: rgba($white,.1) !default;
$navbar-light-color: rgba($black,.5) !default;
$navbar-light-hover-color: rgba($black,.7) !default;
$navbar-light-active-color: rgba($black,.9) !default;
$navbar-light-disabled-color: rgba($black,.3) !default;
$navbar-light-toggler-bg: str-replace(url("data:image/svg+xml;charset=utf8,%3Csvg viewBox='0 0 32 32' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath stroke='#{$navbar-light-color}' stroke-width='2' stroke-linecap='round' stroke-miterlimit='10' d='M4 8h24M4 16h24M4 24h24'/%3E%3C/svg%3E"), "#", "%23") !default;
$navbar-light-toggler-border: rgba($black,.1) !default;
// Navs
$nav-item-margin: .2rem !default;
$nav-item-inline-spacer: 1rem !default;
$nav-link-padding: .5em 1em !default;
$nav-link-hover-bg: $gray-lighter !default;
$nav-disabled-link-color: $gray-light !default;
$nav-tabs-border-color: #ddd !default;
$nav-tabs-border-width: $border-width !default;
$nav-tabs-border-radius: $border-radius !default;
$nav-tabs-link-hover-border-color: $gray-lighter !default;
$nav-tabs-active-link-hover-color: $gray !default;
$nav-tabs-active-link-hover-bg: $body-bg !default;
$nav-tabs-active-link-hover-border-color: #ddd !default;
$nav-tabs-justified-link-border-color: #ddd !default;
$nav-tabs-justified-active-link-border-color: $body-bg !default;
$nav-pills-border-radius: $border-radius !default;
$nav-pills-active-link-color: $component-active-color !default;
$nav-pills-active-link-bg: $component-active-bg !default;
// Pagination
$pagination-padding-x: .75rem !default;
$pagination-padding-y: .5rem !default;
$pagination-padding-x-sm: .5rem !default;
$pagination-padding-y-sm: .25rem !default;
$pagination-padding-x-lg: 1.5rem !default;
$pagination-padding-y-lg: .75rem !default;
$pagination-line-height: 1.25 !default;
$pagination-color: $link-color !default;
$pagination-bg: $white !default;
$pagination-border-width: $border-width !default;
$pagination-border-color: #ddd !default;
$pagination-hover-color: $link-hover-color !default;
$pagination-hover-bg: $gray-lighter !default;
$pagination-hover-border: #ddd !default;
$pagination-active-color: $white !default;
$pagination-active-bg: $brand-primary !default;
$pagination-active-border: $brand-primary !default;
$pagination-disabled-color: $gray-light !default;
$pagination-disabled-bg: $white !default;
$pagination-disabled-border: #ddd !default;
// Jumbotron
$jumbotron-padding: 2rem !default;
$jumbotron-bg: $gray-lighter !default;
// Form states and alerts
//
// Define colors for form feedback states and, by default, alerts.
$state-success-text: #3c763d !default;
$state-success-bg: #dff0d8 !default;
$state-success-border: darken($state-success-bg, 5%) !default;
$state-info-text: #31708f !default;
$state-info-bg: #d9edf7 !default;
$state-info-border: darken($state-info-bg, 7%) !default;
$state-warning-text: #8a6d3b !default;
$state-warning-bg: #fcf8e3 !default;
$mark-bg: $state-warning-bg !default;
$state-warning-border: darken($state-warning-bg, 5%) !default;
$state-danger-text: #a94442 !default;
$state-danger-bg: #f2dede !default;
$state-danger-border: darken($state-danger-bg, 5%) !default;
// Cards
$card-spacer-x: 1.25rem !default;
$card-spacer-y: .75rem !default;
$card-border-width: 1px !default;
$card-border-radius: $border-radius !default;
$card-border-color: rgba($black,.125) !default;
$card-border-radius-inner: calc(#{$card-border-radius} - #{$card-border-width}) !default;
$card-cap-bg: $gray-lightest !default;
$card-bg: $white !default;
$card-link-hover-color: $white !default;
$card-img-overlay-padding: 1.25rem !default;
$card-deck-margin: ($grid-gutter-width-base / 2) !default;
$card-columns-count: 3 !default;
$card-columns-gap: 1.25rem !default;
$card-columns-margin: $card-spacer-y !default;
// Tooltips
$tooltip-max-width: 200px !default;
$tooltip-color: $white !default;
$tooltip-bg: $black !default;
$tooltip-opacity: .9 !default;
$tooltip-padding-y: 3px !default;
$tooltip-padding-x: 8px !default;
$tooltip-margin: 3px !default;
$tooltip-arrow-width: 5px !default;
$tooltip-arrow-color: $tooltip-bg !default;
// Popovers
$popover-inner-padding: 1px !default;
$popover-bg: $white !default;
$popover-max-width: 276px !default;
$popover-border-width: $border-width !default;
$popover-border-color: rgba($black,.2) !default;
$popover-box-shadow: 0 5px 10px rgba($black,.2) !default;
$popover-title-bg: darken($popover-bg, 3%) !default;
$popover-title-padding-x: 14px !default;
$popover-title-padding-y: 8px !default;
$popover-content-padding-x: 14px !default;
$popover-content-padding-y: 9px !default;
$popover-arrow-width: 10px !default;
$popover-arrow-color: $popover-bg !default;
$popover-arrow-outer-width: ($popover-arrow-width + 1px) !default;
$popover-arrow-outer-color: fade-in($popover-border-color, .05) !default;
// Badges
//
// Contextual backgrounds, text color, and padding defaults for badges.
$badge-default-bg: $gray-light !default;
$badge-primary-bg: $brand-primary !default;
$badge-success-bg: $brand-success !default;
$badge-info-bg: $brand-info !default;
$badge-warning-bg: $brand-warning !default;
$badge-danger-bg: $brand-danger !default;
$badge-color: $white !default;
$badge-link-hover-color: $white !default;
$badge-font-size: 75% !default;
$badge-font-weight: $font-weight-bold !default;
$badge-padding-x: .4em !default;
$badge-padding-y: .25em !default;
$badge-pill-padding-x: .6em !default;
// Use a higher than normal value to ensure completely rounded edges when
// customizing padding or font-size on labels.
$badge-pill-border-radius: 10rem !default;
// Modals
//
// Dialog sizing, content borders/shadows, and backdrop defaults for modals.
// Padding applied to the modal body
$modal-inner-padding: 15px !default;
$modal-dialog-margin: 10px !default;
$modal-dialog-sm-up-margin-y: 30px !default;
$modal-title-line-height: $line-height-base !default;
$modal-content-bg: $white !default;
$modal-content-border-color: rgba($black,.2) !default;
$modal-content-border-width: $border-width !default;
$modal-content-xs-box-shadow: 0 3px 9px rgba($black,.5) !default;
$modal-content-sm-up-box-shadow: 0 5px 15px rgba($black,.5) !default;
$modal-backdrop-bg: $black !default;
$modal-backdrop-opacity: .5 !default;
$modal-header-border-color: $gray-lighter !default;
$modal-footer-border-color: $modal-header-border-color !default;
$modal-header-border-width: $modal-content-border-width !default;
$modal-footer-border-width: $modal-header-border-width !default;
$modal-header-padding: 15px !default;
// Max widths for the lg/md/sm modal dialog sizes.
$modal-lg: 800px !default;
$modal-md: 500px !default;
$modal-sm: 300px !default;
$modal-transition: transform .3s ease-out !default;
// Alerts
//
// Define alert colors, border radius, and padding.
// Contextual bg/text/border values come from the $state-* variables.
$alert-padding-x: 1.25rem !default;
$alert-padding-y: .75rem !default;
$alert-margin-bottom: $spacer-y !default;
$alert-border-radius: $border-radius !default;
$alert-link-font-weight: $font-weight-bold !default;
$alert-border-width: $border-width !default;
$alert-success-bg: $state-success-bg !default;
$alert-success-text: $state-success-text !default;
$alert-success-border: $state-success-border !default;
$alert-info-bg: $state-info-bg !default;
$alert-info-text: $state-info-text !default;
$alert-info-border: $state-info-border !default;
$alert-warning-bg: $state-warning-bg !default;
$alert-warning-text: $state-warning-text !default;
$alert-warning-border: $state-warning-border !default;
$alert-danger-bg: $state-danger-bg !default;
$alert-danger-text: $state-danger-text !default;
$alert-danger-border: $state-danger-border !default;
// Progress bars
//
// Track and bar styling defaults for the progress component.
$progress-height: 1rem !default;
$progress-font-size: .75rem !default;
$progress-bg: $gray-lighter !default;
$progress-border-radius: $border-radius !default;
$progress-box-shadow: inset 0 .1rem .1rem rgba($black,.1) !default;
$progress-bar-color: $white !default;
$progress-bar-bg: $brand-primary !default;
$progress-bar-animation-timing: 1s linear infinite !default;
// List group
//
// Item colors/borders/padding plus hover, active, disabled, and link states.
$list-group-color: $body-color !default;
$list-group-bg: $white !default;
$list-group-border-color: rgba($black,.125) !default;
$list-group-border-width: $border-width !default;
$list-group-border-radius: $border-radius !default;
$list-group-item-padding-x: 1.25rem !default;
$list-group-item-padding-y: .75rem !default;
$list-group-hover-bg: $gray-lightest !default;
$list-group-active-color: $component-active-color !default;
$list-group-active-bg: $component-active-bg !default;
$list-group-active-border: $list-group-active-bg !default;
$list-group-active-text-color: lighten($list-group-active-bg, 50%) !default;
$list-group-disabled-color: $gray-light !default;
$list-group-disabled-bg: $list-group-bg !default;
$list-group-disabled-text-color: $list-group-disabled-color !default;
$list-group-link-color: $gray !default;
$list-group-link-heading-color: $gray-dark !default;
$list-group-link-hover-color: $list-group-link-color !default;
$list-group-link-active-color: $list-group-color !default;
$list-group-link-active-bg: $gray-lighter !default;
// Image thumbnails
$thumbnail-padding: .25rem !default;
$thumbnail-bg: $body-bg !default;
$thumbnail-border-width: $border-width !default;
$thumbnail-border-color: #ddd !default;
$thumbnail-border-radius: $border-radius !default;
$thumbnail-box-shadow: 0 1px 2px rgba($black,.075) !default;
$thumbnail-transition: all .2s ease-in-out !default;
// Figures
$figure-caption-font-size: 90% !default;
$figure-caption-color: $gray-light !default;
// Breadcrumbs
$breadcrumb-padding-y: .75rem !default;
$breadcrumb-padding-x: 1rem !default;
$breadcrumb-item-padding: .5rem !default;
$breadcrumb-bg: $gray-lighter !default;
$breadcrumb-divider-color: $gray-light !default;
$breadcrumb-active-color: $gray-light !default;
$breadcrumb-divider: "/" !default;
// Carousel
//
// Control, indicator, and caption defaults for the carousel component.
$carousel-control-color: $white !default;
$carousel-control-width: 15% !default;
$carousel-control-opacity: .5 !default;
$carousel-indicator-width: 30px !default;
$carousel-indicator-height: 3px !default;
$carousel-indicator-spacer: 3px !default;
$carousel-indicator-active-bg: $white !default;
$carousel-caption-width: 70% !default;
$carousel-caption-color: $white !default;
$carousel-control-icon-width: 20px !default;
// Inline SVG chevron icons; str-replace rewrites "#" as "%23" so the
// interpolated fill color stays valid inside the data URI.
$carousel-control-prev-icon-bg: str-replace(url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='#{$carousel-control-color}' viewBox='0 0 8 8'%3E%3Cpath d='M4 0l-4 4 4 4 1.5-1.5-2.5-2.5 2.5-2.5-1.5-1.5z'/%3E%3C/svg%3E"), "#", "%23") !default;
$carousel-control-next-icon-bg: str-replace(url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='#{$carousel-control-color}' viewBox='0 0 8 8'%3E%3Cpath d='M1.5 0l-1.5 1.5 2.5 2.5-2.5 2.5 1.5 1.5 4-4-4-4z'/%3E%3C/svg%3E"), "#", "%23") !default;
$carousel-transition: transform .6s ease-in-out !default;
// Close
$close-font-size: $font-size-base * 1.5 !default;
$close-font-weight: $font-weight-bold !default;
$close-color: $black !default;
$close-text-shadow: 0 1px 0 $white !default;
// Code
//
// Inline code, kbd, and pre block defaults.
$code-font-size: 90% !default;
$code-padding-x: .4rem !default;
$code-padding-y: .2rem !default;
$code-color: #bd4147 !default;
$code-bg: $gray-lightest !default;
$kbd-color: $white !default;
$kbd-bg: $gray-dark !default;
$pre-bg: $gray-lightest !default;
$pre-color: $gray-dark !default;
$pre-border-color: #ccc !default;
$pre-scrollable-max-height: 340px !default;
|
{
"pile_set_name": "Github"
}
|
# Upgrading Forseti with Terraform
## Introduction
<walkthrough-tutorial-duration duration="30"></walkthrough-tutorial-duration>
This guide explains how to upgrade a Forseti installation previously deployed with
Terraform to version 2.23. This is due to a breaking change introduced in this Terraform
module, now version 5.0.0. The steps outlined in this guide should not be needed
after Forseti has been upgraded to version 2.23 or above.
If you have any
questions about this process, please contact us by
[email](mailto:discuss@forsetisecurity.org) or on
[Slack](https://forsetisecurity.slack.com/join/shared_invite/enQtNDIyMzg4Nzg1NjcxLTM1NTUzZmM2ODVmNzE5MWEwYzAwNjUxMjVkZjhmYWZiOGZjMjY3ZjllNDlkYjk1OGU4MTVhZGM4NzgyZjZhNTE).
## Prerequisites
Before you begin the migration process, you will need:
- A Forseti deployment of at least v2.18.0, deployed via the
[terraform-google-forseti Terraform module](https://github.com/forseti-security/terraform-google-forseti);
follow the [upgrade guide](https://forsetisecurity.org/docs/latest/setup/upgrade.html) as necessary.
Please note that the upper bound of the upgrades possible for the Python installer is 2.22.
- A version of the
[Terraform command-line interface](https://www.terraform.io/downloads.html)
in the 0.12 series.
- The ID of the GCP project in which Forseti is deployed.
- A service account in the organization with the
[roles required by the Terraform module](https://registry.terraform.io/modules/terraform-google-modules/forseti/google/4.3.0#iam-roles).
- A
[JSON key file](https://cloud.google.com/iam/docs/creating-managing-service-account-keys#creating_service_account_keys)
for the service account.
- If you are an Org Admin in the organization in which you are deploying Forseti, a separate Service Account and Key are recommended,
but not required.
- **Strongly recommended out of an overabundance of caution:** A backup of your current state.
- In the Forseti Server's GCS Bucket
- Scanner rules
- Server config file
- Scanner Violations
- Inventory summary
- [CloudSQL database](https://cloud.google.com/sql/docs/mysql/backup-recovery/backups)
## Configuring Terraform
Terraform can assume the identity of a service account through a
strategy called
[Application Default Credentials](https://cloud.google.com/docs/authentication/production#providing_credentials_to_your_application)
when provisioning resources. To enable this approach, set the
appropriate environment variable to the path of the service account JSON
key file:
```sh
export GOOGLE_APPLICATION_CREDENTIALS="PATH_TO_JSON_KEY_FILE"
```
As stated in the prerequisites, if you have Org Admin privileges, you do not need to complete this step.
## Backup the Terraform State
In a shell, navigate to the folder containing your user-defined Terraform module, most likely in a **main.tf**.
The following command will simultaneously back up your existing Terraform state and remove the resources from the current state file. This will not affect your existing Forseti deployment.
```sh
terraform state rm $(terraform state list)
```
## Update main.tf
In order to support this upgrade, we'll need to update a few input variables.
### Source
The **source** will need to point to the root terraform-google-forseti module.
```
source = "terraform-google-modules/forseti/google"
```
If you have cloned the module to your local file system, you may set the **source** path to
the directory containing the module.
### Version
The **version** will need to be 5.0.0.
```
version = "5.0.0"
```
If you have set the **source** path to the directory containing the module, omit the **version** variable.
### Region
If you have a **region**, it will need to be split into the cloudsql_region, server_region, and client_region
variables. You will also need to set the location of the Forseti Client and Server storage buckets
(**storage_bucket_location**) as well as the CAI storage bucket (**bucket_cai_location**).
**NOTE:** In order to prevent data loss to your CloudSQL database, please double check the region where
your CloudSQL instance currently exists and update the **cloudsql_region** variable accordingly.
Before (example):
```
region = "us-central1"
```
After (example):
```
cloudsql_region = "us-central1"
server_region = "us-central1"
client_region = "us-central1"
storage_bucket_location = "us-central1"
bucket_cai_location = "us-central1"
```
### Credentials path
Remove the **credentials_path** variable if present. The `google` provider now solely relies on the _GOOGLE_APPLICATION_CREDENTIALS_ environment variable.
### Root Resource identity
Add the **resource_name_suffix** variable and set it to the resource suffix. The suffix can be found appended to the Forseti Server VM, for example.
```
resource_name_suffix = "abc123efg"
```
### Server Rules and Login
Add the following clause to the bottom of your main.tf.
```
client_instance_metadata = {
enable-oslogin = "TRUE"
}
enable_write = true
manage_rules_enabled = false
```
## Obtain and Run the Import Script
### Obtain the Import Script
This [import script](https://github.com/forseti-security/terraform-google-forseti/blob/module-release-5.0.0/helpers/import.sh) will import the Forseti GCP resources into a local state file.
```sh
curl --location --remote-name https://raw.githubusercontent.com/forseti-security/terraform-google-forseti/module-release-5.0.0/helpers/import.sh
chmod +x import.sh
./import.sh -h
```
### Initialize the Terraform Module
```sh
terraform init
```
### Import the Existing Terraform State
Import the existing resources to the Terraform state, replacing the
uppercase values with the aforementioned values:
```sh
./import.sh -m MODULE_LOCAL_NAME -o ORG_ID -p PROJECT_ID -s RESOURCE_NAME_SUFFIX -z GCE_ZONE [-n NETWORK_PROJECT_ID]
```
Observe the expected Terraform changes by executing `terraform plan`. As stated in the introduction, if you have any
questions about this process, please contact us by
[email](mailto:discuss@forsetisecurity.org) or on
[Slack](https://forsetisecurity.slack.com/join/shared_invite/enQtNDIyMzg4Nzg1NjcxLTM1NTUzZmM2ODVmNzE5MWEwYzAwNjUxMjVkZjhmYWZiOGZjMjY3ZjllNDlkYjk1OGU4MTVhZGM4NzgyZjZhNTE).
## Terraform Plan
It is strongly recommended to execute `terraform plan` before `terraform apply`. This
will provide you an opportunity to review changes Terraform is planning to make
to your deployment.
```sh
terraform plan
```
### Terraform Changes
Because there is not an exact mapping between the deprecated Python
Installer and the Terraform module, some changes will occur when
Terraform assumes management of the Forseti deployment.
You should carefully review this section as well as the output from
`terraform plan` to ensure that all changes are expected and acceptable.
Observe the expected Terraform changes. As stated in the introduction, if you have any
questions about this process, please contact us by
e-mail at discuss@forsetisecurity.org or on
[Slack](https://forsetisecurity.slack.com/join/shared_invite/enQtNDIyMzg4Nzg1NjcxLTM1NTUzZmM2ODVmNzE5MWEwYzAwNjUxMjVkZjhmYWZiOGZjMjY3ZjllNDlkYjk1OGU4MTVhZGM4NzgyZjZhNTE).
#### Created
- The `forseti-client-gcp-RESOURCE_NAME_SUFFIX` service account will
gain the Cloud Trace Agent (`roles/cloudtrace.agent`) role
- The `forseti-client-gcp-RESOURCE_NAME_SUFFIX` service account will
gain the Storage Object Viewer (`roles/storage.objectViewer`) role
- The `forseti-server-gcp-RESOURCE_NAME_SUFFIX` service account will
gain the following roles. Note your server service account likely
has these roles already. Terraform re-applying them is essentially
a no-op.
- Cloud Trace Agent (`roles/cloudtrace.agent`)
- IAM Service Account Token Creator (`roles/iam.serviceAccountTokenCreator`)
- App Engine Viewer (`roles/appengine.appViewer`)
- BigQuery Meta-data Viewer (`roles/bigquery.metadataViewer`)
- Project Reader (`roles/browser`)
- Cloud Asset Viewer (`roles/cloudasset.viewer`)
- CloudSQL Viewer (`roles/cloudsql.viewer`)
- Network Viewer (`roles/compute.networkViewer`)
- Security Reviewer (`roles/iam.securityReviewer`)
- Organization Policy Viewer (`roles/orgpolicy.policyViewer`)
- Service Management Quota Viewer (`roles/servicemanagement.quotaViewer`)
- Service Usage Consumer (`roles/serviceusage.serviceUsageConsumer`)
- Compute Security Admin (`roles/compute.securityAdmin`)
- Storage Object Viewer (`roles/storage.objectViewer`)
- Storage Object Creator (`roles/storage.objectCreator`)
- CloudSQL Client (`roles/cloudsql.client`)
- Stackdriver Log Writer (`roles/logging.logWriter`)
- Service Account Token Creator (`roles/iam.serviceAccountTokenCreator`)
#### Updated In-Place
- The `forseti-client-deny-all-RESOURCE_NAME_SUFFIX` firewall rule and
the `forseti-server-deny-all-RESOURCE_NAME_SUFFIX` firewall rule will
both update from denying all protocols to denying ICMP, TCP, and UDP
- The `forseti-server-allow-grpc-RESOURCE_NAME_SUFFIX` firewall rule
will update to only allow traffic from the
`forseti-client-gcp-RESOURCE_NAME_SUFFIX` service account and to allow
traffic to port 50052 in addition to 50051
- The `forseti-cai-export-RESOURCE_NAME_SUFFIX`, `forseti-client-RESOURCE_NAME_SUFFIX` and `forseti-server-RESOURCE_NAME_SUFFIX`
GCS bucket to set `force_destroy` to `true`.
- The `forseti-client-gcp-RESOURCE_NAME_SUFFIX` and `forseti-server-gcp-RESOURCE_NAME_SUFFIX`
service accounts will be updated in place to change the display name
- The `forseti-server-db-RESOURCE_NAME_SUFFIX` CloudSQL database will
increase in resource size. It will also gain the `net_write_timeout`
flag.
#### Destroyed and Replaced
- The `forseti-client-allow-ssh-external-RESOURCE_NAME_SUFFIX` firewall
rule and the `forseti-server-allow-ssh-external-RESOURCE_NAME_SUFFIX`
firewall rule will both be replaced due to a naming change, but the new firewall
rules will be equivalent
- The `forseti-client-vm-RESOURCE_NAME_SUFFIX` compute instance and the
`forseti-server-vm-RESOURCE_NAME_SUFFIX` compute instance will be
replaced due to changes in metadata startup scripts, boot disk sizes
and boot disk types; these VMs should be stateless but ensure that
any customizations are captured before applying this change
- The `configs/forseti_conf_client.yaml` object in the
`forseti-client-RESOURCE_NAME_SUFFIX` storage bucket and the
`configs/forseti_conf_server.yaml` object in the
`forseti-server-RESOURCE_NAME_SUFFIX` storage bucket will be replaced
due to a lack of Terraform import support
## Apply the Terraform Changes
Execute the following to apply the Terraform plan.
```sh
terraform apply
```
## Client VM Endpoint
It is possible that the *forseti_conf_client.yaml* did not get updated with the right
**server_ip** address. This is a known issue and is being investigated. Please perform
the following steps.
1. Update the **server_ip** in your `forseti-client-RESOURCE_NAME_SUFFIX/configs/forseti_conf_client.yaml`
file if necessary.
2. Reset your client VM.
## Upgrade Complete
<walkthrough-conclusion-trophy></walkthrough-conclusion-trophy>
You have completed upgrading Forseti to 2.23 with the re-architected
terraform-google-forseti Terraform module!
|
{
"pile_set_name": "Github"
}
|
{
"parent": "builtin/generated",
"textures": {
"layer0": "flansmod:items/MillsBomb"
},
"display": {
"thirdperson_lefthand": {
"rotation": [
0,
90,
-35
],
"translation": [
0,
1.25,
-2.5
],
"scale": [
0.85,
0.85,
0.85
]
},
"thirdperson_righthand": {
"rotation": [
0,
90,
-35
],
"translation": [
0,
1.25,
-2.5
],
"scale": [
0.85,
0.85,
0.85
]
},
"firstperson_lefthand": {
"rotation": [
0,
-45,
25
],
"translation": [
0,
4,
2
],
"scale": [
0.85,
0.85,
0.85
]
},
"firstperson_righthand": {
"rotation": [
0,
-45,
25
],
"translation": [
0,
4,
2
],
"scale": [
0.85,
0.85,
0.85
]
}
}
}
|
{
"pile_set_name": "Github"
}
|
-- -----------------------------
-- Export time `2016-12-13 22:26:46`
-- -----------------------------
-- -----------------------------
-- Table structure for `dp_cms_advert` (advert table; holds ad slots with
-- optional start/end validity window controlled by `timeset`)
-- -----------------------------
DROP TABLE IF EXISTS `dp_cms_advert`;
CREATE TABLE `dp_cms_advert` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT,
`typeid` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '分类id',
`tagname` varchar(30) NOT NULL DEFAULT '' COMMENT '广告位标识',
`ad_type` tinyint(2) unsigned NOT NULL DEFAULT '0' COMMENT '广告类型',
`timeset` tinyint(2) unsigned NOT NULL DEFAULT '0' COMMENT '时间限制:0-永不过期,1-在设内时间内有效',
`start_time` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '开始时间',
`end_time` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '结束时间',
`name` varchar(60) NOT NULL DEFAULT '' COMMENT '广告位名称',
`content` text NOT NULL COMMENT '广告内容',
`expcontent` text NOT NULL COMMENT '过期显示内容',
`create_time` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '创建时间',
`update_time` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '更新时间',
`status` tinyint(2) unsigned NOT NULL DEFAULT '0' COMMENT '状态',
PRIMARY KEY (`id`)
) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='广告表';
-- -----------------------------
-- Data for table `dp_cms_advert` (none exported)
-- -----------------------------
-- -----------------------------
-- Table structure for `dp_cms_advert_type` (advert category table)
-- -----------------------------
DROP TABLE IF EXISTS `dp_cms_advert_type`;
CREATE TABLE `dp_cms_advert_type` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT,
`name` varchar(32) NOT NULL DEFAULT '' COMMENT '分类名称',
`create_time` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '创建时间',
`update_time` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '更新时间',
`status` tinyint(2) unsigned NOT NULL DEFAULT '0' COMMENT '状态',
PRIMARY KEY (`id`)
) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='广告分类表';
-- -----------------------------
-- Data for table `dp_cms_advert_type` (none exported)
-- -----------------------------
-- -----------------------------
-- Table structure for `dp_cms_column` (CMS column/section tree; `pid` is the
-- parent id, `type` distinguishes list columns, external links, and covers)
-- -----------------------------
DROP TABLE IF EXISTS `dp_cms_column`;
CREATE TABLE `dp_cms_column` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT,
`pid` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '父级id',
`name` varchar(32) NOT NULL DEFAULT '' COMMENT '栏目名称',
`model` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '文档模型id',
`url` varchar(255) NOT NULL DEFAULT '' COMMENT '链接',
`target` varchar(16) NOT NULL DEFAULT '_self' COMMENT '链接打开方式',
`content` text NOT NULL COMMENT '内容',
`icon` varchar(64) NOT NULL DEFAULT '' COMMENT '字体图标',
`index_template` varchar(32) NOT NULL DEFAULT '' COMMENT '封面模板',
`list_template` varchar(32) NOT NULL DEFAULT '' COMMENT '列表页模板',
`detail_template` varchar(32) NOT NULL DEFAULT '' COMMENT '详情页模板',
`post_auth` tinyint(2) unsigned NOT NULL DEFAULT '0' COMMENT '投稿权限',
`create_time` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '创建时间',
`update_time` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '更新时间',
`sort` int(11) NOT NULL DEFAULT '100' COMMENT '排序',
`status` tinyint(2) unsigned NOT NULL DEFAULT '0' COMMENT '状态',
`hide` tinyint(2) unsigned NOT NULL DEFAULT '0' COMMENT '是否隐藏',
`rank_auth` int(11) NOT NULL DEFAULT '0' COMMENT '浏览权限,-1待审核,0为开放浏览,大于0则为对应的用户角色id',
`type` tinyint(2) unsigned NOT NULL DEFAULT '0' COMMENT '栏目属性:0-最终列表栏目,1-外部链接,2-频道封面',
PRIMARY KEY (`id`)
) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='栏目表';
-- -----------------------------
-- Data for table `dp_cms_column` (none exported)
-- -----------------------------
-- -----------------------------
-- Table structure for `dp_cms_document` (base document table; model-specific
-- fields live in per-model extension tables referenced by `model`)
-- -----------------------------
DROP TABLE IF EXISTS `dp_cms_document`;
CREATE TABLE `dp_cms_document` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT,
`cid` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '栏目id',
`model` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '文档模型ID',
`title` varchar(256) NOT NULL DEFAULT '' COMMENT '标题',
`shorttitle` varchar(32) NOT NULL DEFAULT '' COMMENT '简略标题',
`uid` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '用户ID',
`flag` set('j','p','b','s','a','f','c','h') DEFAULT NULL COMMENT '自定义属性',
`view` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '阅读量',
`comment` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '评论数',
`good` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '点赞数',
`bad` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '踩数',
`mark` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '收藏数量',
`create_time` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '创建时间',
`update_time` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '更新时间',
`sort` int(11) NOT NULL DEFAULT '100' COMMENT '排序',
`status` tinyint(2) unsigned NOT NULL DEFAULT '0' COMMENT '状态',
`trash` tinyint(2) unsigned NOT NULL DEFAULT '0' COMMENT '回收站',
PRIMARY KEY (`id`)
) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='文档基础表';
-- -----------------------------
-- Data for table `dp_cms_document` (none exported)
-- -----------------------------
-- -----------------------------
-- Table structure for `dp_cms_field` (document field definitions; each row
-- describes one form field of a document model, including linked-dropdown
-- plumbing: `ajax_url`, `next_items`, `table`, `level`, `key`, `option`, `pid`)
-- NOTE(review): the COMMENT on `id` ('字段名称' = "field name") looks
-- misplaced — it describes `name`, not the auto-increment id; verify.
-- -----------------------------
DROP TABLE IF EXISTS `dp_cms_field`;
CREATE TABLE `dp_cms_field` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT COMMENT '字段名称',
`name` varchar(32) NOT NULL,
`title` varchar(32) NOT NULL DEFAULT '' COMMENT '字段标题',
`type` varchar(32) NOT NULL DEFAULT '' COMMENT '字段类型',
`define` varchar(128) NOT NULL DEFAULT '' COMMENT '字段定义',
`value` text NULL COMMENT '默认值',
`options` text NULL COMMENT '额外选项',
`tips` varchar(256) NOT NULL DEFAULT '' COMMENT '提示说明',
`fixed` tinyint(2) unsigned NOT NULL DEFAULT '0' COMMENT '是否为固定字段',
`show` tinyint(2) unsigned NOT NULL DEFAULT '0' COMMENT '是否显示',
`model` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '所属文档模型id',
`ajax_url` varchar(256) NOT NULL DEFAULT '' COMMENT '联动下拉框ajax地址',
`next_items` varchar(256) NOT NULL DEFAULT '' COMMENT '联动下拉框的下级下拉框名,多个以逗号隔开',
`param` varchar(32) NOT NULL DEFAULT '' COMMENT '联动下拉框请求参数名',
`format` varchar(32) NOT NULL DEFAULT '' COMMENT '格式,用于格式文本',
`table` varchar(32) NOT NULL DEFAULT '' COMMENT '表名,只用于快速联动类型',
`level` tinyint(2) unsigned NOT NULL DEFAULT '2' COMMENT '联动级别,只用于快速联动类型',
`key` varchar(32) NOT NULL DEFAULT '' COMMENT '键字段,只用于快速联动类型',
`option` varchar(32) NOT NULL DEFAULT '' COMMENT '值字段,只用于快速联动类型',
`pid` varchar(32) NOT NULL DEFAULT '' COMMENT '父级id字段,只用于快速联动类型',
`ak` varchar(32) NOT NULL DEFAULT '' COMMENT '百度地图appkey',
`create_time` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '创建时间',
`update_time` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '更新时间',
`sort` int(11) NOT NULL DEFAULT '100' COMMENT '排序',
`status` tinyint(2) unsigned NOT NULL DEFAULT '0' COMMENT '状态',
PRIMARY KEY (`id`)
) ENGINE=MyISAM AUTO_INCREMENT=18 DEFAULT CHARSET=utf8 COMMENT='文档字段表';
-- -----------------------------
-- Data for table `dp_cms_field` (seed rows for the 17 built-in fields follow)
-- -----------------------------
INSERT INTO `dp_cms_field` VALUES ('1', 'id', 'ID', 'text', 'int(11) UNSIGNED NOT NULL', '0', '', 'ID', '0', '0', '0', '', '', '', '', '', '0', '', '', '', '', '1480562978', '1480562978', '100', '1');
INSERT INTO `dp_cms_field` VALUES ('2', 'cid', '栏目', 'select', 'int(11) UNSIGNED NOT NULL', '0', '', '请选择所属栏目', '0', '0', '0', '', '', '', '', '', '0', '', '', '', '', '1480562978', '1480562978', '100', '1');
INSERT INTO `dp_cms_field` VALUES ('3', 'uid', '用户ID', 'text', 'int(11) UNSIGNED NOT NULL', '0', '', '', '0', '0', '0', '', '', '', '', '', '0', '', '', '', '', '1480563110', '1480563110', '100', '1');
INSERT INTO `dp_cms_field` VALUES ('4', 'model', '模型ID', 'text', 'int(11) UNSIGNED NOT NULL', '0', '', '', '0', '0', '0', '', '', '', '', '', '0', '', '', '', '', '1480563110', '1480563110', '100', '1');
INSERT INTO `dp_cms_field` VALUES ('5', 'title', '标题', 'text', 'varchar(128) NOT NULL', '', '', '文档标题', '0', '1', '0', '', '', '', '', '', '0', '', '', '', '', '1480575844', '1480576134', '1', '1');
INSERT INTO `dp_cms_field` VALUES ('6', 'shorttitle', '简略标题', 'text', 'varchar(32) NOT NULL', '', '', '简略标题', '0', '1', '0', '', '', '', '', '', '0', '', '', '', '', '1480575844', '1480576134', '1', '1');
INSERT INTO `dp_cms_field` VALUES ('7', 'flag', '自定义属性', 'checkbox', 'set(\'j\',\'p\',\'b\',\'s\',\'a\',\'f\',\'h\',\'c\') NULL DEFAULT NULL', '', 'j:跳转\r\np:图片\r\nb:加粗\r\ns:滚动\r\na:特荐\r\nf:幻灯\r\nh:头条\r\nc:推荐', '自定义属性', '0', '1', '0', '', '', '', '', '', '0', '', '', '', '', '1480671258', '1480671258', '100', '1');
INSERT INTO `dp_cms_field` VALUES ('8', 'view', '阅读量', 'text', 'int(11) UNSIGNED NOT NULL', '0', '', '', '0', '1', '0', '', '', '', '', '', '0', '', '', '', '', '1480563149', '1480563149', '100', '1');
INSERT INTO `dp_cms_field` VALUES ('9', 'comment', '评论数', 'text', 'int(11) UNSIGNED NOT NULL', '0', '', '', '0', '0', '0', '', '', '', '', '', '0', '', '', '', '', '1480563189', '1480563189', '100', '1');
INSERT INTO `dp_cms_field` VALUES ('10', 'good', '点赞数', 'text', 'int(11) UNSIGNED NOT NULL', '0', '', '', '0', '0', '0', '', '', '', '', '', '0', '', '', '', '', '1480563279', '1480563279', '100', '1');
INSERT INTO `dp_cms_field` VALUES ('11', 'bad', '踩数', 'text', 'int(11) UNSIGNED NOT NULL', '0', '', '', '0', '0', '0', '', '', '', '', '', '0', '', '', '', '', '1480563330', '1480563330', '100', '1');
INSERT INTO `dp_cms_field` VALUES ('12', 'mark', '收藏数量', 'text', 'int(11) UNSIGNED NOT NULL', '0', '', '', '0', '0', '0', '', '', '', '', '', '0', '', '', '', '', '1480563372', '1480563372', '100', '1');
INSERT INTO `dp_cms_field` VALUES ('13', 'create_time', '创建时间', 'datetime', 'int(11) UNSIGNED NOT NULL', '0', '', '', '0', '0', '0', '', '', '', '', '', '0', '', '', '', '', '1480563406', '1480563406', '100', '1');
INSERT INTO `dp_cms_field` VALUES ('14', 'update_time', '更新时间', 'datetime', 'int(11) UNSIGNED NOT NULL', '0', '', '', '0', '0', '0', '', '', '', '', '', '0', '', '', '', '', '1480563432', '1480563432', '100', '1');
INSERT INTO `dp_cms_field` VALUES ('15', 'sort', '排序', 'text', 'int(11) NOT NULL', '100', '', '', '0', '1', '0', '', '', '', '', '', '0', '', '', '', '', '1480563510', '1480563510', '100', '1');
INSERT INTO `dp_cms_field` VALUES ('16', 'status', '状态', 'radio', 'tinyint(2) UNSIGNED NOT NULL', '1', '0:禁用\r\n1:启用', '', '0', '1', '0', '', '', '', '', '', '0', '', '', '', '', '1480563576', '1480563576', '100', '1');
INSERT INTO `dp_cms_field` VALUES ('17', 'trash', '回收站', 'text', 'tinyint(2) UNSIGNED NOT NULL', '0', '', '', '0', '0', '0', '', '', '', '', '', '0', '', '', '', '', '1480563576', '1480563576', '100', '1');
-- -----------------------------
-- Table structure for `dp_cms_link` (links table; text or image links with
-- logo attachment id and contact info)
-- NOTE(review): table COMMENT '有钱链接表' may be a typo for '友情链接表'
-- ("friendly links") — confirm before changing, as it is schema data.
-- -----------------------------
DROP TABLE IF EXISTS `dp_cms_link`;
CREATE TABLE `dp_cms_link` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT,
`type` tinyint(2) unsigned NOT NULL DEFAULT '1' COMMENT '类型:1-文字链接,2-图片链接',
`title` varchar(128) NOT NULL DEFAULT '' COMMENT '链接标题',
`url` varchar(255) NOT NULL DEFAULT '' COMMENT '链接地址',
`logo` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '链接LOGO',
`contact` varchar(255) NOT NULL DEFAULT '' COMMENT '联系方式',
`sort` int(11) NOT NULL DEFAULT '100',
`status` tinyint(2) unsigned NOT NULL DEFAULT '0' COMMENT '状态',
`create_time` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '创建时间',
`update_time` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '更新时间',
PRIMARY KEY (`id`)
) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='有钱链接表';
-- -----------------------------
-- Data for table `dp_cms_link` (none exported)
-- -----------------------------
-- -----------------------------
-- Table structure for `dp_cms_menu` (navigation menu items; `nid` ties an item
-- to a nav in `dp_cms_nav`, `pid` nests items, `type` selects link source)
-- -----------------------------
DROP TABLE IF EXISTS `dp_cms_menu`;
CREATE TABLE `dp_cms_menu` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT,
`nid` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '导航id',
`pid` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '父级id',
`column` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '栏目id',
`page` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '单页id',
`type` tinyint(2) unsigned NOT NULL DEFAULT '0' COMMENT '类型:0-栏目链接,1-单页链接,2-自定义链接',
`title` varchar(128) NOT NULL DEFAULT '' COMMENT '菜单标题',
`url` varchar(255) NOT NULL DEFAULT '' COMMENT '链接',
`css` varchar(64) NOT NULL DEFAULT '' COMMENT 'css类',
`rel` varchar(64) NOT NULL DEFAULT '' COMMENT '链接关系网',
`target` varchar(16) NOT NULL DEFAULT '' COMMENT '打开方式',
`create_time` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '创建时间',
`update_time` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '更新时间',
`sort` int(11) NOT NULL DEFAULT '100' COMMENT '排序',
`status` tinyint(2) unsigned NOT NULL DEFAULT '0' COMMENT '状态',
PRIMARY KEY (`id`)
) ENGINE=MyISAM AUTO_INCREMENT=7 DEFAULT CHARSET=utf8 COMMENT='菜单表';
-- -----------------------------
-- 表数据 `dp_cms_menu`
-- -----------------------------
INSERT INTO `dp_cms_menu` VALUES ('1', '1', '0', '0', '0', '2', '首页', 'cms/index/index', '', '', '_self', '1492345605', '1492345605', '100', '1');
INSERT INTO `dp_cms_menu` VALUES ('2', '2', '0', '0', '0', '2', '关于我们', 'http://www.dolphinphp.com', '', '', '_self', '1492346763', '1492346763', '100', '1');
INSERT INTO `dp_cms_menu` VALUES ('3', '3', '0', '0', '0', '2', '开发文档', 'http://www.kancloud.cn/ming5112/dolphinphp', '', '', '_self', '1492346812', '1492346812', '100', '1');
INSERT INTO `dp_cms_menu` VALUES ('4', '3', '0', '0', '0', '2', '开发者社区', 'http://bbs.dolphinphp.com/', '', '', '_self', '1492346832', '1492346832', '100', '1');
INSERT INTO `dp_cms_menu` VALUES ('5', '1', '0', '0', '0', '2', '二级菜单', 'http://www.dolphinphp.com', '', '', '_self', '1492347372', '1492347510', '100', '1');
INSERT INTO `dp_cms_menu` VALUES ('6', '1', '5', '0', '0', '2', '子菜单', 'http://www.dolphinphp.com', '', '', '_self', '1492347388', '1492347520', '100', '1');
-- -----------------------------
-- Table structure for `dp_cms_model` (content model registry; `table` names
-- the per-model extension table that stores model-specific document fields)
-- -----------------------------
DROP TABLE IF EXISTS `dp_cms_model`;
CREATE TABLE `dp_cms_model` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT,
`name` varchar(32) NOT NULL DEFAULT '' COMMENT '模型名称',
`title` varchar(32) NOT NULL DEFAULT '' COMMENT '模型标题',
`table` varchar(64) NOT NULL DEFAULT '' COMMENT '附加表名称',
`type` tinyint(2) NOT NULL DEFAULT '1' COMMENT '模型类别:0-系统模型,1-普通模型,2-独立模型',
`icon` varchar(64) NOT NULL,
`sort` int(11) NOT NULL DEFAULT '100' COMMENT '排序',
`system` tinyint(2) unsigned NOT NULL DEFAULT '0' COMMENT '是否系统模型',
`create_time` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '创建时间',
`update_time` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '更新时间',
`status` tinyint(2) unsigned NOT NULL DEFAULT '0' COMMENT '状态',
PRIMARY KEY (`id`)
) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='内容模型表';
-- -----------------------------
-- Table structure for `dp_cms_nav` (navigation containers; menu items in
-- `dp_cms_menu` reference these by id)
-- -----------------------------
DROP TABLE IF EXISTS `dp_cms_nav`;
CREATE TABLE `dp_cms_nav` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT,
`tag` varchar(32) NOT NULL DEFAULT '' COMMENT '导航标识',
`title` varchar(32) NOT NULL DEFAULT '' COMMENT '菜单标题',
`create_time` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '创建时间',
`update_time` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '更新时间',
`status` tinyint(2) unsigned NOT NULL DEFAULT '0' COMMENT '状态',
PRIMARY KEY (`id`)
) ENGINE=MyISAM AUTO_INCREMENT=4 DEFAULT CHARSET=utf8 COMMENT='导航表';
-- -----------------------------
-- Data for table `dp_cms_nav` (seed rows follow)
-- -----------------------------
INSERT INTO `dp_cms_nav` VALUES ('1', 'main_nav', '顶部导航', '1492345083', '1492345083', '1');
INSERT INTO `dp_cms_nav` VALUES ('2', 'about_nav', '底部关于', '1492346685', '1492346685', '1');
INSERT INTO `dp_cms_nav` VALUES ('3', 'support_nav', '服务与支持', '1492346715', '1492346715', '1');
-- -----------------------------
-- Table structure for `dp_cms_page` (standalone single pages with their own
-- SEO fields, template, cover attachment id, and view counter)
-- -----------------------------
DROP TABLE IF EXISTS `dp_cms_page`;
CREATE TABLE `dp_cms_page` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT,
`title` varchar(64) NOT NULL DEFAULT '' COMMENT '单页标题',
`content` mediumtext NOT NULL COMMENT '单页内容',
`keywords` varchar(32) NOT NULL DEFAULT '' COMMENT '关键词',
`description` varchar(250) NOT NULL DEFAULT '' COMMENT '页面描述',
`template` varchar(32) NOT NULL DEFAULT '' COMMENT '模板文件',
`cover` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '单页封面',
`view` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '阅读量',
`create_time` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '创建时间',
`update_time` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '更新时间',
`status` tinyint(2) unsigned NOT NULL DEFAULT '0' COMMENT '状态',
PRIMARY KEY (`id`)
) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='单页表';
-- -----------------------------
-- Data for table `dp_cms_page` (none exported)
-- -----------------------------
-- -----------------------------
-- Table structure for `dp_cms_slider` (carousel/slider entries; `cover` is
-- an attachment id, not an image path)
-- -----------------------------
DROP TABLE IF EXISTS `dp_cms_slider`;
CREATE TABLE `dp_cms_slider` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT,
`title` varchar(32) NOT NULL DEFAULT '' COMMENT '标题',
`cover` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '封面id',
`url` varchar(255) NOT NULL DEFAULT '' COMMENT '链接地址',
`create_time` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '创建时间',
`update_time` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '更新时间',
`sort` int(11) unsigned NOT NULL DEFAULT '100' COMMENT '排序',
`status` tinyint(2) unsigned NOT NULL DEFAULT '0' COMMENT '状态',
PRIMARY KEY (`id`)
) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='滚动图片表';
-- -----------------------------
-- Data for table `dp_cms_slider` (none exported)
-- -----------------------------
-- -----------------------------
-- Table structure for `dp_cms_support` (customer-service contact table with
-- per-channel handles: QQ, MSN, Taobao, Alibaba, Skype)
-- -----------------------------
DROP TABLE IF EXISTS `dp_cms_support`;
CREATE TABLE `dp_cms_support` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT,
`name` varchar(128) NOT NULL DEFAULT '' COMMENT '客服名称',
`qq` varchar(16) NOT NULL DEFAULT '' COMMENT 'QQ',
`msn` varchar(100) NOT NULL DEFAULT '' COMMENT 'msn',
`taobao` varchar(100) NOT NULL DEFAULT '' COMMENT 'taobao',
`alibaba` varchar(100) NOT NULL DEFAULT '' COMMENT 'alibaba',
`skype` varchar(100) NOT NULL DEFAULT '' COMMENT 'skype',
`status` tinyint(2) unsigned NOT NULL DEFAULT '0' COMMENT '状态',
`sort` int(11) unsigned NOT NULL DEFAULT '100' COMMENT '排序',
`create_time` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '创建时间',
`update_time` int(11) unsigned NOT NULL DEFAULT '0' COMMENT '更新时间',
PRIMARY KEY (`id`)
) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='客服表';
-- -----------------------------
-- Data for table `dp_cms_support` (none exported)
-- -----------------------------
|
{
"pile_set_name": "Github"
}
|
/*
* Copyright (c) 2012 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <arm_neon.h>
#include "vp8/encoder/denoising.h"
#include "vpx_mem/vpx_mem.h"
#include "./vp8_rtcd.h"
/*
* The filter function was modified to reduce the computational complexity.
*
* Step 1:
* Instead of applying tap coefficients for each pixel, we calculated the
* pixel adjustments vs. pixel diff value ahead of time.
* adjustment = filtered_value - current_raw
* = (filter_coefficient * diff + 128) >> 8
* where
* filter_coefficient = (255 << 8) / (256 + ((abs_diff * 330) >> 3));
* filter_coefficient += filter_coefficient /
* (3 + motion_magnitude_adjustment);
* filter_coefficient is clamped to 0 ~ 255.
*
* Step 2:
* The adjustment vs. diff curve becomes flat very quick when diff increases.
* This allowed us to use only several levels to approximate the curve without
* changing the filtering algorithm too much.
* The adjustments were further corrected by checking the motion magnitude.
* The levels used are:
* diff level adjustment w/o adjustment w/
* motion correction motion correction
* [-255, -16] 3 -6 -7
* [-15, -8] 2 -4 -5
* [-7, -4] 1 -3 -4
* [-3, 3] 0 diff diff
* [4, 7] 1 3 4
* [8, 15] 2 4 5
* [16, 255] 3 6 7
*/
int vp8_denoiser_filter_neon(unsigned char *mc_running_avg_y,
int mc_running_avg_y_stride,
unsigned char *running_avg_y,
int running_avg_y_stride, unsigned char *sig,
int sig_stride, unsigned int motion_magnitude,
int increase_denoising) {
/* If motion_magnitude is small, making the denoiser more aggressive by
* increasing the adjustment for each level, level1 adjustment is
* increased, the deltas stay the same.
*/
int shift_inc =
(increase_denoising && motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD)
? 1
: 0;
const uint8x16_t v_level1_adjustment = vmovq_n_u8(
(motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 4 + shift_inc : 3);
const uint8x16_t v_delta_level_1_and_2 = vdupq_n_u8(1);
const uint8x16_t v_delta_level_2_and_3 = vdupq_n_u8(2);
const uint8x16_t v_level1_threshold = vmovq_n_u8(4 + shift_inc);
const uint8x16_t v_level2_threshold = vdupq_n_u8(8);
const uint8x16_t v_level3_threshold = vdupq_n_u8(16);
int64x2_t v_sum_diff_total = vdupq_n_s64(0);
/* Go over lines. */
int r;
for (r = 0; r < 16; ++r) {
/* Load inputs. */
const uint8x16_t v_sig = vld1q_u8(sig);
const uint8x16_t v_mc_running_avg_y = vld1q_u8(mc_running_avg_y);
/* Calculate absolute difference and sign masks. */
const uint8x16_t v_abs_diff = vabdq_u8(v_sig, v_mc_running_avg_y);
const uint8x16_t v_diff_pos_mask = vcltq_u8(v_sig, v_mc_running_avg_y);
const uint8x16_t v_diff_neg_mask = vcgtq_u8(v_sig, v_mc_running_avg_y);
/* Figure out which level that put us in. */
const uint8x16_t v_level1_mask = vcleq_u8(v_level1_threshold, v_abs_diff);
const uint8x16_t v_level2_mask = vcleq_u8(v_level2_threshold, v_abs_diff);
const uint8x16_t v_level3_mask = vcleq_u8(v_level3_threshold, v_abs_diff);
/* Calculate absolute adjustments for level 1, 2 and 3. */
const uint8x16_t v_level2_adjustment =
vandq_u8(v_level2_mask, v_delta_level_1_and_2);
const uint8x16_t v_level3_adjustment =
vandq_u8(v_level3_mask, v_delta_level_2_and_3);
const uint8x16_t v_level1and2_adjustment =
vaddq_u8(v_level1_adjustment, v_level2_adjustment);
const uint8x16_t v_level1and2and3_adjustment =
vaddq_u8(v_level1and2_adjustment, v_level3_adjustment);
/* Figure adjustment absolute value by selecting between the absolute
* difference if in level0 or the value for level 1, 2 and 3.
*/
const uint8x16_t v_abs_adjustment =
vbslq_u8(v_level1_mask, v_level1and2and3_adjustment, v_abs_diff);
/* Calculate positive and negative adjustments. Apply them to the signal
* and accumulate them. Adjustments are less than eight and the maximum
* sum of them (7 * 16) can fit in a signed char.
*/
const uint8x16_t v_pos_adjustment =
vandq_u8(v_diff_pos_mask, v_abs_adjustment);
const uint8x16_t v_neg_adjustment =
vandq_u8(v_diff_neg_mask, v_abs_adjustment);
uint8x16_t v_running_avg_y = vqaddq_u8(v_sig, v_pos_adjustment);
v_running_avg_y = vqsubq_u8(v_running_avg_y, v_neg_adjustment);
/* Store results. */
vst1q_u8(running_avg_y, v_running_avg_y);
/* Sum all the accumulators to have the sum of all pixel differences
* for this macroblock.
*/
{
const int8x16_t v_sum_diff =
vqsubq_s8(vreinterpretq_s8_u8(v_pos_adjustment),
vreinterpretq_s8_u8(v_neg_adjustment));
const int16x8_t fe_dc_ba_98_76_54_32_10 = vpaddlq_s8(v_sum_diff);
const int32x4_t fedc_ba98_7654_3210 =
vpaddlq_s16(fe_dc_ba_98_76_54_32_10);
const int64x2_t fedcba98_76543210 = vpaddlq_s32(fedc_ba98_7654_3210);
v_sum_diff_total = vqaddq_s64(v_sum_diff_total, fedcba98_76543210);
}
/* Update pointers for next iteration. */
sig += sig_stride;
mc_running_avg_y += mc_running_avg_y_stride;
running_avg_y += running_avg_y_stride;
}
/* Too much adjustments => copy block. */
{
int64x1_t x = vqadd_s64(vget_high_s64(v_sum_diff_total),
vget_low_s64(v_sum_diff_total));
int sum_diff = vget_lane_s32(vabs_s32(vreinterpret_s32_s64(x)), 0);
int sum_diff_thresh = SUM_DIFF_THRESHOLD;
if (increase_denoising) sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH;
if (sum_diff > sum_diff_thresh) {
// Before returning to copy the block (i.e., apply no denoising),
// checK if we can still apply some (weaker) temporal filtering to
// this block, that would otherwise not be denoised at all. Simplest
// is to apply an additional adjustment to running_avg_y to bring it
// closer to sig. The adjustment is capped by a maximum delta, and
// chosen such that in most cases the resulting sum_diff will be
// within the accceptable range given by sum_diff_thresh.
// The delta is set by the excess of absolute pixel diff over the
// threshold.
int delta = ((sum_diff - sum_diff_thresh) >> 8) + 1;
// Only apply the adjustment for max delta up to 3.
if (delta < 4) {
const uint8x16_t k_delta = vmovq_n_u8(delta);
sig -= sig_stride * 16;
mc_running_avg_y -= mc_running_avg_y_stride * 16;
running_avg_y -= running_avg_y_stride * 16;
for (r = 0; r < 16; ++r) {
uint8x16_t v_running_avg_y = vld1q_u8(running_avg_y);
const uint8x16_t v_sig = vld1q_u8(sig);
const uint8x16_t v_mc_running_avg_y = vld1q_u8(mc_running_avg_y);
/* Calculate absolute difference and sign masks. */
const uint8x16_t v_abs_diff = vabdq_u8(v_sig, v_mc_running_avg_y);
const uint8x16_t v_diff_pos_mask =
vcltq_u8(v_sig, v_mc_running_avg_y);
const uint8x16_t v_diff_neg_mask =
vcgtq_u8(v_sig, v_mc_running_avg_y);
// Clamp absolute difference to delta to get the adjustment.
const uint8x16_t v_abs_adjustment = vminq_u8(v_abs_diff, (k_delta));
const uint8x16_t v_pos_adjustment =
vandq_u8(v_diff_pos_mask, v_abs_adjustment);
const uint8x16_t v_neg_adjustment =
vandq_u8(v_diff_neg_mask, v_abs_adjustment);
v_running_avg_y = vqsubq_u8(v_running_avg_y, v_pos_adjustment);
v_running_avg_y = vqaddq_u8(v_running_avg_y, v_neg_adjustment);
/* Store results. */
vst1q_u8(running_avg_y, v_running_avg_y);
{
const int8x16_t v_sum_diff =
vqsubq_s8(vreinterpretq_s8_u8(v_neg_adjustment),
vreinterpretq_s8_u8(v_pos_adjustment));
const int16x8_t fe_dc_ba_98_76_54_32_10 = vpaddlq_s8(v_sum_diff);
const int32x4_t fedc_ba98_7654_3210 =
vpaddlq_s16(fe_dc_ba_98_76_54_32_10);
const int64x2_t fedcba98_76543210 =
vpaddlq_s32(fedc_ba98_7654_3210);
v_sum_diff_total = vqaddq_s64(v_sum_diff_total, fedcba98_76543210);
}
/* Update pointers for next iteration. */
sig += sig_stride;
mc_running_avg_y += mc_running_avg_y_stride;
running_avg_y += running_avg_y_stride;
}
{
// Update the sum of all pixel differences of this MB.
x = vqadd_s64(vget_high_s64(v_sum_diff_total),
vget_low_s64(v_sum_diff_total));
sum_diff = vget_lane_s32(vabs_s32(vreinterpret_s32_s64(x)), 0);
if (sum_diff > sum_diff_thresh) {
return COPY_BLOCK;
}
}
} else {
return COPY_BLOCK;
}
}
}
/* Tell above level that block was filtered. */
running_avg_y -= running_avg_y_stride * 16;
sig -= sig_stride * 16;
vp8_copy_mem16x16(running_avg_y, running_avg_y_stride, sig, sig_stride);
return FILTER_BLOCK;
}
int vp8_denoiser_filter_uv_neon(unsigned char *mc_running_avg,
int mc_running_avg_stride,
unsigned char *running_avg,
int running_avg_stride, unsigned char *sig,
int sig_stride, unsigned int motion_magnitude,
int increase_denoising) {
/* If motion_magnitude is small, making the denoiser more aggressive by
* increasing the adjustment for each level, level1 adjustment is
* increased, the deltas stay the same.
*/
int shift_inc =
(increase_denoising && motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD_UV)
? 1
: 0;
const uint8x16_t v_level1_adjustment = vmovq_n_u8(
(motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD_UV) ? 4 + shift_inc : 3);
const uint8x16_t v_delta_level_1_and_2 = vdupq_n_u8(1);
const uint8x16_t v_delta_level_2_and_3 = vdupq_n_u8(2);
const uint8x16_t v_level1_threshold = vmovq_n_u8(4 + shift_inc);
const uint8x16_t v_level2_threshold = vdupq_n_u8(8);
const uint8x16_t v_level3_threshold = vdupq_n_u8(16);
int64x2_t v_sum_diff_total = vdupq_n_s64(0);
int r;
{
uint16x4_t v_sum_block = vdup_n_u16(0);
// Avoid denoising color signal if its close to average level.
for (r = 0; r < 8; ++r) {
const uint8x8_t v_sig = vld1_u8(sig);
const uint16x4_t _76_54_32_10 = vpaddl_u8(v_sig);
v_sum_block = vqadd_u16(v_sum_block, _76_54_32_10);
sig += sig_stride;
}
sig -= sig_stride * 8;
{
const uint32x2_t _7654_3210 = vpaddl_u16(v_sum_block);
const uint64x1_t _76543210 = vpaddl_u32(_7654_3210);
const int sum_block = vget_lane_s32(vreinterpret_s32_u64(_76543210), 0);
if (abs(sum_block - (128 * 8 * 8)) < SUM_DIFF_FROM_AVG_THRESH_UV) {
return COPY_BLOCK;
}
}
}
/* Go over lines. */
for (r = 0; r < 4; ++r) {
/* Load inputs. */
const uint8x8_t v_sig_lo = vld1_u8(sig);
const uint8x8_t v_sig_hi = vld1_u8(&sig[sig_stride]);
const uint8x16_t v_sig = vcombine_u8(v_sig_lo, v_sig_hi);
const uint8x8_t v_mc_running_avg_lo = vld1_u8(mc_running_avg);
const uint8x8_t v_mc_running_avg_hi =
vld1_u8(&mc_running_avg[mc_running_avg_stride]);
const uint8x16_t v_mc_running_avg =
vcombine_u8(v_mc_running_avg_lo, v_mc_running_avg_hi);
/* Calculate absolute difference and sign masks. */
const uint8x16_t v_abs_diff = vabdq_u8(v_sig, v_mc_running_avg);
const uint8x16_t v_diff_pos_mask = vcltq_u8(v_sig, v_mc_running_avg);
const uint8x16_t v_diff_neg_mask = vcgtq_u8(v_sig, v_mc_running_avg);
/* Figure out which level that put us in. */
const uint8x16_t v_level1_mask = vcleq_u8(v_level1_threshold, v_abs_diff);
const uint8x16_t v_level2_mask = vcleq_u8(v_level2_threshold, v_abs_diff);
const uint8x16_t v_level3_mask = vcleq_u8(v_level3_threshold, v_abs_diff);
/* Calculate absolute adjustments for level 1, 2 and 3. */
const uint8x16_t v_level2_adjustment =
vandq_u8(v_level2_mask, v_delta_level_1_and_2);
const uint8x16_t v_level3_adjustment =
vandq_u8(v_level3_mask, v_delta_level_2_and_3);
const uint8x16_t v_level1and2_adjustment =
vaddq_u8(v_level1_adjustment, v_level2_adjustment);
const uint8x16_t v_level1and2and3_adjustment =
vaddq_u8(v_level1and2_adjustment, v_level3_adjustment);
/* Figure adjustment absolute value by selecting between the absolute
* difference if in level0 or the value for level 1, 2 and 3.
*/
const uint8x16_t v_abs_adjustment =
vbslq_u8(v_level1_mask, v_level1and2and3_adjustment, v_abs_diff);
/* Calculate positive and negative adjustments. Apply them to the signal
* and accumulate them. Adjustments are less than eight and the maximum
* sum of them (7 * 16) can fit in a signed char.
*/
const uint8x16_t v_pos_adjustment =
vandq_u8(v_diff_pos_mask, v_abs_adjustment);
const uint8x16_t v_neg_adjustment =
vandq_u8(v_diff_neg_mask, v_abs_adjustment);
uint8x16_t v_running_avg = vqaddq_u8(v_sig, v_pos_adjustment);
v_running_avg = vqsubq_u8(v_running_avg, v_neg_adjustment);
/* Store results. */
vst1_u8(running_avg, vget_low_u8(v_running_avg));
vst1_u8(&running_avg[running_avg_stride], vget_high_u8(v_running_avg));
/* Sum all the accumulators to have the sum of all pixel differences
* for this macroblock.
*/
{
const int8x16_t v_sum_diff =
vqsubq_s8(vreinterpretq_s8_u8(v_pos_adjustment),
vreinterpretq_s8_u8(v_neg_adjustment));
const int16x8_t fe_dc_ba_98_76_54_32_10 = vpaddlq_s8(v_sum_diff);
const int32x4_t fedc_ba98_7654_3210 =
vpaddlq_s16(fe_dc_ba_98_76_54_32_10);
const int64x2_t fedcba98_76543210 = vpaddlq_s32(fedc_ba98_7654_3210);
v_sum_diff_total = vqaddq_s64(v_sum_diff_total, fedcba98_76543210);
}
/* Update pointers for next iteration. */
sig += sig_stride * 2;
mc_running_avg += mc_running_avg_stride * 2;
running_avg += running_avg_stride * 2;
}
/* Too much adjustments => copy block. */
{
int64x1_t x = vqadd_s64(vget_high_s64(v_sum_diff_total),
vget_low_s64(v_sum_diff_total));
int sum_diff = vget_lane_s32(vabs_s32(vreinterpret_s32_s64(x)), 0);
int sum_diff_thresh = SUM_DIFF_THRESHOLD_UV;
if (increase_denoising) sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH_UV;
if (sum_diff > sum_diff_thresh) {
// Before returning to copy the block (i.e., apply no denoising),
// checK if we can still apply some (weaker) temporal filtering to
// this block, that would otherwise not be denoised at all. Simplest
// is to apply an additional adjustment to running_avg_y to bring it
// closer to sig. The adjustment is capped by a maximum delta, and
// chosen such that in most cases the resulting sum_diff will be
// within the accceptable range given by sum_diff_thresh.
// The delta is set by the excess of absolute pixel diff over the
// threshold.
int delta = ((sum_diff - sum_diff_thresh) >> 8) + 1;
// Only apply the adjustment for max delta up to 3.
if (delta < 4) {
const uint8x16_t k_delta = vmovq_n_u8(delta);
sig -= sig_stride * 8;
mc_running_avg -= mc_running_avg_stride * 8;
running_avg -= running_avg_stride * 8;
for (r = 0; r < 4; ++r) {
const uint8x8_t v_sig_lo = vld1_u8(sig);
const uint8x8_t v_sig_hi = vld1_u8(&sig[sig_stride]);
const uint8x16_t v_sig = vcombine_u8(v_sig_lo, v_sig_hi);
const uint8x8_t v_mc_running_avg_lo = vld1_u8(mc_running_avg);
const uint8x8_t v_mc_running_avg_hi =
vld1_u8(&mc_running_avg[mc_running_avg_stride]);
const uint8x16_t v_mc_running_avg =
vcombine_u8(v_mc_running_avg_lo, v_mc_running_avg_hi);
/* Calculate absolute difference and sign masks. */
const uint8x16_t v_abs_diff = vabdq_u8(v_sig, v_mc_running_avg);
const uint8x16_t v_diff_pos_mask = vcltq_u8(v_sig, v_mc_running_avg);
const uint8x16_t v_diff_neg_mask = vcgtq_u8(v_sig, v_mc_running_avg);
// Clamp absolute difference to delta to get the adjustment.
const uint8x16_t v_abs_adjustment = vminq_u8(v_abs_diff, (k_delta));
const uint8x16_t v_pos_adjustment =
vandq_u8(v_diff_pos_mask, v_abs_adjustment);
const uint8x16_t v_neg_adjustment =
vandq_u8(v_diff_neg_mask, v_abs_adjustment);
const uint8x8_t v_running_avg_lo = vld1_u8(running_avg);
const uint8x8_t v_running_avg_hi =
vld1_u8(&running_avg[running_avg_stride]);
uint8x16_t v_running_avg =
vcombine_u8(v_running_avg_lo, v_running_avg_hi);
v_running_avg = vqsubq_u8(v_running_avg, v_pos_adjustment);
v_running_avg = vqaddq_u8(v_running_avg, v_neg_adjustment);
/* Store results. */
vst1_u8(running_avg, vget_low_u8(v_running_avg));
vst1_u8(&running_avg[running_avg_stride],
vget_high_u8(v_running_avg));
{
const int8x16_t v_sum_diff =
vqsubq_s8(vreinterpretq_s8_u8(v_neg_adjustment),
vreinterpretq_s8_u8(v_pos_adjustment));
const int16x8_t fe_dc_ba_98_76_54_32_10 = vpaddlq_s8(v_sum_diff);
const int32x4_t fedc_ba98_7654_3210 =
vpaddlq_s16(fe_dc_ba_98_76_54_32_10);
const int64x2_t fedcba98_76543210 =
vpaddlq_s32(fedc_ba98_7654_3210);
v_sum_diff_total = vqaddq_s64(v_sum_diff_total, fedcba98_76543210);
}
/* Update pointers for next iteration. */
sig += sig_stride * 2;
mc_running_avg += mc_running_avg_stride * 2;
running_avg += running_avg_stride * 2;
}
{
// Update the sum of all pixel differences of this MB.
x = vqadd_s64(vget_high_s64(v_sum_diff_total),
vget_low_s64(v_sum_diff_total));
sum_diff = vget_lane_s32(vabs_s32(vreinterpret_s32_s64(x)), 0);
if (sum_diff > sum_diff_thresh) {
return COPY_BLOCK;
}
}
} else {
return COPY_BLOCK;
}
}
}
/* Tell above level that block was filtered. */
running_avg -= running_avg_stride * 8;
sig -= sig_stride * 8;
vp8_copy_mem8x8(running_avg, running_avg_stride, sig, sig_stride);
return FILTER_BLOCK;
}
|
{
"pile_set_name": "Github"
}
|
// Copyright (c) 2015, Cisco Systems
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
// TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is autogenerated
//
// The following edits are possible, without affecting the validity of the
// file:
//
// * Fields may be renamed.
// * Fields may be deleted.
// * The unique numbered tag for a field may be changed, provided that
// the ordering of tags for fields within a message is preserved.
// * Message types may be renamed.
// * Message types may be deleted (if all fields that reference them
// have been deleted).
//
// All Cisco message and field extensions must be preserved (except when the
// field itself is being deleted).
syntax = "proto3";
package cisco_ios_xr_plat_chas_invmgr_oper.platform_inventory.racks.rack.slots.slot.cards.card.attributes.basic_info;
// Bag contains all the basic inventory information for each entity
message inv_xml_entity_basic_info_KEYS {
string name = 1;
string name_1 = 2;
string name_2 = 3;
}
message inv_xml_entity_basic_info {
// name string for the entity
string name = 50;
// describes in user-readable terms what the entity in question does
string description = 51;
// model name
string model_name = 52;
// hw revision string
string hardware_revision = 53;
// serial number
string serial_number = 54;
// firmware revision string
string firmware_revision = 55;
// software revision string
string software_revision = 56;
// maps to the vendor OID string
string vendor_type = 57;
// 1 if Field Replaceable Unit 0, if not
bool is_field_replaceable_unit = 58;
}
|
{
"pile_set_name": "Github"
}
|
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!126 &1
NavMeshProjectSettings:
m_ObjectHideFlags: 0
serializedVersion: 2
areas:
- name: Walkable
cost: 1
- name: Not Walkable
cost: 1
- name: Jump
cost: 2
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
m_LastAgentTypeID: -887442657
m_Settings:
- serializedVersion: 2
agentTypeID: 0
agentRadius: 0.5
agentHeight: 2
agentSlope: 45
agentClimb: 0.75
ledgeDropHeight: 0
maxJumpAcrossDistance: 0
minRegionArea: 2
manualCellSize: 0
cellSize: 0.16666667
manualTileSize: 0
tileSize: 256
accuratePlacement: 0
debug:
m_Flags: 0
m_SettingNames:
- Humanoid
|
{
"pile_set_name": "Github"
}
|
// -*- mode: c++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright 2019 The Mesh Authors. All rights reserved.
// Use of this source code is governed by the Apache License,
// Version 2.0, that can be found in the LICENSE file.
#include <stdlib.h>
#include "runtime.h"
#include "thread_local_heap.h"
using namespace mesh;
static __attribute__((constructor)) void libmesh_init() {
mesh::real::init();
runtime().createSignalFd();
runtime().installSegfaultHandler();
runtime().initMaxMapCount();
char *meshPeriodStr = getenv("MESH_PERIOD_MS");
if (meshPeriodStr) {
long period = strtol(meshPeriodStr, nullptr, 10);
if (period < 0) {
period = 0;
}
runtime().setMeshPeriodMs(std::chrono::milliseconds{period});
}
char *bgThread = getenv("MESH_BACKGROUND_THREAD");
if (!bgThread)
return;
int shouldThread = atoi(bgThread);
if (shouldThread) {
runtime().startBgThread();
}
}
static __attribute__((destructor)) void libmesh_fini() {
char *mstats = getenv("MALLOCSTATS");
if (!mstats)
return;
int mlevel = atoi(mstats);
if (mlevel < 0)
mlevel = 0;
else if (mlevel > 2)
mlevel = 2;
runtime().heap().dumpStats(mlevel, false);
}
namespace mesh {
ATTRIBUTE_NEVER_INLINE
static void *allocSlowpath(size_t sz) {
ThreadLocalHeap *localHeap = ThreadLocalHeap::GetHeap();
return localHeap->malloc(sz);
}
ATTRIBUTE_NEVER_INLINE
static void *cxxNewSlowpath(size_t sz) {
ThreadLocalHeap *localHeap = ThreadLocalHeap::GetHeap();
return localHeap->cxxNew(sz);
}
ATTRIBUTE_NEVER_INLINE
static void freeSlowpath(void *ptr) {
// instead of instantiating a thread-local heap on free, just free
// to the global heap directly
runtime().heap().free(ptr);
}
ATTRIBUTE_NEVER_INLINE
static void *reallocSlowpath(void *oldPtr, size_t newSize) {
ThreadLocalHeap *localHeap = ThreadLocalHeap::GetHeap();
return localHeap->realloc(oldPtr, newSize);
}
ATTRIBUTE_NEVER_INLINE
static void *callocSlowpath(size_t count, size_t size) {
ThreadLocalHeap *localHeap = ThreadLocalHeap::GetHeap();
return localHeap->calloc(count, size);
}
ATTRIBUTE_NEVER_INLINE
static size_t usableSizeSlowpath(void *ptr) {
ThreadLocalHeap *localHeap = ThreadLocalHeap::GetHeap();
return localHeap->getSize(ptr);
}
ATTRIBUTE_NEVER_INLINE
static void *memalignSlowpath(size_t alignment, size_t size) {
ThreadLocalHeap *localHeap = ThreadLocalHeap::GetHeap();
return localHeap->memalign(alignment, size);
}
} // namespace mesh
extern "C" MESH_EXPORT CACHELINE_ALIGNED_FN void *mesh_malloc(size_t sz) {
ThreadLocalHeap *localHeap = ThreadLocalHeap::GetHeapIfPresent();
if (unlikely(localHeap == nullptr)) {
return mesh::allocSlowpath(sz);
}
return localHeap->malloc(sz);
}
#define xxmalloc mesh_malloc
extern "C" MESH_EXPORT CACHELINE_ALIGNED_FN void mesh_free(void *ptr) {
ThreadLocalHeap *localHeap = ThreadLocalHeap::GetHeapIfPresent();
if (unlikely(localHeap == nullptr)) {
mesh::freeSlowpath(ptr);
return;
}
return localHeap->free(ptr);
}
#define xxfree mesh_free
extern "C" MESH_EXPORT CACHELINE_ALIGNED_FN void mesh_sized_free(void *ptr, size_t sz) {
ThreadLocalHeap *localHeap = ThreadLocalHeap::GetHeapIfPresent();
if (unlikely(localHeap == nullptr)) {
mesh::freeSlowpath(ptr);
return;
}
return localHeap->sizedFree(ptr, sz);
}
extern "C" MESH_EXPORT CACHELINE_ALIGNED_FN void *mesh_realloc(void *oldPtr, size_t newSize) {
ThreadLocalHeap *localHeap = ThreadLocalHeap::GetHeapIfPresent();
if (unlikely(localHeap == nullptr)) {
return mesh::reallocSlowpath(oldPtr, newSize);
}
return localHeap->realloc(oldPtr, newSize);
}
extern "C" MESH_EXPORT CACHELINE_ALIGNED_FN size_t mesh_malloc_usable_size(void *ptr) {
ThreadLocalHeap *localHeap = ThreadLocalHeap::GetHeapIfPresent();
if (unlikely(localHeap == nullptr)) {
return mesh::usableSizeSlowpath(ptr);
}
return localHeap->getSize(ptr);
}
#define xxmalloc_usable_size mesh_malloc_usable_size
extern "C" MESH_EXPORT CACHELINE_ALIGNED_FN void *mesh_memalign(size_t alignment, size_t size)
#if !defined(__FreeBSD__) && !defined(__SVR4)
throw()
#endif
{
ThreadLocalHeap *localHeap = ThreadLocalHeap::GetHeapIfPresent();
if (unlikely(localHeap == nullptr)) {
return mesh::memalignSlowpath(alignment, size);
}
return localHeap->memalign(alignment, size);
}
extern "C" MESH_EXPORT CACHELINE_ALIGNED_FN void *mesh_calloc(size_t count, size_t size) {
ThreadLocalHeap *localHeap = ThreadLocalHeap::GetHeapIfPresent();
if (unlikely(localHeap == nullptr)) {
return mesh::callocSlowpath(count, size);
}
return localHeap->calloc(count, size);
}
extern "C" {
#ifdef __linux__
size_t MESH_EXPORT mesh_usable_size(void *ptr) __attribute__((weak, alias("mesh_malloc_usable_size")));
#else
// aliases are not supported on darwin
size_t MESH_EXPORT mesh_usable_size(void *ptr) {
return mesh_malloc_usable_size(ptr);
}
#endif // __linux__
// ensure we don't concurrently allocate/mess with internal heap data
// structures while forking. This is not normally invoked when
// libmesh is dynamically linked or LD_PRELOADed into a binary.
void MESH_EXPORT xxmalloc_lock(void) {
mesh::runtime().lock();
}
// ensure we don't concurrently allocate/mess with internal heap data
// structures while forking. This is not normally invoked when
// libmesh is dynamically linked or LD_PRELOADed into a binary.
void MESH_EXPORT xxmalloc_unlock(void) {
mesh::runtime().unlock();
}
int MESH_EXPORT sigaction(int signum, const struct sigaction *act, struct sigaction *oldact) MESH_THROW {
return mesh::runtime().sigaction(signum, act, oldact);
}
int MESH_EXPORT sigprocmask(int how, const sigset_t *set, sigset_t *oldset) MESH_THROW {
return mesh::runtime().sigprocmask(how, set, oldset);
}
// we need to wrap pthread_create and pthread_exit so that we can
// install our segfault handler and cleanup thread-local heaps.
int MESH_EXPORT pthread_create(pthread_t *thread, const pthread_attr_t *attr, mesh::PthreadFn startRoutine,
void *arg) MESH_THROW {
return mesh::runtime().createThread(thread, attr, startRoutine, arg);
}
void MESH_EXPORT ATTRIBUTE_NORETURN pthread_exit(void *retval) {
mesh::runtime().exitThread(retval);
}
// Same API as je_mallctl, allows a program to query stats and set
// allocator-related options.
int MESH_EXPORT mesh_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
return mesh::runtime().heap().mallctl(name, oldp, oldlenp, newp, newlen);
}
#ifdef __linux__
int MESH_EXPORT epoll_wait(int __epfd, struct epoll_event *__events, int __maxevents, int __timeout) {
return mesh::runtime().epollWait(__epfd, __events, __maxevents, __timeout);
}
int MESH_EXPORT epoll_pwait(int __epfd, struct epoll_event *__events, int __maxevents, int __timeout,
const __sigset_t *__ss) {
return mesh::runtime().epollPwait(__epfd, __events, __maxevents, __timeout, __ss);
}
#endif
}
#if defined(__linux__)
#include "gnu_wrapper.cc"
#elif defined(__APPLE__)
#include "mac_wrapper.cc"
#else
#error "only linux and macOS support for now"
#endif
|
{
"pile_set_name": "Github"
}
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html xmlns:yui="http://yuilibrary.com/rdf/1.0/yui.rdf#">
<head>
<meta http-equiv="content-type" content="text/html; charset=UTF-8" />
<title>API: imageloader YAHOO.util.ImageLoader.bgImgObj (YUI Library)</title>
<link rel="stylesheet" type="text/css" href="assets/reset-fonts-grids-min.css" />
<link rel="stylesheet" type="text/css" href="assets/api.css" />
<script type="text/javascript" src="assets/api-js"></script>
<script type="text/javascript" src="assets/ac-js"></script>
</head>
<body id="yahoo-com">
<div id="doc3" class="yui-t2">
<div id="hd">
<h1><a href="http://developer.yahoo.com/yui/" title="Yahoo! UI Library">Yahoo! UI Library</a></h1>
<h3>imageloader <span class="subtitle">2.8.2r1</span></h3>
<a href="./index.html" title="Yahoo! UI Library">Yahoo! UI Library</a>
> <a href="./module_imageloader.html" title="imageloader">imageloader</a>
> YAHOO.util.ImageLoader.bgImgObj
<form onsubmit="return false">
<div id="propertysearch">
Search: <input autocomplete="off" id="searchinput" />
<div id="searchresults">
</div>
</div>
</form>
</div>
<div id="bd">
<div id="yui-main">
<div class="yui-b">
<form action="#" name="yui-classopts-form" method="get" id="yui-classopts-form">
<fieldset>
<legend>Filters</legend>
<span class="classopts"><input type="checkbox" name="show_private" id="show_private" /> <label for="show_private">Show Private</label></span>
<span class="classopts"><input type="checkbox" name="show_protected" id="show_protected" /> <label for="show_protected">Show Protected</label></span>
<span class="classopts"><input type="checkbox" name="show_deprecated" id="show_deprecated" /> <label for="show_deprecated">Show Deprecated</label></span>
</fieldset>
</form>
<h2>
Class <b property="yui:name">YAHOO.util.ImageLoader.bgImgObj</b>
<span class="extends">
- extends <a href="YAHOO.util.ImageLoader.imgObj.html" title="YAHOO.util.ImageLoader.imgObj">YAHOO.util.ImageLoader.imgObj</a>
</span>
</h2>
<!-- class tree goes here -->
<div class="summary description" property="yui:description">
Background image object. A background image is one whose URL is specified by "background-image" in the element's style
</div>
<div class="section constructor details" rel="yui:constructor" resource="#constructor">
<h3 id="constructor">Constructor</h3>
<div class="content">
<div class="detail">
<strong property="yui:name">YAHOO.util.ImageLoader.bgImgObj</strong>
<code>
(
domId
,
url
)
</code>
<div class="description">
<dl rel="yui:parameters">
<dt>Parameters:</dt>
<dd rel="yui:parameter">
<code><span property="yui:name">domId</span>
<<span property="yui:type">String</span>>
</code>
<span property="yui:description"> HTML DOM id of the image element</span>
</dd>
<dd rel="yui:parameter">
<code><span property="yui:name">url</span>
<<span property="yui:type">String</span>>
</code>
<span property="yui:description"> URL for the image</span>
</dd>
</dl>
</div>
</div>
</div>
</div>
<div rel="yui:properties" resource="#properties">
<div rel="yui:inheritance">
<div class="section field inheritance" rel="yui:superclass" resource="YAHOO.util.ImageLoader.imgObj.html">
<h4>Properties inherited from <a href="YAHOO.util.ImageLoader.imgObj.html" property="yui:name" title="YAHOO.util.ImageLoader.imgObj">YAHOO.util.ImageLoader.imgObj</a>:</h4>
<div class="content" rel="yui:properties">
<code>
<span rel="yui:property" resource="YAHOO.util.ImageLoader.imgObj.html#property__fetched">
<a class="private" href="YAHOO.util.ImageLoader.imgObj.html#property__fetched" property="yui:name" title="_fetched">_fetched</a><span class="private">,</span>
</span>
<span rel="yui:property" resource="YAHOO.util.ImageLoader.imgObj.html#property_domId">
<a class="" href="YAHOO.util.ImageLoader.imgObj.html#property_domId" property="yui:name" title="domId">domId</a><span class="">,</span>
</span>
<span rel="yui:property" resource="YAHOO.util.ImageLoader.imgObj.html#property_height">
<a class="" href="YAHOO.util.ImageLoader.imgObj.html#property_height" property="yui:name" title="height">height</a><span class="">,</span>
</span>
<span rel="yui:property" resource="YAHOO.util.ImageLoader.imgObj.html#property_setVisible">
<a class="" href="YAHOO.util.ImageLoader.imgObj.html#property_setVisible" property="yui:name" title="setVisible">setVisible</a><span class="">,</span>
</span>
<span rel="yui:property" resource="YAHOO.util.ImageLoader.imgObj.html#property_url">
<a class="" href="YAHOO.util.ImageLoader.imgObj.html#property_url" property="yui:name" title="url">url</a><span class="">,</span>
</span>
<span rel="yui:property" resource="YAHOO.util.ImageLoader.imgObj.html#property_width">
<a class="" href="YAHOO.util.ImageLoader.imgObj.html#property_width" property="yui:name" title="width">width</a>
</span>
</code>
</div>
</div>
</div>
</div>
<div rel="yui:methods" resource="#methods">
<div class="section method details">
<h3 id="methods">Methods</h3>
<div class="content">
<div class="private" rel="yui:method" resource="#method__applyUrl">
<h4>
<a name="method__applyUrl">_applyUrl</a></h4>
<div class="detail" >
<code>
private
void
<strong property="yui:name">_applyUrl</strong>
(
el
)
</code>
<div class="description" property="yui:description">
Inserts the image URL into the DOM so that the image is displayed.
Sets style.backgroundImage
</div>
<div class="description">
<dl rel="yui:parameters">
<dt>Parameters:</dt>
<dd rel="yui:parameter">
<code><span property="yui:name">el</span>
<<span property="yui:type">Object</span>>
</code>
<span property="yui:description"> HTML DOM element</span>
</dd>
</dl>
</div>
</div>
<hr />
</div>
</div>
</div>
<div rel="yui:inheritance">
<div class="section field inheritance" rel="yui:superclass" resource="YAHOO.util.ImageLoader.imgObj.html">
<h4>Methods inherited from <a href="YAHOO.util.ImageLoader.imgObj.html" property="yui:name" title="YAHOO.util.ImageLoader.imgObj">YAHOO.util.ImageLoader.imgObj</a>:</h4>
<div class="content" rel="yui:methods">
<code>
<span rel="yui:method" resource="YAHOO.util.ImageLoader.imgObj.html#method__applyUrl">
<a class="private" href="YAHOO.util.ImageLoader.imgObj.html#method__applyUrl" property="yui:name" title="_applyUrl">_applyUrl</a><span class="private">,</span>
</span>
<span rel="yui:method" resource="YAHOO.util.ImageLoader.imgObj.html#method_fetch">
<a class="" href="YAHOO.util.ImageLoader.imgObj.html#method_fetch" property="yui:name" title="fetch">fetch</a>
</span>
</code>
</div>
</div>
</div>
</div>
<div rel="yui:events" resource="#events">
</div>
<div rel="yui:attributes" resource="#configattributes">
</div>
</div>
</div>
<div class="yui-b">
<div class="nav">
<div id="moduleList" class="module">
<h4>Modules</h4>
<ul class="content">
<li class=""><a href="module_animation.html" title="animation">animation</a></li>
<li class=""><a href="module_autocomplete.html" title="autocomplete">autocomplete</a></li>
<li class=""><a href="module_button.html" title="button">button</a></li>
<li class=""><a href="module_calendar.html" title="calendar">calendar</a></li>
<li class=""><a href="module_carousel.html" title="carousel">carousel</a></li>
<li class=""><a href="module_charts.html" title="charts">charts</a></li>
<li class=""><a href="module_colorpicker.html" title="colorpicker">colorpicker</a></li>
<li class=""><a href="module_connection.html" title="connection">connection</a></li>
<li class=""><a href="module_container.html" title="container">container</a></li>
<li class=""><a href="module_cookie.html" title="cookie">cookie</a></li>
<li class=""><a href="module_datasource.html" title="datasource">datasource</a></li>
<li class=""><a href="module_datatable.html" title="datatable">datatable</a></li>
<li class=""><a href="module_datemath.html" title="datemath">datemath</a></li>
<li class=""><a href="module_dom.html" title="dom">dom</a></li>
<li class=""><a href="module_dragdrop.html" title="dragdrop">dragdrop</a></li>
<li class=""><a href="module_editor.html" title="editor">editor</a></li>
<li class=""><a href="module_element.html" title="element">element</a></li>
<li class=""><a href="module_element-delegate.html" title="element-delegate">element-delegate</a></li>
<li class=""><a href="module_event.html" title="event">event</a></li>
<li class=""><a href="module_event-delegate.html" title="event-delegate">event-delegate</a></li>
<li class=""><a href="module_event-mouseenter.html" title="event-mouseenter">event-mouseenter</a></li>
<li class=""><a href="module_event-simulate.html" title="event-simulate">event-simulate</a></li>
<li class=""><a href="module_get.html" title="get">get</a></li>
<li class=""><a href="module_history.html" title="history">history</a></li>
<li class=""><a href="module_imagecropper.html" title="imagecropper">imagecropper</a></li>
<li class="selected"><a href="module_imageloader.html" title="imageloader">imageloader</a></li>
<li class=""><a href="module_json.html" title="json">json</a></li>
<li class=""><a href="module_layout.html" title="layout">layout</a></li>
<li class=""><a href="module_logger.html" title="logger">logger</a></li>
<li class=""><a href="module_menu.html" title="menu">menu</a></li>
<li class=""><a href="module_paginator.html" title="paginator">paginator</a></li>
<li class=""><a href="module_profiler.html" title="profiler">profiler</a></li>
<li class=""><a href="module_profilerviewer.html" title="profilerviewer">profilerviewer</a></li>
<li class=""><a href="module_progressbar.html" title="progressbar">progressbar</a></li>
<li class=""><a href="module_resize.html" title="resize">resize</a></li>
<li class=""><a href="module_selector.html" title="selector">selector</a></li>
<li class=""><a href="module_slider.html" title="slider">slider</a></li>
<li class=""><a href="module_storage.html" title="Storage">Storage</a></li>
<li class=""><a href="module_stylesheet.html" title="stylesheet">stylesheet</a></li>
<li class=""><a href="module_swf.html" title="swf">swf</a></li>
<li class=""><a href="module_swfdetect.html" title="swfdetect">swfdetect</a></li>
<li class=""><a href="module_swfstore.html" title="swfstore">swfstore</a></li>
<li class=""><a href="module_tabview.html" title="tabview">tabview</a></li>
<li class=""><a href="module_treeview.html" title="treeview">treeview</a></li>
<li class=""><a href="module_uploader.html" title="uploader">uploader</a></li>
<li class=""><a href="module_yahoo.html" title="yahoo">yahoo</a></li>
<li class=""><a href="module_yuiloader.html" title="yuiloader">yuiloader</a></li>
<li class=""><a href="module_yuitest.html" title="yuitest">yuitest</a></li>
</ul>
</div>
<div id="classList" class="module">
<h4>Classes</h4>
<ul class="content">
<li class="selected"><a href="YAHOO.util.ImageLoader.bgImgObj.html" title="YAHOO.util.ImageLoader.bgImgObj">YAHOO.util.ImageLoader.bgImgObj</a></li>
<li class=""><a href="YAHOO.util.ImageLoader.group.html" title="YAHOO.util.ImageLoader.group">YAHOO.util.ImageLoader.group</a></li>
<li class=""><a href="YAHOO.util.ImageLoader.imgObj.html" title="YAHOO.util.ImageLoader.imgObj">YAHOO.util.ImageLoader.imgObj</a></li>
<li class=""><a href="YAHOO.util.ImageLoader.pngBgImgObj.html" title="YAHOO.util.ImageLoader.pngBgImgObj">YAHOO.util.ImageLoader.pngBgImgObj</a></li>
<li class=""><a href="YAHOO.util.ImageLoader.srcImgObj.html" title="YAHOO.util.ImageLoader.srcImgObj">YAHOO.util.ImageLoader.srcImgObj</a></li>
</ul>
</div>
<div id="fileList" class="module">
<h4>Files</h4>
<ul class="content">
<li class=""><a href="ImageLoader.js.html" title="ImageLoader.js">ImageLoader.js</a></li>
</ul>
</div>
<div id="methodsList" class="module">
<h4>Methods</h4>
<ul class="content">
<li class="private"><a href="#method__applyUrl" title="_applyUrl">_applyUrl</a></li>
</ul>
</div>
</div>
</div>
</div>
<div id="ft">
<hr />
Copyright © 2010 Yahoo! Inc. All rights reserved.
</div>
</div>
<script type="text/javascript">
var ALL_YUI_PROPS = [{"access": "", "host": "YAHOO.util.ImageLoader.group", "name": "addCustomTrigger", "url": "YAHOO.util.ImageLoader.group.html#method_addCustomTrigger", "type": "method"}, {"access": "", "host": "YAHOO.util.ImageLoader.group", "name": "addTrigger", "url": "YAHOO.util.ImageLoader.group.html#method_addTrigger", "type": "method"}, {"access": "private", "host": "YAHOO.util.ImageLoader.imgObj", "name": "_applyUrl", "url": "YAHOO.util.ImageLoader.imgObj.html#method__applyUrl", "type": "method"}, {"access": "private", "host": "YAHOO.util.ImageLoader.bgImgObj", "name": "_applyUrl", "url": "YAHOO.util.ImageLoader.bgImgObj.html#method__applyUrl", "type": "method"}, {"access": "private", "host": "YAHOO.util.ImageLoader.group", "name": "_classImageEls", "url": "YAHOO.util.ImageLoader.group.html#property__classImageEls", "type": "property"}, {"access": "", "host": "YAHOO.util.ImageLoader.group", "name": "className", "url": "YAHOO.util.ImageLoader.group.html#property_className", "type": "property"}, {"access": "private", "host": "YAHOO.util.ImageLoader.group", "name": "_customTriggers", "url": "YAHOO.util.ImageLoader.group.html#property__customTriggers", "type": "property"}, {"access": "", "host": "YAHOO.util.ImageLoader.imgObj", "name": "domId", "url": "YAHOO.util.ImageLoader.imgObj.html#property_domId", "type": "property"}, {"access": "", "host": "YAHOO.util.ImageLoader.group", "name": "fetch", "url": "YAHOO.util.ImageLoader.group.html#method_fetch", "type": "method"}, {"access": "", "host": "YAHOO.util.ImageLoader.imgObj", "name": "fetch", "url": "YAHOO.util.ImageLoader.imgObj.html#method_fetch", "type": "method"}, {"access": "private", "host": "YAHOO.util.ImageLoader.group", "name": "_fetchByClass", "url": "YAHOO.util.ImageLoader.group.html#method__fetchByClass", "type": "method"}, {"access": "private", "host": "YAHOO.util.ImageLoader.imgObj", "name": "_fetched", "url": "YAHOO.util.ImageLoader.imgObj.html#property__fetched", "type": "property"}, {"access": 
"private", "host": "YAHOO.util.ImageLoader.group", "name": "_foldCheck", "url": "YAHOO.util.ImageLoader.group.html#method__foldCheck", "type": "method"}, {"access": "", "host": "YAHOO.util.ImageLoader.group", "name": "foldConditional", "url": "YAHOO.util.ImageLoader.group.html#property_foldConditional", "type": "property"}, {"access": "private", "host": "YAHOO.util.ImageLoader.group", "name": "_getFetchTimeout", "url": "YAHOO.util.ImageLoader.group.html#method__getFetchTimeout", "type": "method"}, {"access": "", "host": "YAHOO.util.ImageLoader.imgObj", "name": "height", "url": "YAHOO.util.ImageLoader.imgObj.html#property_height", "type": "property"}, {"access": "private", "host": "YAHOO.util.ImageLoader.group", "name": "_imgObjs", "url": "YAHOO.util.ImageLoader.group.html#property__imgObjs", "type": "property"}, {"access": "", "host": "YAHOO.util.ImageLoader.group", "name": "name", "url": "YAHOO.util.ImageLoader.group.html#property_name", "type": "property"}, {"access": "private", "host": "YAHOO.util.ImageLoader.group", "name": "_onloadTasks", "url": "YAHOO.util.ImageLoader.group.html#method__onloadTasks", "type": "method"}, {"access": "", "host": "YAHOO.util.ImageLoader.group", "name": "registerBgImage", "url": "YAHOO.util.ImageLoader.group.html#method_registerBgImage", "type": "method"}, {"access": "", "host": "YAHOO.util.ImageLoader.group", "name": "registerPngBgImage", "url": "YAHOO.util.ImageLoader.group.html#method_registerPngBgImage", "type": "method"}, {"access": "", "host": "YAHOO.util.ImageLoader.group", "name": "registerSrcImage", "url": "YAHOO.util.ImageLoader.group.html#method_registerSrcImage", "type": "method"}, {"access": "", "host": "YAHOO.util.ImageLoader.imgObj", "name": "setVisible", "url": "YAHOO.util.ImageLoader.imgObj.html#property_setVisible", "type": "property"}, {"access": "private", "host": "YAHOO.util.ImageLoader.group", "name": "_timeout", "url": "YAHOO.util.ImageLoader.group.html#property__timeout", "type": "property"}, {"access": "", 
"host": "YAHOO.util.ImageLoader.group", "name": "timeoutLen", "url": "YAHOO.util.ImageLoader.group.html#property_timeoutLen", "type": "property"}, {"access": "private", "host": "YAHOO.util.ImageLoader.group", "name": "_triggers", "url": "YAHOO.util.ImageLoader.group.html#property__triggers", "type": "property"}, {"access": "", "host": "YAHOO.util.ImageLoader.imgObj", "name": "url", "url": "YAHOO.util.ImageLoader.imgObj.html#property_url", "type": "property"}, {"access": "", "host": "YAHOO.util.ImageLoader.imgObj", "name": "width", "url": "YAHOO.util.ImageLoader.imgObj.html#property_width", "type": "property"}];
</script>
</body>
</html>
|
{
"pile_set_name": "Github"
}
|
const { staticScriptsConfig } = require("./paths")
const gerateStaticScriptsConfig = require(staticScriptsConfig)
// Todo: Create sane defaults
module.exports = function(env) {
const isProduction = process.env.NODE_ENV === "production"
let config = gerateStaticScriptsConfig(env)
if (!config.generator) {
throw new Error("`generator` must be provided by gulp.config.js")
}
config.dest = config.dest || "site/"
config.src = config.src || "src/"
config.tmp = config.tmp || ".tmp/"
config.build = config.build || "dist/"
const { styles, scripts, images, svg } = config
const userConfig = Object.assign({}, config)
delete userConfig.styles
delete userConfig.scripts
delete userConfig.images
delete userConfig.svg
return Object.assign(
{
styles: Object.assign(
{
src: config.src + "css/*.css",
watch: config.src + "css/**/*.css",
dest: config.dest + "static/css",
tmp: config.tmp + "css",
},
config.styles
),
scripts: Object.assign(
{
src: config.src + "js/*+(js|jsx)",
watch: config.src + "js/**/*+(js|jsx)",
dest: config.dest + "static/js/",
tmp: config.tmp + "js/",
},
config.scripts
),
images: Object.assign(
{
src: config.src + "img/**/*.+(png|jpg|jpeg|gif|svg|webp)",
watch: config.src + "img/**/*.+(png|jpg|jpeg|gif|svg|webp)",
dest: config.dest + "static/img/",
},
config.images
),
svg: Object.assign(
{
src: config.src + "img/**/*.svg",
watch: config.src + "img/**/*.svg",
dest: config.dest + "static/svg/",
config: {
dest: ".",
mode: {
symbol: {
sprite: "sprite.symbol.svg",
prefix: "svg-%s",
dest: ".",
},
},
example: !isProduction,
},
},
config.svg
),
},
userConfig
)
}
|
{
"pile_set_name": "Github"
}
|
/*
* Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
*/
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.sun.org.apache.xalan.internal.xsltc.compiler.util;
import java.util.ListResourceBundle;
/**
* @author Morten Jorgensen
*/
public class ErrorMessages_ja extends ListResourceBundle {
/*
* XSLTC compile-time error messages.
*
* General notes to translators and definitions:
*
* 1) XSLTC is the name of the product. It is an acronym for "XSLT Compiler".
* XSLT is an acronym for "XML Stylesheet Language: Transformations".
*
* 2) A stylesheet is a description of how to transform an input XML document
* into a resultant XML document (or HTML document or text). The
* stylesheet itself is described in the form of an XML document.
*
* 3) A template is a component of a stylesheet that is used to match a
* particular portion of an input document and specifies the form of the
* corresponding portion of the output document.
*
* 4) An axis is a particular "dimension" in a tree representation of an XML
* document; the nodes in the tree are divided along different axes.
* Traversing the "child" axis, for instance, means that the program
* would visit each child of a particular node; traversing the "descendant"
* axis means that the program would visit the child nodes of a particular
* node, their children, and so on until the leaf nodes of the tree are
* reached.
*
* 5) An iterator is an object that traverses nodes in a tree along a
* particular axis, one at a time.
*
* 6) An element is a mark-up tag in an XML document; an attribute is a
* modifier on the tag. For example, in <elem attr='val' attr2='val2'>
* "elem" is an element name, "attr" and "attr2" are attribute names with
* the values "val" and "val2", respectively.
*
* 7) A namespace declaration is a special attribute that is used to associate
* a prefix with a URI (the namespace). The meanings of element names and
* attribute names that use that prefix are defined with respect to that
* namespace.
*
* 8) DOM is an acronym for Document Object Model. It is a tree
* representation of an XML document.
*
* SAX is an acronym for the Simple API for XML processing. It is an API
 * used to inform an XML processor (in this case XSLTC) of the structure and
* content of an XML document.
*
* Input to the stylesheet processor can come from an XML parser in the
* form of a DOM tree or through the SAX API.
*
* 9) DTD is a document type declaration. It is a way of specifying the
* grammar for an XML file, the names and types of elements, attributes,
* etc.
*
* 10) XPath is a specification that describes a notation for identifying
* nodes in a tree-structured representation of an XML document. An
* instance of that notation is referred to as an XPath expression.
*
* 11) Translet is an invented term that refers to the class file that contains
* the compiled form of a stylesheet.
*/
// These message should be read from a locale-specific resource bundle
/** Get the lookup table for error messages.
*
* @return The message lookup table.
*/
public Object[][] getContents()
{
return new Object[][] {
{ErrorMsg.MULTIPLE_STYLESHEET_ERR,
"\u540C\u3058\u30D5\u30A1\u30A4\u30EB\u306B\u8907\u6570\u306E\u30B9\u30BF\u30A4\u30EB\u30B7\u30FC\u30C8\u304C\u5B9A\u7FA9\u3055\u308C\u3066\u3044\u307E\u3059\u3002"},
/*
* Note to translators: The substitution text is the name of a
* template. The same name was used on two different templates in the
* same stylesheet.
*/
{ErrorMsg.TEMPLATE_REDEF_ERR,
"\u30C6\u30F3\u30D7\u30EC\u30FC\u30C8''{0}''\u306F\u3053\u306E\u30B9\u30BF\u30A4\u30EB\u30B7\u30FC\u30C8\u5185\u3067\u3059\u3067\u306B\u5B9A\u7FA9\u3055\u308C\u3066\u3044\u307E\u3059\u3002"},
/*
* Note to translators: The substitution text is the name of a
* template. A reference to the template name was encountered, but the
* template is undefined.
*/
{ErrorMsg.TEMPLATE_UNDEF_ERR,
"\u30C6\u30F3\u30D7\u30EC\u30FC\u30C8''{0}''\u306F\u3053\u306E\u30B9\u30BF\u30A4\u30EB\u30B7\u30FC\u30C8\u5185\u3067\u5B9A\u7FA9\u3055\u308C\u3066\u3044\u307E\u305B\u3093\u3002"},
/*
* Note to translators: The substitution text is the name of a variable
* that was defined more than once.
*/
{ErrorMsg.VARIABLE_REDEF_ERR,
"\u5909\u6570''{0}''\u306F\u540C\u3058\u30B9\u30B3\u30FC\u30D7\u5185\u3067\u8907\u6570\u5B9A\u7FA9\u3055\u308C\u3066\u3044\u307E\u3059\u3002"},
/*
* Note to translators: The substitution text is the name of a variable
* or parameter. A reference to the variable or parameter was found,
* but it was never defined.
*/
{ErrorMsg.VARIABLE_UNDEF_ERR,
"\u5909\u6570\u307E\u305F\u306F\u30D1\u30E9\u30E1\u30FC\u30BF''{0}''\u304C\u672A\u5B9A\u7FA9\u3067\u3059\u3002"},
/*
* Note to translators: The word "class" here refers to a Java class.
* Processing the stylesheet required a class to be loaded, but it could
* not be found. The substitution text is the name of the class.
*/
{ErrorMsg.CLASS_NOT_FOUND_ERR,
"\u30AF\u30E9\u30B9''{0}''\u304C\u898B\u3064\u304B\u308A\u307E\u305B\u3093\u3002"},
/*
* Note to translators: The word "method" here refers to a Java method.
* Processing the stylesheet required a reference to the method named by
* the substitution text, but it could not be found. "public" is the
* Java keyword.
*/
{ErrorMsg.METHOD_NOT_FOUND_ERR,
"\u5916\u90E8\u30E1\u30BD\u30C3\u30C9''{0}''\u304C\u898B\u3064\u304B\u308A\u307E\u305B\u3093(public\u3067\u3042\u308B\u5FC5\u8981\u304C\u3042\u308A\u307E\u3059)\u3002"},
/*
* Note to translators: The word "method" here refers to a Java method.
* Processing the stylesheet required a reference to the method named by
* the substitution text, but no method with the required types of
* arguments or return type could be found.
*/
{ErrorMsg.ARGUMENT_CONVERSION_ERR,
"\u30E1\u30BD\u30C3\u30C9''{0}''\u306E\u547C\u51FA\u3057\u306E\u5F15\u6570\u30BF\u30A4\u30D7\u307E\u305F\u306F\u623B\u308A\u578B\u3092\u5909\u63DB\u3067\u304D\u307E\u305B\u3093"},
/*
* Note to translators: The file or URI named in the substitution text
* is missing.
*/
{ErrorMsg.FILE_NOT_FOUND_ERR,
"\u30D5\u30A1\u30A4\u30EB\u307E\u305F\u306FURI ''{0}''\u304C\u898B\u3064\u304B\u308A\u307E\u305B\u3093\u3002"},
/*
* Note to translators: This message is displayed when the URI
* mentioned in the substitution text is not well-formed syntactically.
*/
{ErrorMsg.INVALID_URI_ERR,
"URI ''{0}''\u304C\u7121\u52B9\u3067\u3059\u3002"},
/*
* Note to translators: This message is displayed when the URI
* mentioned in the substitution text is not well-formed syntactically.
*/
{ErrorMsg.CATALOG_EXCEPTION,
"JAXP08090001: CatalogResolver\u306F\u30AB\u30BF\u30ED\u30B0\"{0}\"\u3067\u6709\u52B9\u3067\u3059\u304C\u3001CatalogException\u304C\u8FD4\u3055\u308C\u307E\u3059\u3002"},
/*
* Note to translators: The file or URI named in the substitution text
* exists but could not be opened.
*/
{ErrorMsg.FILE_ACCESS_ERR,
"\u30D5\u30A1\u30A4\u30EB\u307E\u305F\u306FURI ''{0}''\u3092\u958B\u304F\u3053\u3068\u304C\u3067\u304D\u307E\u305B\u3093\u3002"},
/*
* Note to translators: <xsl:stylesheet> and <xsl:transform> are
* keywords that should not be translated.
*/
{ErrorMsg.MISSING_ROOT_ERR,
"<xsl:stylesheet>\u307E\u305F\u306F<xsl:transform>\u306E\u8981\u7D20\u304C\u3042\u308A\u307E\u305B\u3093\u3002"},
/*
* Note to translators: The stylesheet contained a reference to a
* namespace prefix that was undefined. The value of the substitution
* text is the name of the prefix.
*/
{ErrorMsg.NAMESPACE_UNDEF_ERR,
"\u30CD\u30FC\u30E0\u30B9\u30DA\u30FC\u30B9\u306E\u63A5\u982D\u8F9E''{0}''\u306F\u5BA3\u8A00\u3055\u308C\u3066\u3044\u307E\u305B\u3093\u3002"},
/*
* Note to translators: The Java function named in the stylesheet could
* not be found.
*/
{ErrorMsg.FUNCTION_RESOLVE_ERR,
"\u95A2\u6570''{0}''\u306E\u547C\u51FA\u3057\u3092\u89E3\u6C7A\u3067\u304D\u307E\u305B\u3093\u3002"},
/*
* Note to translators: The substitution text is the name of a
* function. A literal string here means a constant string value.
*/
{ErrorMsg.NEED_LITERAL_ERR,
"''{0}''\u3078\u306E\u5F15\u6570\u306F\u30EA\u30C6\u30E9\u30EB\u6587\u5B57\u5217\u3067\u3042\u308B\u5FC5\u8981\u304C\u3042\u308A\u307E\u3059\u3002"},
/*
* Note to translators: This message indicates there was a syntactic
* error in the form of an XPath expression. The substitution text is
* the expression.
*/
{ErrorMsg.XPATH_PARSER_ERR,
"XPath\u5F0F''{0}''\u306E\u89E3\u6790\u4E2D\u306B\u30A8\u30E9\u30FC\u304C\u767A\u751F\u3057\u307E\u3057\u305F\u3002"},
/*
* Note to translators: An element in the stylesheet requires a
* particular attribute named by the substitution text, but that
* attribute was not specified in the stylesheet.
*/
{ErrorMsg.REQUIRED_ATTR_ERR,
"\u5FC5\u9808\u5C5E\u6027''{0}''\u304C\u3042\u308A\u307E\u305B\u3093\u3002"},
/*
* Note to translators: This message indicates that a character not
* permitted in an XPath expression was encountered. The substitution
* text is the offending character.
*/
{ErrorMsg.ILLEGAL_CHAR_ERR,
"XPath\u5F0F\u306E\u6587\u5B57''{0}''\u306F\u7121\u52B9\u3067\u3059\u3002"},
/*
* Note to translators: A processing instruction is a mark-up item in
* an XML document that request some behaviour of an XML processor. The
* form of the name of was invalid in this case, and the substitution
* text is the name.
*/
{ErrorMsg.ILLEGAL_PI_ERR,
"\u51E6\u7406\u547D\u4EE4\u306E\u540D\u524D''{0}''\u306F\u7121\u52B9\u3067\u3059\u3002"},
/*
* Note to translators: This message is reported if the stylesheet
* being processed attempted to construct an XML document with an
* attribute in a place other than on an element. The substitution text
* specifies the name of the attribute.
*/
{ErrorMsg.STRAY_ATTRIBUTE_ERR,
"\u5C5E\u6027''{0}''\u304C\u8981\u7D20\u306E\u5916\u5074\u306B\u3042\u308A\u307E\u3059\u3002"},
/*
* Note to translators: An attribute that wasn't recognized was
* specified on an element in the stylesheet. The attribute is named
* by the substitution
* text.
*/
{ErrorMsg.ILLEGAL_ATTRIBUTE_ERR,
"\u4E0D\u6B63\u306A\u5C5E\u6027''{0}''\u3067\u3059\u3002"},
/*
* Note to translators: "import" and "include" are keywords that should
* not be translated. This messages indicates that the stylesheet
* named in the substitution text imported or included itself either
* directly or indirectly.
*/
{ErrorMsg.CIRCULAR_INCLUDE_ERR,
"\u30A4\u30F3\u30DD\u30FC\u30C8\u307E\u305F\u306F\u30A4\u30F3\u30AF\u30EB\u30FC\u30C9\u304C\u5FAA\u74B0\u3057\u3066\u3044\u307E\u3059\u3002\u30B9\u30BF\u30A4\u30EB\u30B7\u30FC\u30C8''{0}''\u306F\u3059\u3067\u306B\u30ED\u30FC\u30C9\u3055\u308C\u3066\u3044\u307E\u3059\u3002"},
/*
* Note to translators: "xsl:import" and "xsl:include" are keywords that
* should not be translated.
*/
{ErrorMsg.IMPORT_PRECEDE_OTHERS_ERR,
"xsl:import\u8981\u7D20\u306E\u5B50\u306F\u3001xsl:stylesheet\u8981\u7D20\u306E\u4ED6\u306E\u3059\u3079\u3066\u306E\u8981\u7D20\u306E\u5B50(\u3059\u3079\u3066\u306Exsl:include\u8981\u7D20\u306E\u5B50\u3092\u542B\u3080)\u3088\u308A\u524D\u306B\u7F6E\u304F\u5FC5\u8981\u304C\u3042\u308A\u307E\u3059\u3002"},
/*
* Note to translators: A result-tree fragment is a portion of a
* resulting XML document represented as a tree. "<xsl:sort>" is a
* keyword and should not be translated.
*/
{ErrorMsg.RESULT_TREE_SORT_ERR,
"\u7D50\u679C\u30C4\u30EA\u30FC\u30FB\u30D5\u30E9\u30B0\u30E1\u30F3\u30C8\u306F\u30BD\u30FC\u30C8\u3067\u304D\u307E\u305B\u3093(<xsl:sort>\u8981\u7D20\u306F\u7121\u8996\u3055\u308C\u307E\u3059)\u3002\u7D50\u679C\u30C4\u30EA\u30FC\u3092\u4F5C\u6210\u3059\u308B\u3068\u304D\u306B\u30CE\u30FC\u30C9\u3092\u30BD\u30FC\u30C8\u3059\u308B\u5FC5\u8981\u304C\u3042\u308A\u307E\u3059\u3002"},
/*
* Note to translators: A name can be given to a particular style to be
* used to format decimal values. The substitution text gives the name
* of such a style for which more than one declaration was encountered.
*/
{ErrorMsg.SYMBOLS_REDEF_ERR,
"10\u9032\u6570\u30D5\u30A9\u30FC\u30DE\u30C3\u30C8''{0}''\u306F\u3059\u3067\u306B\u5B9A\u7FA9\u3055\u308C\u3066\u3044\u307E\u3059\u3002"},
/*
* Note to translators: The stylesheet version named in the
* substitution text is not supported.
*/
{ErrorMsg.XSL_VERSION_ERR,
"XSL\u30D0\u30FC\u30B8\u30E7\u30F3''{0}''\u306FXSLTC\u306B\u3088\u3063\u3066\u30B5\u30DD\u30FC\u30C8\u3055\u308C\u3066\u3044\u307E\u305B\u3093\u3002"},
/*
* Note to translators: The definitions of one or more variables or
* parameters depend on one another.
*/
{ErrorMsg.CIRCULAR_VARIABLE_ERR,
"''{0}''\u5185\u306E\u5909\u6570\u53C2\u7167\u307E\u305F\u306F\u30D1\u30E9\u30E1\u30FC\u30BF\u53C2\u7167\u304C\u5FAA\u74B0\u3057\u3066\u3044\u307E\u3059\u3002"},
/*
 * Note to translators: The operator in an expression with two operands was
* not recognized.
*/
{ErrorMsg.ILLEGAL_BINARY_OP_ERR,
"2\u9032\u6570\u306E\u5F0F\u306B\u5BFE\u3059\u308B\u4E0D\u660E\u306A\u6F14\u7B97\u5B50\u3067\u3059\u3002"},
/*
* Note to translators: This message is produced if a reference to a
* function has too many or too few arguments.
*/
{ErrorMsg.ILLEGAL_ARG_ERR,
"\u95A2\u6570\u547C\u51FA\u3057\u306E\u5F15\u6570\u304C\u4E0D\u6B63\u3067\u3059\u3002"},
/*
* Note to translators: "document()" is the name of function and must
* not be translated. A node-set is a set of the nodes in the tree
* representation of an XML document.
*/
{ErrorMsg.DOCUMENT_ARG_ERR,
"document()\u95A2\u6570\u306E2\u756A\u76EE\u306E\u5F15\u6570\u306F\u30CE\u30FC\u30C9\u30BB\u30C3\u30C8\u3067\u3042\u308B\u5FC5\u8981\u304C\u3042\u308A\u307E\u3059\u3002"},
/*
* Note to translators: "<xsl:when>" and "<xsl:choose>" are keywords
* and should not be translated. This message describes a syntax error
* in the stylesheet.
*/
{ErrorMsg.MISSING_WHEN_ERR,
"<xsl:choose>\u5185\u306B\u306F\u5C11\u306A\u304F\u3068\u30821\u3064\u306E<xsl:when>\u8981\u7D20\u304C\u5FC5\u8981\u3067\u3059\u3002"},
/*
* Note to translators: "<xsl:otherwise>" and "<xsl:choose>" are
* keywords and should not be translated. This message describes a
* syntax error in the stylesheet.
*/
{ErrorMsg.MULTIPLE_OTHERWISE_ERR,
"<xsl:choose>\u5185\u3067\u306F1\u3064\u306E<xsl:otherwise>\u8981\u7D20\u306E\u307F\u304C\u8A31\u53EF\u3055\u308C\u3066\u3044\u307E\u3059\u3002"},
/*
* Note to translators: "<xsl:otherwise>" and "<xsl:choose>" are
* keywords and should not be translated. This message describes a
* syntax error in the stylesheet.
*/
{ErrorMsg.STRAY_OTHERWISE_ERR,
"<xsl:otherwise>\u306F<xsl:choose>\u5185\u3067\u306E\u307F\u4F7F\u7528\u3067\u304D\u307E\u3059\u3002"},
/*
* Note to translators: "<xsl:when>" and "<xsl:choose>" are keywords
* and should not be translated. This message describes a syntax error
* in the stylesheet.
*/
{ErrorMsg.STRAY_WHEN_ERR,
"<xsl:when>\u306F<xsl:choose>\u5185\u3067\u306E\u307F\u4F7F\u7528\u3067\u304D\u307E\u3059\u3002"},
/*
* Note to translators: "<xsl:when>", "<xsl:otherwise>" and
* "<xsl:choose>" are keywords and should not be translated. This
* message describes a syntax error in the stylesheet.
*/
{ErrorMsg.WHEN_ELEMENT_ERR,
"<xsl:choose>\u5185\u3067\u306F<xsl:when>\u3068<xsl:otherwise>\u306E\u8981\u7D20\u306E\u307F\u304C\u8A31\u53EF\u3055\u308C\u307E\u3059\u3002"},
/*
* Note to translators: "<xsl:attribute-set>" and "name" are keywords
* that should not be translated.
*/
{ErrorMsg.UNNAMED_ATTRIBSET_ERR,
"<xsl:attribute-set>\u306B'name'\u5C5E\u6027\u304C\u3042\u308A\u307E\u305B\u3093\u3002"},
/*
* Note to translators: An element in the stylesheet contained an
* element of a type that it was not permitted to contain.
*/
{ErrorMsg.ILLEGAL_CHILD_ERR,
"\u5B50\u8981\u7D20\u304C\u4E0D\u6B63\u3067\u3059\u3002"},
/*
* Note to translators: The stylesheet tried to create an element with
* a name that was not a valid XML name. The substitution text contains
* the name.
*/
{ErrorMsg.ILLEGAL_ELEM_NAME_ERR,
"\u8981\u7D20''{0}''\u3092\u547C\u3073\u51FA\u3059\u3053\u3068\u306F\u3067\u304D\u307E\u305B\u3093"},
/*
* Note to translators: The stylesheet tried to create an attribute
* with a name that was not a valid XML name. The substitution text
* contains the name.
*/
{ErrorMsg.ILLEGAL_ATTR_NAME_ERR,
"\u5C5E\u6027''{0}''\u3092\u547C\u3073\u51FA\u3059\u3053\u3068\u306F\u3067\u304D\u307E\u305B\u3093"},
/*
* Note to translators: The children of the outermost element of a
* stylesheet are referred to as top-level elements. No text should
* occur within that outermost element unless it is within a top-level
* element. This message indicates that that constraint was violated.
* "<xsl:stylesheet>" is a keyword that should not be translated.
*/
{ErrorMsg.ILLEGAL_TEXT_NODE_ERR,
"\u30C6\u30AD\u30B9\u30C8\u30FB\u30C7\u30FC\u30BF\u306F\u30C8\u30C3\u30D7\u30EC\u30D9\u30EB\u306E<xsl:stylesheet>\u8981\u7D20\u306E\u5916\u5074\u306B\u3042\u308A\u307E\u3059\u3002"},
/*
* Note to translators: JAXP is an acronym for the Java API for XML
* Processing. This message indicates that the XML parser provided to
* XSLTC to process the XML input document had a configuration problem.
*/
{ErrorMsg.SAX_PARSER_CONFIG_ERR,
"JAXP\u30D1\u30FC\u30B5\u30FC\u304C\u6B63\u3057\u304F\u69CB\u6210\u3055\u308C\u3066\u3044\u307E\u305B\u3093"},
/*
* Note to translators: The substitution text names the internal error
* encountered.
*/
{ErrorMsg.INTERNAL_ERR,
"\u30EA\u30AB\u30D0\u30EA\u4E0D\u80FD\u306AXSLTC\u5185\u90E8\u30A8\u30E9\u30FC: ''{0}''"},
/*
* Note to translators: The stylesheet contained an element that was
* not recognized as part of the XSL syntax. The substitution text
* gives the element name.
*/
{ErrorMsg.UNSUPPORTED_XSL_ERR,
"XSL\u8981\u7D20''{0}''\u306F\u30B5\u30DD\u30FC\u30C8\u3055\u308C\u3066\u3044\u307E\u305B\u3093\u3002"},
/*
* Note to translators: The stylesheet referred to an extension to the
* XSL syntax and indicated that it was defined by XSLTC, but XSTLC does
* not recognized the particular extension named. The substitution text
* gives the extension name.
*/
{ErrorMsg.UNSUPPORTED_EXT_ERR,
"XSLTC\u62E1\u5F35''{0}''\u306F\u8A8D\u8B58\u3055\u308C\u307E\u305B\u3093\u3002"},
/*
* Note to translators: The XML document given to XSLTC as a stylesheet
* was not, in fact, a stylesheet. XSLTC is able to detect that in this
* case because the outermost element in the stylesheet has to be
* declared with respect to the XSL namespace URI, but no declaration
* for that namespace was seen.
*/
{ErrorMsg.MISSING_XSLT_URI_ERR,
"\u5165\u529B\u30C9\u30AD\u30E5\u30E1\u30F3\u30C8\u306F\u30B9\u30BF\u30A4\u30EB\u30B7\u30FC\u30C8\u3067\u306F\u3042\u308A\u307E\u305B\u3093(XSL\u306E\u30CD\u30FC\u30E0\u30B9\u30DA\u30FC\u30B9\u306F\u30EB\u30FC\u30C8\u8981\u7D20\u5185\u3067\u5BA3\u8A00\u3055\u308C\u3066\u3044\u307E\u305B\u3093)\u3002"},
/*
* Note to translators: XSLTC could not find the stylesheet document
* with the name specified by the substitution text.
*/
{ErrorMsg.MISSING_XSLT_TARGET_ERR,
"\u30B9\u30BF\u30A4\u30EB\u30B7\u30FC\u30C8\u30FB\u30BF\u30FC\u30B2\u30C3\u30C8''{0}''\u304C\u898B\u3064\u304B\u308A\u307E\u305B\u3093\u3067\u3057\u305F\u3002"},
/*
* Note to translators: access to the stylesheet target is denied
*/
{ErrorMsg.ACCESSING_XSLT_TARGET_ERR,
"accessExternalStylesheet\u30D7\u30ED\u30D1\u30C6\u30A3\u3067\u8A2D\u5B9A\u3055\u308C\u305F\u5236\u9650\u306B\u3088\u308A''{1}''\u30A2\u30AF\u30BB\u30B9\u304C\u8A31\u53EF\u3055\u308C\u3066\u3044\u306A\u3044\u305F\u3081\u3001\u30B9\u30BF\u30A4\u30EB\u30B7\u30FC\u30C8\u30FB\u30BF\u30FC\u30B2\u30C3\u30C8''{0}''\u3092\u8AAD\u307F\u53D6\u308C\u307E\u305B\u3093\u3067\u3057\u305F\u3002"},
/*
* Note to translators: This message represents an internal error in
* condition in XSLTC. The substitution text is the class name in XSLTC
* that is missing some functionality.
*/
{ErrorMsg.NOT_IMPLEMENTED_ERR,
"''{0}''\u304C\u5B9F\u88C5\u3055\u308C\u3066\u3044\u307E\u305B\u3093\u3002"},
/*
* Note to translators: The XML document given to XSLTC as a stylesheet
* was not, in fact, a stylesheet.
*/
{ErrorMsg.NOT_STYLESHEET_ERR,
"\u5165\u529B\u30C9\u30AD\u30E5\u30E1\u30F3\u30C8\u306BXSL\u30B9\u30BF\u30A4\u30EB\u30B7\u30FC\u30C8\u304C\u542B\u307E\u308C\u3066\u3044\u307E\u305B\u3093\u3002"},
/*
* Note to translators: The element named in the substitution text was
* encountered in the stylesheet but is not recognized.
*/
{ErrorMsg.ELEMENT_PARSE_ERR,
"\u8981\u7D20''{0}''\u3092\u89E3\u6790\u3067\u304D\u307E\u305B\u3093\u3067\u3057\u305F"},
/*
* Note to translators: "use", "<key>", "node", "node-set", "string"
* and "number" are keywords in this context and should not be
* translated. This message indicates that the value of the "use"
* attribute was not one of the permitted values.
*/
{ErrorMsg.KEY_USE_ATTR_ERR,
"<key>\u306Euse\u5C5E\u6027\u306F\u3001\u30CE\u30FC\u30C9\u3001\u30CE\u30FC\u30C9\u30BB\u30C3\u30C8\u3001\u6587\u5B57\u5217\u307E\u305F\u306F\u6570\u5024\u3067\u3042\u308B\u5FC5\u8981\u304C\u3042\u308A\u307E\u3059\u3002"},
/*
* Note to translators: An XML document can specify the version of the
* XML specification to which it adheres. This message indicates that
* the version specified for the output document was not valid.
*/
{ErrorMsg.OUTPUT_VERSION_ERR,
"\u51FA\u529BXML\u30C9\u30AD\u30E5\u30E1\u30F3\u30C8\u306E\u30D0\u30FC\u30B8\u30E7\u30F3\u306F1.0\u3067\u3042\u308B\u5FC5\u8981\u304C\u3042\u308A\u307E\u3059"},
/*
* Note to translators: The operator in a comparison operation was
* not recognized.
*/
{ErrorMsg.ILLEGAL_RELAT_OP_ERR,
"\u95A2\u4FC2\u5F0F\u306E\u4E0D\u660E\u306A\u6F14\u7B97\u5B50\u3067\u3059"},
/*
* Note to translators: An attribute set defines as a set of XML
* attributes that can be added to an element in the output XML document
* as a group. This message is reported if the name specified was not
* used to declare an attribute set. The substitution text is the name
* that is in error.
*/
{ErrorMsg.ATTRIBSET_UNDEF_ERR,
"\u5B58\u5728\u3057\u306A\u3044\u5C5E\u6027\u30BB\u30C3\u30C8''{0}''\u3092\u4F7F\u7528\u3057\u3088\u3046\u3068\u3057\u307E\u3057\u305F\u3002"},
/*
* Note to translators: The term "attribute value template" is a term
* defined by XSLT which describes the value of an attribute that is
* determined by an XPath expression. The message indicates that the
* expression was syntactically incorrect; the substitution text
* contains the expression that was in error.
*/
{ErrorMsg.ATTR_VAL_TEMPLATE_ERR,
"\u5C5E\u6027\u5024\u30C6\u30F3\u30D7\u30EC\u30FC\u30C8''{0}''\u3092\u89E3\u6790\u3067\u304D\u307E\u305B\u3093\u3002"},
/*
* Note to translators: ???
*/
{ErrorMsg.UNKNOWN_SIG_TYPE_ERR,
"\u30AF\u30E9\u30B9''{0}''\u306E\u7F72\u540D\u306B\u4E0D\u660E\u306A\u30C7\u30FC\u30BF\u578B\u304C\u3042\u308A\u307E\u3059\u3002"},
/*
* Note to translators: The substitution text refers to data types.
* The message is displayed if a value in a particular context needs to
* be converted to type {1}, but that's not possible for a value of
* type {0}.
*/
{ErrorMsg.DATA_CONVERSION_ERR,
"\u30C7\u30FC\u30BF\u578B''{0}''\u3092''{1}''\u306B\u5909\u63DB\u3067\u304D\u307E\u305B\u3093\u3002"},
/*
* Note to translators: "Templates" is a Java class name that should
* not be translated.
*/
{ErrorMsg.NO_TRANSLET_CLASS_ERR,
"\u3053\u306E\u30C6\u30F3\u30D7\u30EC\u30FC\u30C8\u306B\u306F\u6709\u52B9\u306Atranslet\u30AF\u30E9\u30B9\u5B9A\u7FA9\u304C\u542B\u307E\u308C\u3066\u3044\u307E\u305B\u3093\u3002"},
/*
* Note to translators: "Templates" is a Java class name that should
* not be translated.
*/
{ErrorMsg.NO_MAIN_TRANSLET_ERR,
"\u3053\u306E\u30C6\u30F3\u30D7\u30EC\u30FC\u30C8\u306B\u306F\u540D\u524D''{0}''\u3092\u6301\u3064\u30AF\u30E9\u30B9\u304C\u542B\u307E\u308C\u3066\u3044\u307E\u305B\u3093\u3002"},
/*
* Note to translators: The substitution text is the name of a class.
*/
{ErrorMsg.TRANSLET_CLASS_ERR,
"translet\u30AF\u30E9\u30B9''{0}''\u3092\u30ED\u30FC\u30C9\u3067\u304D\u307E\u305B\u3093\u3067\u3057\u305F\u3002"},
{ErrorMsg.TRANSLET_OBJECT_ERR,
"Translet\u30AF\u30E9\u30B9\u304C\u30ED\u30FC\u30C9\u3055\u308C\u307E\u3057\u305F\u304C\u3001translet\u30A4\u30F3\u30B9\u30BF\u30F3\u30B9\u3092\u4F5C\u6210\u3067\u304D\u307E\u305B\u3093\u3002"},
/*
* Note to translators: "ErrorListener" is a Java interface name that
* should not be translated. The message indicates that the user tried
* to set an ErrorListener object on object of the class named in the
* substitution text with "null" Java value.
*/
{ErrorMsg.ERROR_LISTENER_NULL_ERR,
"''{0}''\u306EErrorListener\u3092null\u306B\u8A2D\u5B9A\u3057\u3088\u3046\u3068\u3057\u307E\u3057\u305F"},
/*
* Note to translators: StreamSource, SAXSource and DOMSource are Java
* interface names that should not be translated.
*/
{ErrorMsg.JAXP_UNKNOWN_SOURCE_ERR,
"StreamSource\u3001SAXSource\u304A\u3088\u3073DOMSource\u306E\u307F\u304CXSLTC\u306B\u3088\u3063\u3066\u30B5\u30DD\u30FC\u30C8\u3055\u308C\u3066\u3044\u307E\u3059"},
/*
* Note to translators: "Source" is a Java class name that should not
* be translated. The substitution text is the name of Java method.
*/
{ErrorMsg.JAXP_NO_SOURCE_ERR,
"''{0}''\u306B\u6E21\u3055\u308C\u305F\u30BD\u30FC\u30B9\u30FB\u30AA\u30D6\u30B8\u30A7\u30AF\u30C8\u306B\u30B3\u30F3\u30C6\u30F3\u30C4\u304C\u3042\u308A\u307E\u305B\u3093\u3002"},
/*
* Note to translators: The message indicates that XSLTC failed to
* compile the stylesheet into a translet (class file).
*/
{ErrorMsg.JAXP_COMPILE_ERR,
"\u30B9\u30BF\u30A4\u30EB\u30B7\u30FC\u30C8\u3092\u30B3\u30F3\u30D1\u30A4\u30EB\u3067\u304D\u307E\u305B\u3093\u3067\u3057\u305F"},
/*
* Note to translators: "TransformerFactory" is a class name. In this
* context, an attribute is a property or setting of the
* TransformerFactory object. The substitution text is the name of the
* unrecognised attribute. The method used to retrieve the attribute is
* "getAttribute", so it's not clear whether it would be best to
* translate the term "attribute".
*/
{ErrorMsg.JAXP_INVALID_ATTR_ERR,
"TransformerFactory\u306F\u5C5E\u6027''{0}''\u3092\u8A8D\u8B58\u3057\u307E\u305B\u3093\u3002"},
{ErrorMsg.JAXP_INVALID_ATTR_VALUE_ERR,
"''{0}''\u5C5E\u6027\u306B\u6307\u5B9A\u3055\u308C\u305F\u5024\u304C\u6B63\u3057\u304F\u3042\u308A\u307E\u305B\u3093\u3002"},
/*
* Note to translators: "setResult()" and "startDocument()" are Java
* method names that should not be translated.
*/
{ErrorMsg.JAXP_SET_RESULT_ERR,
"setResult()\u306FstartDocument()\u3088\u308A\u3082\u524D\u306B\u547C\u3073\u51FA\u3059\u5FC5\u8981\u304C\u3042\u308A\u307E\u3059\u3002"},
/*
* Note to translators: "Transformer" is a Java interface name that
* should not be translated. A Transformer object should contained a
* reference to a translet object in order to be used for
* transformations; this message is produced if that requirement is not
* met.
*/
{ErrorMsg.JAXP_NO_TRANSLET_ERR,
"\u30C8\u30E9\u30F3\u30B9\u30D5\u30A9\u30FC\u30DE\u306B\u306F\u30AB\u30D7\u30BB\u30EB\u5316\u3055\u308C\u305Ftranslet\u30AA\u30D6\u30B8\u30A7\u30AF\u30C8\u304C\u3042\u308A\u307E\u305B\u3093\u3002"},
/*
* Note to translators: The XML document that results from a
* transformation needs to be sent to an output handler object; this
* message is produced if that requirement is not met.
*/
{ErrorMsg.JAXP_NO_HANDLER_ERR,
"\u5909\u63DB\u7D50\u679C\u306B\u5BFE\u3057\u3066\u5B9A\u7FA9\u6E08\u306E\u51FA\u529B\u30CF\u30F3\u30C9\u30E9\u304C\u3042\u308A\u307E\u305B\u3093\u3002"},
/*
* Note to translators: "Result" is a Java interface name in this
* context. The substitution text is a method name.
*/
{ErrorMsg.JAXP_NO_RESULT_ERR,
"''{0}''\u306B\u6E21\u3055\u308C\u305F\u7D50\u679C\u30AA\u30D6\u30B8\u30A7\u30AF\u30C8\u306F\u7121\u52B9\u3067\u3059\u3002"},
/*
* Note to translators: "Transformer" is a Java interface name. The
* user's program attempted to access an unrecognized property with the
* name specified in the substitution text. The method used to retrieve
* the property is "getOutputProperty", so it's not clear whether it
* would be best to translate the term "property".
*/
{ErrorMsg.JAXP_UNKNOWN_PROP_ERR,
"\u7121\u52B9\u306A\u30C8\u30E9\u30F3\u30B9\u30D5\u30A9\u30FC\u30DE\u30FB\u30D7\u30ED\u30D1\u30C6\u30A3''{0}''\u306B\u30A2\u30AF\u30BB\u30B9\u3057\u3088\u3046\u3068\u3057\u307E\u3057\u305F\u3002"},
/*
* Note to translators: SAX2DOM is the name of a Java class that should
* not be translated. This is an adapter in the sense that it takes a
* DOM object and converts it to something that uses the SAX API.
*/
{ErrorMsg.SAX2DOM_ADAPTER_ERR,
"SAX2DOM\u30A2\u30C0\u30D7\u30BF''{0}''\u3092\u4F5C\u6210\u3067\u304D\u307E\u305B\u3093\u3067\u3057\u305F\u3002"},
/*
* Note to translators: "XSLTCSource.build()" is a Java method name.
* "systemId" is an XML term that is short for "system identification".
*/
{ErrorMsg.XSLTC_SOURCE_ERR,
"systemId\u3092\u8A2D\u5B9A\u305B\u305A\u306BXSLTCSource.build()\u304C\u547C\u3073\u51FA\u3055\u308C\u307E\u3057\u305F\u3002"},
{ ErrorMsg.ER_RESULT_NULL,
"\u7D50\u679C\u306Fnull\u306B\u3067\u304D\u307E\u305B\u3093"},
/*
* Note to translators: This message indicates that the value argument
* of setParameter must be a valid Java Object.
*/
{ErrorMsg.JAXP_INVALID_SET_PARAM_VALUE,
"\u30D1\u30E9\u30E1\u30FC\u30BF{0}\u306F\u6709\u52B9\u306AJava\u30AA\u30D6\u30B8\u30A7\u30AF\u30C8\u3067\u3042\u308B\u5FC5\u8981\u304C\u3042\u308A\u307E\u3059"},
{ErrorMsg.COMPILE_STDIN_ERR,
"-i\u30AA\u30D7\u30B7\u30E7\u30F3\u306F-o\u30AA\u30D7\u30B7\u30E7\u30F3\u3068\u3068\u3082\u306B\u4F7F\u7528\u3059\u308B\u5FC5\u8981\u304C\u3042\u308A\u307E\u3059\u3002"},
/*
* Note to translators: This message contains usage information for a
* means of invoking XSLTC from the command-line. The message is
* formatted for presentation in English. The strings <output>,
* <directory>, etc. indicate user-specified argument values, and can
* be translated - the argument <package> refers to a Java package, so
* it should be handled in the same way the term is handled for JDK
* documentation.
*/
{ErrorMsg.COMPILE_USAGE_STR,
"\u5F62\u5F0F\n java com.sun.org.apache.xalan.internal.xsltc.cmdline.Compile [-o <output>]\n [-d <directory>] [-j <jarfile>] [-p <package>]\n [-n] [-x] [-u] [-v] [-h] { <stylesheet> | -i }\n\nOPTIONS\n -o <output> \u540D\u524D<output>\u3092\u751F\u6210\u6E08translet\u306B\n \u5272\u308A\u5F53\u3066\u308B\u3002\u30C7\u30D5\u30A9\u30EB\u30C8\u3067\u306F\u3001translet\u540D\u306F\n <stylesheet>\u540D\u306B\u7531\u6765\u3057\u307E\u3059\u3002\u3053\u306E\u30AA\u30D7\u30B7\u30E7\u30F3\u306F\n \u8907\u6570\u306E\u30B9\u30BF\u30A4\u30EB\u30B7\u30FC\u30C8\u3092\u30B3\u30F3\u30D1\u30A4\u30EB\u3059\u308B\u5834\u5408\u306F\u7121\u8996\u3055\u308C\u307E\u3059\u3002\n -d <directory> translet\u306E\u5B9B\u5148\u30C7\u30A3\u30EC\u30AF\u30C8\u30EA\u3092\u6307\u5B9A\u3059\u308B\n -j <jarfile> <jarfile>\u3067\u6307\u5B9A\u3055\u308C\u308B\u540D\u524D\u306Ejar\u30D5\u30A1\u30A4\u30EB\u306Btranslet\u30AF\u30E9\u30B9\u3092\n \u30D1\u30C3\u30B1\u30FC\u30B8\u3059\u308B\n -p <package> \u751F\u6210\u3055\u308C\u308B\u3059\u3079\u3066\u306Etranslet\u30AF\u30E9\u30B9\u306E\u30D1\u30C3\u30B1\u30FC\u30B8\u540D\n \u63A5\u982D\u8F9E\u3092\u6307\u5B9A\u3059\u308B\u3002\n -n \u30C6\u30F3\u30D7\u30EC\u30FC\u30C8\u306E\u30A4\u30F3\u30E9\u30A4\u30F3\u5316\u3092\u6709\u52B9\u306B\u3059\u308B(\u5E73\u5747\u3057\u3066\u30C7\u30D5\u30A9\u30EB\u30C8\u52D5\u4F5C\u306E\u65B9\u304C\n \u512A\u308C\u3066\u3044\u307E\u3059)\u3002\n -x \u8FFD\u52A0\u306E\u30C7\u30D0\u30C3\u30B0\u30FB\u30E1\u30C3\u30BB\u30FC\u30B8\u51FA\u529B\u3092\u30AA\u30F3\u306B\u3059\u308B\n -u <stylesheet>\u5F15\u6570\u3092URL\u3068\u3057\u3066\u89E3\u91C8\u3059\u308B\n -i \u30B9\u30BF\u30A4\u30EB\u30B7\u30FC\u30C8\u3092stdin\u304B\u3089\u8AAD\u307F\u8FBC\u3080\u3053\u3068\u3092\u30B3\u30F3\u30D1\u30A4\u30E9\u306B\u5F37\u5236\u3059\u308B\n -v \u30B3\u30F3\u30D1\u30A4\u30E9\u306E\u30D0\u30FC\u30B8\u30E7\u30F3\u3092\u51FA\u529B\u3059\u308B\n -h \u3053\u306E\u4F7F\u7528\u65B9\u6CD5\u306E\u6587\u3092\u51FA\u529B\u3059\u308B\n"},
/*
* Note to translators: This message contains usage information for a
* means of invoking XSLTC from the command-line. The message is
* formatted for presentation in English. The strings <jarfile>,
* <document>, etc. indicate user-specified argument values, and can
* be translated - the argument <class> refers to a Java class, so it
* should be handled in the same way the term is handled for JDK
* documentation.
*/
{ErrorMsg.TRANSFORM_USAGE_STR,
"\u5F62\u5F0F \n java com.sun.org.apache.xalan.internal.xsltc.cmdline.Transform [-j <jarfile>]\n [-x] [-n <iterations>] {-u <document_url> | <document>}\n <class> [<param1>=<value1> ...]\n\n translet <class>\u3092\u4F7F\u7528\u3057\u3066\u3001<document>\u3067\u6307\u5B9A\u3055\u308C\u308B\n XML\u30C9\u30AD\u30E5\u30E1\u30F3\u30C8\u3092\u5909\u63DB\u3059\u308B\u3002translet <class>\u306F\n \u30E6\u30FC\u30B6\u30FC\u306ECLASSPATH\u5185\u304B\u3001\u30AA\u30D7\u30B7\u30E7\u30F3\u3067\u6307\u5B9A\u3055\u308C\u305F<jarfile>\u5185\u306B\u3042\u308A\u307E\u3059\u3002\nOPTIONS\n -j <jarfile> translet\u3092\u30ED\u30FC\u30C9\u3059\u308Bjarfile\u3092\u6307\u5B9A\u3059\u308B\n -x \u8FFD\u52A0\u306E\u30C7\u30D0\u30C3\u30B0\u30FB\u30E1\u30C3\u30BB\u30FC\u30B8\u51FA\u529B\u3092\u30AA\u30F3\u306B\u3059\u308B\n -n <iterations> \u5909\u63DB\u3092<iterations>\u56DE\u5B9F\u884C\u3057\u3001\n \u30D7\u30ED\u30D5\u30A1\u30A4\u30EA\u30F3\u30B0\u60C5\u5831\u3092\u8868\u793A\u3059\u308B\n -u <document_url> XML\u5165\u529B\u30C9\u30AD\u30E5\u30E1\u30F3\u30C8\u3092URL\u3068\u3057\u3066\u6307\u5B9A\u3059\u308B\n"},
/*
* Note to translators: "<xsl:sort>", "<xsl:for-each>" and
* "<xsl:apply-templates>" are keywords that should not be translated.
* The message indicates that an xsl:sort element must be a child of
* one of the other kinds of elements mentioned.
*/
{ErrorMsg.STRAY_SORT_ERR,
"<xsl:sort>\u306F<xsl:for-each>\u307E\u305F\u306F<xsl:apply-templates>\u306E\u5185\u90E8\u3067\u306E\u307F\u4F7F\u7528\u3067\u304D\u307E\u3059\u3002"},
/*
* Note to translators: The message indicates that the encoding
* requested for the output document was on that requires support that
* is not available from the Java Virtual Machine being used to execute
* the program.
*/
{ErrorMsg.UNSUPPORTED_ENCODING,
"\u51FA\u529B\u30A8\u30F3\u30B3\u30FC\u30C7\u30A3\u30F3\u30B0''{0}''\u306F\u3053\u306EJVM\u3067\u306F\u30B5\u30DD\u30FC\u30C8\u3055\u308C\u3066\u3044\u307E\u305B\u3093\u3002"},
/*
* Note to translators: The message indicates that the XPath expression
* named in the substitution text was not well formed syntactically.
*/
{ErrorMsg.SYNTAX_ERR,
"''{0}''\u306B\u69CB\u6587\u30A8\u30E9\u30FC\u304C\u3042\u308A\u307E\u3059\u3002"},
/*
* Note to translators: The substitution text is the name of a Java
* class. The term "constructor" here is the Java term. The message is
* displayed if XSLTC could not find a constructor for the specified
* class.
*/
{ErrorMsg.CONSTRUCTOR_NOT_FOUND,
"\u5916\u90E8\u30B3\u30F3\u30B9\u30C8\u30E9\u30AF\u30BF''{0}''\u304C\u898B\u3064\u304B\u308A\u307E\u305B\u3093\u3002"},
/*
* Note to translators: "static" is the Java keyword. The substitution
* text is the name of a function. The first argument of that function
* is not of the required type.
*/
{ErrorMsg.NO_JAVA_FUNCT_THIS_REF,
"static\u3067\u306A\u3044Java\u95A2\u6570''{0}''\u306E\u6700\u521D\u306E\u5F15\u6570\u306F\u7121\u52B9\u306A\u30AA\u30D6\u30B8\u30A7\u30AF\u30C8\u53C2\u7167\u3067\u3059\u3002"},
/*
* Note to translators: An XPath expression was not of the type
* required in a particular context. The substitution text is the
* expression that was in error.
*/
{ErrorMsg.TYPE_CHECK_ERR,
"\u5F0F''{0}''\u306E\u30BF\u30A4\u30D7\u306E\u78BA\u8A8D\u4E2D\u306B\u30A8\u30E9\u30FC\u304C\u767A\u751F\u3057\u307E\u3057\u305F\u3002"},
/*
* Note to translators: An XPath expression was not of the type
* required in a particular context. However, the location of the
* problematic expression is unknown.
*/
{ErrorMsg.TYPE_CHECK_UNK_LOC_ERR,
"\u4E0D\u660E\u306A\u5834\u6240\u3067\u306E\u5F0F\u306E\u30BF\u30A4\u30D7\u306E\u78BA\u8A8D\u4E2D\u306B\u30A8\u30E9\u30FC\u304C\u767A\u751F\u3057\u307E\u3057\u305F\u3002"},
/*
* Note to translators: The substitution text is the name of a command-
* line option that was not recognized.
*/
{ErrorMsg.ILLEGAL_CMDLINE_OPTION_ERR,
"\u30B3\u30DE\u30F3\u30C9\u884C\u30AA\u30D7\u30B7\u30E7\u30F3''{0}''\u306F\u7121\u52B9\u3067\u3059\u3002"},
/*
* Note to translators: The substitution text is the name of a command-
* line option.
*/
{ErrorMsg.CMDLINE_OPT_MISSING_ARG_ERR,
"\u30B3\u30DE\u30F3\u30C9\u884C\u30AA\u30D7\u30B7\u30E7\u30F3''{0}''\u306B\u5FC5\u9808\u306E\u5F15\u6570\u304C\u3042\u308A\u307E\u305B\u3093\u3002"},
/*
* Note to translators: This message is used to indicate the severity
* of another message. The substitution text contains two error
* messages. The spacing before the second substitution text indents
* it the same amount as the first in English.
*/
{ErrorMsg.WARNING_PLUS_WRAPPED_MSG,
"WARNING: ''{0}''\n :{1}"},
/*
* Note to translators: This message is used to indicate the severity
* of another message. The substitution text is an error message.
*/
{ErrorMsg.WARNING_MSG,
"WARNING: ''{0}''"},
/*
* Note to translators: This message is used to indicate the severity
* of another message. The substitution text contains two error
* messages. The spacing before the second substitution text indents
* it the same amount as the first in English.
*/
{ErrorMsg.FATAL_ERR_PLUS_WRAPPED_MSG,
"FATAL ERROR: ''{0}''\n :{1}"},
/*
* Note to translators: This message is used to indicate the severity
* of another message. The substitution text is an error message.
*/
{ErrorMsg.FATAL_ERR_MSG,
"FATAL ERROR: ''{0}''"},
/*
* Note to translators: This message is used to indicate the severity
* of another message. The substitution text contains two error
* messages. The spacing before the second substitution text indents
* it the same amount as the first in English.
*/
{ErrorMsg.ERROR_PLUS_WRAPPED_MSG,
"ERROR: ''{0}''\n :{1}"},
/*
* Note to translators: This message is used to indicate the severity
* of another message. The substitution text is an error message.
*/
{ErrorMsg.ERROR_MSG,
"ERROR: ''{0}''"},
/*
* Note to translators: The substitution text is the name of a class.
*/
{ErrorMsg.TRANSFORM_WITH_TRANSLET_STR,
"translet ''{0}''\u3092\u4F7F\u7528\u3057\u3066\u5909\u63DB\u3057\u307E\u3059 "},
/*
* Note to translators: The first substitution is the name of a class,
* while the second substitution is the name of a jar file.
*/
{ErrorMsg.TRANSFORM_WITH_JAR_STR,
"translet ''{0}''\u3092\u4F7F\u7528\u3057\u3066jar\u30D5\u30A1\u30A4\u30EB''{1}''\u304B\u3089\u5909\u63DB\u3057\u307E\u3059"},
/*
* Note to translators: "TransformerFactory" is the name of a Java
* interface and must not be translated. The substitution text is
* the name of the class that could not be instantiated.
*/
{ErrorMsg.COULD_NOT_CREATE_TRANS_FACT,
"TransformerFactory\u30AF\u30E9\u30B9''{0}''\u306E\u30A4\u30F3\u30B9\u30BF\u30F3\u30B9\u3092\u4F5C\u6210\u3067\u304D\u307E\u305B\u3093\u3067\u3057\u305F\u3002"},
/*
* Note to translators: This message is produced when the user
* specified a name for the translet class that contains characters
* that are not permitted in a Java class name. The substitution
* text "{0}" specifies the name the user requested, while "{1}"
* specifies the name the processor used instead.
*/
{ErrorMsg.TRANSLET_NAME_JAVA_CONFLICT,
"\u540D\u524D''{0}''\u306B\u306FJava\u30AF\u30E9\u30B9\u306E\u540D\u524D\u306B\u8A31\u53EF\u3055\u308C\u3066\u3044\u306A\u3044\u6587\u5B57\u304C\u542B\u307E\u308C\u3066\u3044\u308B\u305F\u3081\u3001translet\u30AF\u30E9\u30B9\u306E\u540D\u524D\u3068\u3057\u3066\u4F7F\u7528\u3067\u304D\u307E\u305B\u3093\u3067\u3057\u305F\u3002\u540D\u524D''{1}''\u304C\u304B\u308F\u308A\u306B\u4F7F\u7528\u3055\u308C\u307E\u3059\u3002"},
/*
* Note to translators: The following message is used as a header.
* All the error messages are collected together and displayed beneath
* this message.
*/
{ErrorMsg.COMPILER_ERROR_KEY,
"\u30B3\u30F3\u30D1\u30A4\u30E9\u30FB\u30A8\u30E9\u30FC:"},
/*
* Note to translators: The following message is used as a header.
* All the warning messages are collected together and displayed
* beneath this message.
*/
{ErrorMsg.COMPILER_WARNING_KEY,
"\u30B3\u30F3\u30D1\u30A4\u30E9\u306E\u8B66\u544A:"},
/*
* Note to translators: The following message is used as a header.
* All the error messages that are produced when the stylesheet is
* applied to an input document are collected together and displayed
* beneath this message. A 'translet' is the compiled form of a
* stylesheet (see above).
*/
{ErrorMsg.RUNTIME_ERROR_KEY,
"Translet\u30A8\u30E9\u30FC:"},
/*
* Note to translators: An attribute whose value is constrained to
* be a "QName" or a list of "QNames" had a value that was incorrect.
* 'QName' is an XML syntactic term that must not be translated. The
* substitution text contains the actual value of the attribute.
*/
{ErrorMsg.INVALID_QNAME_ERR,
"\u5024\u304C1\u3064\u306EQName\u307E\u305F\u306FQName\u306E\u7A7A\u767D\u6587\u5B57\u533A\u5207\u308A\u30EA\u30B9\u30C8\u3067\u3042\u308B\u3053\u3068\u304C\u5FC5\u8981\u306A\u5C5E\u6027\u306E\u5024\u304C''{0}''\u3067\u3057\u305F"},
/*
* Note to translators: An attribute whose value is required to
* be an "NCName".
* 'NCName' is an XML syntactic term that must not be translated. The
* substitution text contains the actual value of the attribute.
*/
{ErrorMsg.INVALID_NCNAME_ERR,
"\u5024\u304CNCName\u3067\u3042\u308B\u3053\u3068\u304C\u5FC5\u8981\u306A\u5C5E\u6027\u306E\u5024\u304C''{0}''\u3067\u3057\u305F"},
/*
* Note to translators: An attribute with an incorrect value was
* encountered. The permitted value is one of the literal values
* "xml", "html" or "text"; it is also permitted to have the form of
* a QName that is not also an NCName. The terms "method",
* "xsl:output", "xml", "html" and "text" are keywords that must not
* be translated. The term "qname-but-not-ncname" is an XML syntactic
* term. The substitution text contains the actual value of the
* attribute.
*/
{ErrorMsg.INVALID_METHOD_IN_OUTPUT,
"<xsl:output>\u8981\u7D20\u306E\u30E1\u30BD\u30C3\u30C9\u5C5E\u6027\u306E\u5024\u304C''{0}''\u3067\u3057\u305F\u3002\u5024\u306F''xml''\u3001''html''\u3001''text''\u307E\u305F\u306Fqname-but-not-ncname\u306E\u3044\u305A\u308C\u304B\u3067\u3042\u308B\u5FC5\u8981\u304C\u3042\u308A\u307E\u3059"},
{ErrorMsg.JAXP_GET_FEATURE_NULL_NAME,
"\u6A5F\u80FD\u540D\u306FTransformerFactory.getFeature(String name)\u5185\u3067null\u306B\u3067\u304D\u307E\u305B\u3093\u3002"},
{ErrorMsg.JAXP_SET_FEATURE_NULL_NAME,
"\u6A5F\u80FD\u540D\u306FTransformerFactory.setFeature(String name, boolean value)\u5185\u3067null\u306B\u3067\u304D\u307E\u305B\u3093\u3002"},
{ErrorMsg.JAXP_UNSUPPORTED_FEATURE,
"\u6A5F\u80FD''{0}''\u3092\u3053\u306ETransformerFactory\u306B\u8A2D\u5B9A\u3067\u304D\u307E\u305B\u3093\u3002"},
{ErrorMsg.JAXP_SECUREPROCESSING_FEATURE,
"FEATURE_SECURE_PROCESSING: \u30BB\u30AD\u30E5\u30EA\u30C6\u30A3\u30FB\u30DE\u30CD\u30FC\u30B8\u30E3\u304C\u5B58\u5728\u3059\u308B\u3068\u304D\u3001\u6A5F\u80FD\u3092false\u306B\u8A2D\u5B9A\u3067\u304D\u307E\u305B\u3093\u3002"},
/*
* Note to translators: This message describes an internal error in the
* processor. The term "byte code" is a Java technical term for the
* executable code in a Java method, and "try-catch-finally block"
* refers to the Java keywords with those names. "Outlined" is a
* technical term internal to XSLTC and should not be translated.
*/
{ErrorMsg.OUTLINE_ERR_TRY_CATCH,
"\u5185\u90E8XSLTC\u30A8\u30E9\u30FC: \u751F\u6210\u3055\u308C\u305F\u30D0\u30A4\u30C8\u30FB\u30B3\u30FC\u30C9\u306F\u3001try-catch-finally\u30D6\u30ED\u30C3\u30AF\u3092\u542B\u3093\u3067\u3044\u308B\u305F\u3081\u3001\u30A2\u30A6\u30C8\u30E9\u30A4\u30F3\u5316\u3067\u304D\u307E\u305B\u3093\u3002"},
/*
* Note to translators: This message describes an internal error in the
* processor. The terms "OutlineableChunkStart" and
* "OutlineableChunkEnd" are the names of classes internal to XSLTC and
* should not be translated. The message indicates that for every
* "start" there must be a corresponding "end", and vice versa, and
* that if one of a pair of "start" and "end" appears between another
* pair of corresponding "start" and "end", then the other half of the
* pair must also be between that same enclosing pair.
*/
{ErrorMsg.OUTLINE_ERR_UNBALANCED_MARKERS,
"\u5185\u90E8XSLTC\u30A8\u30E9\u30FC: OutlineableChunkStart\u30DE\u30FC\u30AB\u30FC\u3068OutlineableChunkEnd\u30DE\u30FC\u30AB\u30FC\u306F\u3001\u5BFE\u306B\u306A\u3063\u3066\u304A\u308A\u3001\u304B\u3064\u6B63\u3057\u304F\u30CD\u30B9\u30C8\u3055\u308C\u3066\u3044\u308B\u5FC5\u8981\u304C\u3042\u308A\u307E\u3059\u3002"},
/*
* Note to translators: This message describes an internal error in the
* processor. The term "byte code" is a Java technical term for the
* executable code in a Java method. The "method" that is being
* referred to is a Java method in a translet that XSLTC is generating
* in processing a stylesheet. The "instruction" that is being
* referred to is one of the instrutions in the Java byte code in that
* method. "Outlined" is a technical term internal to XSLTC and
* should not be translated.
*/
{ErrorMsg.OUTLINE_ERR_DELETED_TARGET,
"\u5185\u90E8XSLTC\u30A8\u30E9\u30FC: \u30A2\u30A6\u30C8\u30E9\u30A4\u30F3\u5316\u3055\u308C\u305F\u30D0\u30A4\u30C8\u30FB\u30B3\u30FC\u30C9\u306E\u30D6\u30ED\u30C3\u30AF\u306E\u4E00\u90E8\u3067\u3042\u3063\u305F\u547D\u4EE4\u306F\u3001\u5143\u306E\u30E1\u30BD\u30C3\u30C9\u306E\u4E2D\u3067\u307E\u3060\u53C2\u7167\u3055\u308C\u3066\u3044\u307E\u3059\u3002"
},
/*
* Note to translators: This message describes an internal error in the
* processor. The "method" that is being referred to is a Java method
* in a translet that XSLTC is generating.
*
*/
{ErrorMsg.OUTLINE_ERR_METHOD_TOO_BIG,
"\u5185\u90E8XSLTC\u30A8\u30E9\u30FC: \u30C8\u30E9\u30F3\u30B9\u30EC\u30C3\u30C8\u5185\u306E\u30E1\u30BD\u30C3\u30C9\u304C\u3001Java\u4EEE\u60F3\u30DE\u30B7\u30F3\u306E\u5236\u9650(1\u30E1\u30BD\u30C3\u30C9\u306E\u9577\u3055\u306F\u6700\u592764\u30AD\u30ED\u30D0\u30A4\u30C8)\u3092\u8D85\u3048\u3066\u3044\u307E\u3059\u3002\u4E00\u822C\u7684\u306B\u3001\u30B9\u30BF\u30A4\u30EB\u30B7\u30FC\u30C8\u5185\u306E\u30C6\u30F3\u30D7\u30EC\u30FC\u30C8\u306E\u30B5\u30A4\u30BA\u304C\u5927\u304D\u904E\u304E\u308B\u3053\u3068\u304C\u539F\u56E0\u3068\u3057\u3066\u8003\u3048\u3089\u308C\u307E\u3059\u3002\u5C0F\u3055\u3044\u30B5\u30A4\u30BA\u306E\u30C6\u30F3\u30D7\u30EC\u30FC\u30C8\u3092\u4F7F\u7528\u3057\u3066\u3001\u30B9\u30BF\u30A4\u30EB\u30B7\u30FC\u30C8\u3092\u518D\u69CB\u6210\u3057\u3066\u304F\u3060\u3055\u3044\u3002"
},
{ErrorMsg.DESERIALIZE_TRANSLET_ERR, "Java\u30BB\u30AD\u30E5\u30EA\u30C6\u30A3\u304C\u6709\u52B9\u5316\u3055\u308C\u3066\u3044\u308B\u5834\u5408\u3001TemplatesImpl\u306E\u30C7\u30B7\u30EA\u30A2\u30E9\u30A4\u30BA\u306E\u30B5\u30DD\u30FC\u30C8\u306F\u7121\u52B9\u5316\u3055\u308C\u307E\u3059\u3002\u3053\u308C\u306F\u3001jdk.xml.enableTemplatesImplDeserialization\u30B7\u30B9\u30C6\u30E0\u30FB\u30D7\u30ED\u30D1\u30C6\u30A3\u3092true\u306B\u8A2D\u5B9A\u3057\u3066\u30AA\u30FC\u30D0\u30FC\u30E9\u30A4\u30C9\u3067\u304D\u307E\u3059\u3002"}
};
}
}
|
{
"pile_set_name": "Github"
}
|
/*
* Copyright (c) 2016-2020 Positive Technologies, https://www.ptsecurity.com,
* Fast Positive Hash.
*
* Portions Copyright (c) 2010-2020 Leonid Yuriev <leo@yuriev.ru>,
* The 1Hippeus project (t1h).
*
* This software is provided 'as-is', without any express or implied
* warranty. In no event will the authors be held liable for any damages
* arising from the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software
* in a product, an acknowledgement in the product documentation would be
* appreciated but is not required.
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
* 3. This notice may not be removed or altered from any source distribution.
*/
/*
* t1ha = { Fast Positive Hash, aka "Позитивный Хэш" }
* by [Positive Technologies](https://www.ptsecurity.ru)
*
* Briefly, it is a 64-bit Hash Function:
* 1. Created for 64-bit little-endian platforms, in predominantly for x86_64,
* but portable and without penalties it can run on any 64-bit CPU.
* 2. In most cases up to 15% faster than City64, xxHash, mum-hash, metro-hash
* and all others portable hash-functions (which do not use specific
* hardware tricks).
* 3. Not suitable for cryptography.
*
* The Future will (be) Positive. Всё будет хорошо.
*
* ACKNOWLEDGEMENT:
* The t1ha was originally developed by Leonid Yuriev (Леонид Юрьев)
* for The 1Hippeus project - zerocopy messaging in the spirit of Sparta!
*/
#ifndef T1HA0_DISABLED
#include "t1ha_bits.h"
#include "t1ha_selfcheck.h"
/* Fetches the trailing `tail` bytes (tail & 3; 0 means all 4) of a 4-byte
 * aligned buffer as a little-endian uint32_t, never reading past the valid
 * data except in the sanctioned "oneshot" fast path. */
static __maybe_unused __always_inline uint32_t tail32_le_aligned(const void *v,
                                                                 size_t tail) {
  const uint8_t *const p = (const uint8_t *)v;
#if T1HA_USE_FAST_ONESHOT_READ && !defined(__SANITIZE_ADDRESS__)
  /* We can perform a 'oneshot' read, which is little bit faster.
   * Reads the full word, then masks away the bytes beyond `tail`. */
  const unsigned shift = ((4 - tail) & 3) << 3;
  return fetch32_le_aligned(p) & ((~UINT32_C(0)) >> shift);
#else
  uint32_t r = 0;
  switch (tail & 3) {
  default:
    unreachable();
  /* fall through */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  /* For most CPUs this code is better when not needed
   * copying for alignment or byte reordering. */
  case 0:
    return fetch32_le_aligned(p);
  case 3:
    r = (uint32_t)p[2] << 16;
  /* fall through */
  case 2:
    return r + fetch16_le_aligned(p);
  case 1:
    return p[0];
#else
  /* Big-endian host: assemble the LE value byte-by-byte. */
  case 0:
    r += p[3];
    r <<= 8;
  /* fall through */
  case 3:
    r += p[2];
    r <<= 8;
  /* fall through */
  case 2:
    r += p[1];
    r <<= 8;
  /* fall through */
  case 1:
    return r + p[0];
#endif
  }
#endif /* T1HA_USE_FAST_ONESHOT_READ */
}
/* Little-endian fetch of the trailing `tail` bytes (tail & 3; 0 means 4)
 * from a buffer with no alignment guarantee. */
static __maybe_unused __always_inline uint32_t
tail32_le_unaligned(const void *v, size_t tail) {
  const uint8_t *p = (const uint8_t *)v;
#ifdef can_read_underside
  /* On some systems (e.g. x86) we can perform a 'oneshot' read, which
   * is little bit faster. Thanks Marcin Żukowski <marcin.zukowski@gmail.com>
   * for the reminder. */
  const unsigned offset = (4 - tail) & 3;
  const unsigned shift = offset << 3;
  if (likely(can_read_underside(p, 4))) {
    /* Read a word ending at the tail, then shift out the leading bytes. */
    p -= offset;
    return fetch32_le_unaligned(p) >> shift;
  }
  /* Otherwise read forward and mask away the out-of-range bytes. */
  return fetch32_le_unaligned(p) & ((~UINT32_C(0)) >> shift);
#else
  uint32_t r = 0;
  switch (tail & 3) {
  default:
    unreachable();
  /* fall through */
#if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__EFFICIENT &&           \
    __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  /* For most CPUs this code is better when not needed
   * copying for alignment or byte reordering. */
  case 0:
    return fetch32_le_unaligned(p);
  case 3:
    r = (uint32_t)p[2] << 16;
  /* fall through */
  case 2:
    return r + fetch16_le_unaligned(p);
  case 1:
    return p[0];
#else
  /* For most CPUs this code is better than a
   * copying for alignment and/or byte reordering. */
  case 0:
    r += p[3];
    r <<= 8;
  /* fall through */
  case 3:
    r += p[2];
    r <<= 8;
  /* fall through */
  case 2:
    r += p[1];
    r <<= 8;
  /* fall through */
  case 1:
    return r + p[0];
#endif
  }
#endif /* can_read_underside */
}
/* Big-endian counterpart of tail32_le_aligned: fetch the trailing `tail`
 * bytes (tail & 3; 0 means 4) of a 4-byte aligned buffer as a BE uint32_t. */
static __maybe_unused __always_inline uint32_t tail32_be_aligned(const void *v,
                                                                 size_t tail) {
  const uint8_t *const p = (const uint8_t *)v;
#if T1HA_USE_FAST_ONESHOT_READ && !defined(__SANITIZE_ADDRESS__)
  /* We can perform a 'oneshot' read, which is little bit faster.
   * For BE the surplus bytes are the low-order ones, so shift right. */
  const unsigned shift = ((4 - tail) & 3) << 3;
  return fetch32_be_aligned(p) >> shift;
#else
  switch (tail & 3) {
  default:
    unreachable();
  /* fall through */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  /* For most CPUs this code is better when not needed
   * copying for alignment or byte reordering. */
  case 1:
    return p[0];
  case 2:
    return fetch16_be_aligned(p);
  case 3:
    return fetch16_be_aligned(p) << 8 | p[2];
  case 0:
    return fetch32_be_aligned(p);
#else
  /* Little-endian host: assemble the BE value byte-by-byte. */
  case 1:
    return p[0];
  case 2:
    return p[1] | (uint32_t)p[0] << 8;
  case 3:
    return p[2] | (uint32_t)p[1] << 8 | (uint32_t)p[0] << 16;
  case 0:
    return p[3] | (uint32_t)p[2] << 8 | (uint32_t)p[1] << 16 |
           (uint32_t)p[0] << 24;
#endif
  }
#endif /* T1HA_USE_FAST_ONESHOT_READ */
}
/* Big-endian fetch of the trailing `tail` bytes (tail & 3; 0 means 4)
 * from a buffer with no alignment guarantee. */
static __maybe_unused __always_inline uint32_t
tail32_be_unaligned(const void *v, size_t tail) {
  const uint8_t *p = (const uint8_t *)v;
#ifdef can_read_underside
  /* On some systems we can perform a 'oneshot' read, which is little bit
   * faster. Thanks Marcin Żukowski <marcin.zukowski@gmail.com> for the
   * reminder. */
  const unsigned offset = (4 - tail) & 3;
  const unsigned shift = offset << 3;
  if (likely(can_read_underside(p, 4))) {
    /* Read a word ending at the tail and mask the leading garbage. */
    p -= offset;
    return fetch32_be_unaligned(p) & ((~UINT32_C(0)) >> shift);
  }
  /* Otherwise read forward and shift out the out-of-range low bytes. */
  return fetch32_be_unaligned(p) >> shift;
#else
  switch (tail & 3) {
  default:
    unreachable();
  /* fall through */
#if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__EFFICIENT &&           \
    __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  /* For most CPUs this code is better when not needed
   * copying for alignment or byte reordering. */
  case 1:
    return p[0];
  case 2:
    return fetch16_be_unaligned(p);
  case 3:
    return fetch16_be_unaligned(p) << 8 | p[2];
  case 0:
    return fetch32_be_unaligned(p);
#else
  /* For most CPUs this code is better than a
   * copying for alignment and/or byte reordering. */
  case 1:
    return p[0];
  case 2:
    return p[1] | (uint32_t)p[0] << 8;
  case 3:
    return p[2] | (uint32_t)p[1] << 8 | (uint32_t)p[0] << 16;
  case 0:
    return p[3] | (uint32_t)p[2] << 8 | (uint32_t)p[1] << 16 |
           (uint32_t)p[0] << 24;
#endif
  }
#endif /* can_read_underside */
}
/***************************************************************************/
#ifndef rot32
/* Rotate the 32-bit value `x` right by `n` bits (n in 1..31; n == 0 or 32
 * would be undefined behavior on the shift, matching the usual rotr idiom). */
static __maybe_unused __always_inline uint32_t rot32(uint32_t x, unsigned n) {
  return (x << (32 - n)) | (x >> n);
}
#endif /* rot32 */
/* One 32-bit mixing step: widen-multiply (*b + v) by `prime`, XOR the
 * low half of the 64-bit product into *a and add the high half into *b. */
static __always_inline void mixup32(uint32_t *a, uint32_t *b, uint32_t v,
                                    uint32_t prime) {
  uint64_t l = mul_32x32_64(*b + v, prime);
  *a ^= (uint32_t)l;
  *b += (uint32_t)(l >> 32);
}
/* Final avalanche for the 32-bit t1ha0 variants: pack the (a, b) state into
 * one 64-bit word, then run three multiply/xor-shift rounds to diffuse it. */
static __always_inline uint64_t final32(uint32_t a, uint32_t b) {
  uint64_t x = ((uint64_t)a << 32) | (b ^ rot32(a, 13));
  x *= prime_0;
  x ^= x >> 41;
  x *= prime_4;
  x ^= x >> 47;
  x *= prime_6;
  return x;
}
/* 32-bit 'magic' primes */
/* Multiplicative constants used by the 32-bit mixing loop and final32. */
static const uint32_t prime32_0 = UINT32_C(0x92D78269);
static const uint32_t prime32_1 = UINT32_C(0xCA9B4735);
static const uint32_t prime32_2 = UINT32_C(0xA4ABA1C3);
static const uint32_t prime32_3 = UINT32_C(0xF6499843);
static const uint32_t prime32_4 = UINT32_C(0x86F0FD61);
static const uint32_t prime32_5 = UINT32_C(0xCA2DA6FB);
static const uint32_t prime32_6 = UINT32_C(0xC4BB3575);
/* TODO: C++ template in the next version */

/* Shared body of the 32-bit hash variants, parameterized on byte order
 * (le/be) and access kind (aligned/unaligned) via the fetch32_/tail32_
 * helper families. Inputs longer than 16 bytes are bulk-mixed 16 bytes
 * per iteration; the remaining 0..15 bytes fall through the switch,
 * which relies on deliberate case fall-through. Expects `data`, `len`,
 * and the state words `a`/`b` in scope; expands to a `return`. */
#define T1HA0_BODY(ENDIANNES, ALIGNESS)                                        \
  const uint32_t *v = (const uint32_t *)data;                                  \
  if (unlikely(len > 16)) {                                                    \
    uint32_t c = ~a;                                                           \
    uint32_t d = rot32(b, 5);                                                  \
    const uint32_t *detent =                                                   \
        (const uint32_t *)((const uint8_t *)data + len - 15);                  \
    do {                                                                       \
      const uint32_t w0 = fetch32_##ENDIANNES##_##ALIGNESS(v + 0);             \
      const uint32_t w1 = fetch32_##ENDIANNES##_##ALIGNESS(v + 1);             \
      const uint32_t w2 = fetch32_##ENDIANNES##_##ALIGNESS(v + 2);             \
      const uint32_t w3 = fetch32_##ENDIANNES##_##ALIGNESS(v + 3);             \
      v += 4;                                                                  \
      prefetch(v);                                                             \
                                                                               \
      const uint32_t d13 = w1 + rot32(w3 + d, 17);                             \
      const uint32_t c02 = w0 ^ rot32(w2 + c, 11);                             \
      d ^= rot32(a + w0, 3);                                                   \
      c ^= rot32(b + w1, 7);                                                   \
      b = prime32_1 * (c02 + w3);                                              \
      a = prime32_0 * (d13 ^ w2);                                              \
    } while (likely(v < detent));                                              \
                                                                               \
    c += a;                                                                    \
    d += b;                                                                    \
    a ^= prime32_6 * (rot32(c, 16) + d);                                       \
    b ^= prime32_5 * (c + rot32(d, 16));                                       \
                                                                               \
    len &= 15;                                                                 \
  }                                                                            \
                                                                               \
  switch (len) {                                                               \
  default:                                                                     \
    mixup32(&a, &b, fetch32_##ENDIANNES##_##ALIGNESS(v++), prime32_4);         \
  /* fall through */                                                           \
  case 12:                                                                     \
  case 11:                                                                     \
  case 10:                                                                     \
  case 9:                                                                      \
    mixup32(&b, &a, fetch32_##ENDIANNES##_##ALIGNESS(v++), prime32_3);         \
  /* fall through */                                                           \
  case 8:                                                                      \
  case 7:                                                                      \
  case 6:                                                                      \
  case 5:                                                                      \
    mixup32(&a, &b, fetch32_##ENDIANNES##_##ALIGNESS(v++), prime32_2);         \
  /* fall through */                                                           \
  case 4:                                                                      \
  case 3:                                                                      \
  case 2:                                                                      \
  case 1:                                                                      \
    mixup32(&b, &a, tail32_##ENDIANNES##_##ALIGNESS(v, len), prime32_1);       \
  /* fall through */                                                           \
  case 0:                                                                      \
    return final32(a, b);                                                      \
  }
/* 32-bit little-endian flavor of t1ha0. */
uint64_t t1ha0_32le(const void *data, size_t len, uint64_t seed) {
  /* Initial state mixes the length with both halves of the 64-bit seed. */
  uint32_t a = rot32((uint32_t)len, 17) + (uint32_t)seed;
  uint32_t b = (uint32_t)len ^ (uint32_t)(seed >> 32);
#if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__EFFICIENT
  T1HA0_BODY(le, unaligned);
#else
  /* Take the aligned fast path when the pointer permits. */
  const bool misaligned = (((uintptr_t)data) & (ALIGNMENT_32 - 1)) != 0;
  if (misaligned) {
    T1HA0_BODY(le, unaligned);
  } else {
    T1HA0_BODY(le, aligned);
  }
#endif
}
/* 32-bit big-endian flavor of t1ha0; identical to t1ha0_32le except for
 * the byte order of the fetch helpers. */
uint64_t t1ha0_32be(const void *data, size_t len, uint64_t seed) {
  uint32_t a = rot32((uint32_t)len, 17) + (uint32_t)seed;
  uint32_t b = (uint32_t)len ^ (uint32_t)(seed >> 32);
#if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__EFFICIENT
  T1HA0_BODY(be, unaligned);
#else
  /* Take the aligned fast path when the pointer permits. */
  const bool misaligned = (((uintptr_t)data) & (ALIGNMENT_32 - 1)) != 0;
  if (misaligned) {
    T1HA0_BODY(be, unaligned);
  } else {
    T1HA0_BODY(be, aligned);
  }
#endif
}
/***************************************************************************/
#if T1HA0_AESNI_AVAILABLE && defined(__ia32__)
/* Queries CPUID and packs two feature words into one uint64_t:
 * low 32 bits = ECX of leaf 1, high 32 bits = EBX of leaf 7.
 * Returns 0 if neither GCC-style nor MSVC intrinsics are available. */
__cold uint64_t t1ha_ia32cpu_features(void) {
  uint32_t features = 0;
  uint32_t extended = 0;
#ifdef __GNUC__
  uint32_t eax, ebx, ecx, edx;
  const unsigned cpuid_max = __get_cpuid_max(0, NULL);
  if (cpuid_max >= 1) {
    /* Leaf 1: `features` receives ECX. */
    __cpuid_count(1, 0, eax, ebx, features, edx);
    if (cpuid_max >= 7)
      /* Leaf 7 subleaf 0: `extended` receives EBX. */
      __cpuid_count(7, 0, eax, extended, ecx, edx);
  }
#elif defined(_MSC_VER)
  int info[4];
  __cpuid(info, 0);
  const unsigned cpuid_max = info[0];
  if (cpuid_max >= 1) {
    __cpuidex(info, 1, 0);
    features = info[2]; /* ECX of leaf 1 */
    if (cpuid_max >= 7) {
      __cpuidex(info, 7, 0);
      extended = info[1]; /* EBX of leaf 7 */
    }
  }
#endif
  return features | (uint64_t)extended << 32;
}
#endif /* T1HA0_AESNI_AVAILABLE && __ia32__ */
#if T1HA0_RUNTIME_SELECT
/* Picks the best t1ha0 implementation for the running machine:
 * AES-NI variants (plain/AVX/AVX2) on capable x86, otherwise the 64-bit
 * t1ha1/t1ha2 on 64-bit targets, falling back to the 32-bit flavors. */
__cold t1ha0_function_t t1ha0_resolve(void) {
#if T1HA0_AESNI_AVAILABLE && defined(__ia32__)
  uint64_t features = t1ha_ia32cpu_features();
  if (t1ha_ia32_AESNI_avail(features)) {
    if (t1ha_ia32_AVX_avail(features))
      return t1ha_ia32_AVX2_avail(features) ? t1ha0_ia32aes_avx2
                                            : t1ha0_ia32aes_avx;
    return t1ha0_ia32aes_noavx;
  }
#endif /* T1HA0_AESNI_AVAILABLE && __ia32__ */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#if (UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul) &&                \
    (!defined(T1HA1_DISABLED) || !defined(T1HA2_DISABLED))
#ifndef T1HA1_DISABLED
  return t1ha1_be;
#else
  return t1ha2_atonce;
#endif /* T1HA1_DISABLED */
#else
  return t1ha0_32be;
#endif
#else /* __BYTE_ORDER__ != __ORDER_BIG_ENDIAN__ */
#if (UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul) &&                \
    (!defined(T1HA1_DISABLED) || !defined(T1HA2_DISABLED))
#ifndef T1HA1_DISABLED
  return t1ha1_le;
#else
  return t1ha2_atonce;
#endif /* T1HA1_DISABLED */
#else
  return t1ha0_32le;
#endif
#endif /* __BYTE_ORDER__ */
}

#if T1HA_USE_INDIRECT_FUNCTIONS
/* Use IFUNC (GNU ELF indirect functions) to choice implementation at runtime.
 * For more info please see
 * https://en.wikipedia.org/wiki/Executable_and_Linkable_Format
 * and https://sourceware.org/glibc/wiki/GNU_IFUNC */
#if __has_attribute(__ifunc__)
uint64_t t1ha0(const void *data, size_t len, uint64_t seed)
    __attribute__((__ifunc__("t1ha0_resolve")));
#else
/* Toolchain lacks the ifunc attribute: emit the equivalent assembler. */
__asm("\t.globl\tt1ha0\n\t.type\tt1ha0, "
      "%gnu_indirect_function\n\t.set\tt1ha0,t1ha0_resolve");
#endif /* __has_attribute(__ifunc__) */
#elif __GNUC_PREREQ(4, 0) || __has_attribute(__constructor__)
/* No IFUNC support: resolve once at load time via a constructor. */
uint64_t (*t1ha0_funcptr)(const void *, size_t, uint64_t);
static __cold void __attribute__((__constructor__)) t1ha0_init(void) {
  t1ha0_funcptr = t1ha0_resolve();
}
#else /* T1HA_USE_INDIRECT_FUNCTIONS */
/* Last resort: lazy resolution on the first call through the pointer. */
static __cold uint64_t t1ha0_proxy(const void *data, size_t len,
                                   uint64_t seed) {
  t1ha0_funcptr = t1ha0_resolve();
  return t1ha0_funcptr(data, len, seed);
}
uint64_t (*t1ha0_funcptr)(const void *, size_t, uint64_t) = t1ha0_proxy;
#endif /* !T1HA_USE_INDIRECT_FUNCTIONS */
#endif /* T1HA0_RUNTIME_SELECT */
#endif /* T1HA0_DISABLED */
|
{
"pile_set_name": "Github"
}
|
#!/usr/bin/python -u
#
# Copyright (c) 2013 by Dhiru Kholia, <dhiru (at) openwall.com>
#
# Python Bindings for LZMA
#
# Copyright (c) 2004-2010 by Joachim Bauch, mail@joachim-bauch.de
# 7-Zip Copyright (C) 1999-2010 Igor Pavlov
# LZMA SDK Copyright (C) 1999-2010 Igor Pavlov
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#
"""Read from and write to 7zip format archives.
"""
from binascii import unhexlify
from datetime import datetime
try:
import pylzma
# To install pylzma on Ubuntu:
# apt-get install python-pip python-dev
# pip install pylzma # may do as non-root user in group staff
except ImportError:
pass
from struct import pack, unpack
from zlib import crc32
import zlib
import bz2
import binascii
import StringIO
import sys
import os
try:
from io import BytesIO
except ImportError:
from cStringIO import StringIO as BytesIO
try:
from functools import reduce
except ImportError:
# reduce is available in functools starting with Python 2.6
pass
try:
    from pytz import UTC
except ImportError:
    # pytz is optional, define own "UTC" timestamp
    # reference implementation from Python documentation
    from datetime import timedelta, tzinfo

    ZERO = timedelta(0)

    class UTC(tzinfo):
        """UTC"""

        def utcoffset(self, dt):
            return ZERO

        def tzname(self, dt):
            return "UTC"

        def dst(self, dt):
            return ZERO
try:
    unicode
except NameError:
    # Python 3.x: `unicode` no longer exists, str is already text,
    # so make unicode(s, encoding) a no-op.
    def unicode(s, encoding):
        return s
else:
    # Python 2.x: make bytes(s, encoding) a no-op so that both the
    # 2.x and 3.x spellings work in the rest of the file.
    def bytes(s, encoding):
        return s
READ_BLOCKSIZE = 16384

# 7z container magic and the property/record IDs used while walking the
# archive header, followed by the coder (compression method) ID prefixes.
MAGIC_7Z = unhexlify('377abcaf271c')  # '7z\xbc\xaf\x27\x1c'
PROPERTY_END = unhexlify('00')  # '\x00'
PROPERTY_HEADER = unhexlify('01')  # '\x01'
PROPERTY_ARCHIVE_PROPERTIES = unhexlify('02')  # '\x02'
PROPERTY_ADDITIONAL_STREAMS_INFO = unhexlify('03')  # '\x03'
PROPERTY_MAIN_STREAMS_INFO = unhexlify('04')  # '\x04'
PROPERTY_FILES_INFO = unhexlify('05')  # '\x05'
PROPERTY_PACK_INFO = unhexlify('06')  # '\x06'
PROPERTY_UNPACK_INFO = unhexlify('07')  # '\x07'
PROPERTY_SUBSTREAMS_INFO = unhexlify('08')  # '\x08'
PROPERTY_SIZE = unhexlify('09')  # '\x09'
PROPERTY_CRC = unhexlify('0a')  # '\x0a'
PROPERTY_FOLDER = unhexlify('0b')  # '\x0b'
PROPERTY_CODERS_UNPACK_SIZE = unhexlify('0c')  # '\x0c'
PROPERTY_NUM_UNPACK_STREAM = unhexlify('0d')  # '\x0d'
PROPERTY_EMPTY_STREAM = unhexlify('0e')  # '\x0e'
PROPERTY_EMPTY_FILE = unhexlify('0f')  # '\x0f'
PROPERTY_ANTI = unhexlify('10')  # '\x10'
PROPERTY_NAME = unhexlify('11')  # '\x11'
PROPERTY_CREATION_TIME = unhexlify('12')  # '\x12'
PROPERTY_LAST_ACCESS_TIME = unhexlify('13')  # '\x13'
PROPERTY_LAST_WRITE_TIME = unhexlify('14')  # '\x14'
PROPERTY_ATTRIBUTES = unhexlify('15')  # '\x15'
PROPERTY_COMMENT = unhexlify('16')  # '\x16'
PROPERTY_ENCODED_HEADER = unhexlify('17')  # '\x17'

COMPRESSION_METHOD_COPY = unhexlify('00')  # '\x00'
COMPRESSION_METHOD_LZMA = unhexlify('03')  # '\x03'
COMPRESSION_METHOD_CRYPTO = unhexlify('06')  # '\x06'
COMPRESSION_METHOD_MISC = unhexlify('04')  # '\x04'
COMPRESSION_METHOD_MISC_ZIP = unhexlify('0401')  # '\x04\x01'
COMPRESSION_METHOD_MISC_BZIP = unhexlify('0402')  # '\x04\x02'
COMPRESSION_METHOD_7Z_AES256_SHA256 = unhexlify('06f10701')  # '\x06\xf1\x07\x01'
# number of seconds between 1601/01/01 and 1970/01/01 (UTC)
# used to adjust 7z FILETIME to Python timestamp
TIMESTAMP_ADJUST = -11644473600


def toTimestamp(filetime):
    """Convert a 7z/Windows FILETIME value to a Unix timestamp (float)."""
    # FILETIME counts 100-nanosecond intervals since 1601-01-01 (UTC):
    # scale down to seconds, then shift the epoch to 1970-01-01.
    seconds_since_1601 = filetime / 10000000.0
    return seconds_since_1601 + TIMESTAMP_ADJUST
class ArchiveError(Exception):
    """Base class for every error raised by this 7z reader."""
    pass


class FormatError(ArchiveError):
    """The input does not conform to the 7z container format."""
    pass


class EncryptedArchiveError(ArchiveError):
    """The archive (or its header) is encrypted and cannot be read as-is."""
    pass


class UnsupportedCompressionMethodError(ArchiveError):
    """A folder uses a coder this reader has no decoder for."""
    pass


class DecryptionError(ArchiveError):
    """Base class for password/decryption related failures."""
    pass


class NoPasswordGivenError(DecryptionError):
    """Decryption was required but no password was supplied."""
    pass


class WrongPasswordError(DecryptionError):
    """The supplied password failed to decrypt the data."""
    pass
class ArchiveTimestamp(long):
    """Windows FILETIME timestamp."""
    # NOTE(review): subclassing `long` pins this file to Python 2; on
    # Python 3 this would need to derive from `int`.

    def __repr__(self):
        return '%s(%d)' % (type(self).__name__, self)

    def as_datetime(self):
        """Convert FILETIME to Python datetime object."""
        return datetime.fromtimestamp(toTimestamp(self), UTC)
class Base(object):
    """ base class with support for various basic read/write functions """

    def _readReal64Bit(self, file):
        # Read a fixed 8-byte little-endian integer; returns the value
        # together with the raw bytes (the caller may need them for CRCs).
        res = file.read(8)
        a, b = unpack('<LL', res)
        return b << 32 | a, res

    def _read64Bit(self, file):
        # Read a 7z variable-length integer: the leading bits of the first
        # byte select how many extra little-endian bytes follow (0..8).
        b = ord(file.read(1))
        mask = 0x80
        for i in range(8):
            if b & mask == 0:
                bytes = list(unpack('%dB' % i, file.read(i)))
                bytes.reverse()
                value = (bytes and reduce(lambda x, y: x << 8 | y, bytes)) or 0
                highpart = b & (mask - 1)
                return value + (highpart << (i * 8))
            mask >>= 1

    def _readBoolean(self, file, count, checkall=0):
        # Read a packed bit vector of `count` booleans, MSB first.
        # With checkall, a leading non-zero byte means "all defined".
        if checkall:
            alldefined = file.read(1)
            if alldefined != unhexlify('00'):
                return [True] * count
        result = []
        b = 0
        mask = 0
        for i in range(count):
            if mask == 0:
                b = ord(file.read(1))
                mask = 0x80
            result.append(b & mask != 0)
            mask >>= 1
        return result

    def checkcrc(self, crc, data):
        # Mask to 32 bits so the comparison works on both Python 2 and 3.
        check = crc32(data) & 0xffffffff
        return crc == check
class PackInfo(Base):
    """ informations about packed streams """

    def __init__(self, file):
        # Offset of the packed streams (relative to the end of the
        # signature header) and how many of them there are.
        self.packpos = self._read64Bit(file)
        self.numstreams = self._read64Bit(file)
        id = file.read(1)
        if id == PROPERTY_SIZE:
            self.packsizes = [self._read64Bit(file) for x in range(self.numstreams)]
            id = file.read(1)
            if id == PROPERTY_CRC:
                # NOTE(review): the 7z spec stores pack CRCs as fixed 4-byte
                # values; this uses the variable-length reader -- TODO confirm.
                self.crcs = [self._read64Bit(file) for x in range(self.numstreams)]
                id = file.read(1)
        if id != PROPERTY_END:
            raise FormatError('end id expected but %s found' % repr(id))
class Folder(Base):
    """ a "Folder" represents a stream of compressed data """

    def __init__(self, file):
        numcoders = self._read64Bit(file)
        self.numcoders = numcoders
        self.coders = []
        self.digestdefined = False
        totalin = 0
        self.totalout = 0
        for i in range(numcoders):
            # Each coder entry may list alternative methods; read until the
            # "last alternative" flag (top bit of the flags byte) is clear.
            while True:
                b = ord(file.read(1))
                methodsize = b & 0xf           # length of the method ID
                issimple = b & 0x10 == 0       # simple coder: 1 in, 1 out
                noattributes = b & 0x20 == 0   # no properties blob follows
                last_alternative = b & 0x80 == 0
                c = {}
                c['method'] = file.read(methodsize)
                if not issimple:
                    c['numinstreams'] = self._read64Bit(file)
                    c['numoutstreams'] = self._read64Bit(file)
                else:
                    c['numinstreams'] = 1
                    c['numoutstreams'] = 1
                totalin += c['numinstreams']
                self.totalout += c['numoutstreams']
                if not noattributes:
                    c['properties'] = file.read(self._read64Bit(file))
                self.coders.append(c)
                if last_alternative:
                    break
        # Bind pairs chain one coder's output to another coder's input.
        numbindpairs = self.totalout - 1
        self.bindpairs = []
        for i in range(numbindpairs):
            self.bindpairs.append((self._read64Bit(file), self._read64Bit(file), ))
        # Remaining inputs are fed directly from packed streams.
        numpackedstreams = totalin - numbindpairs
        self.numpackedstreams = numpackedstreams
        self.packed_indexes = []
        if numpackedstreams == 1:
            # Single packed stream: it is the one input not bound to a pair.
            for i in range(totalin):
                if self.findInBindPair(i) < 0:
                    self.packed_indexes.append(i)
        elif numpackedstreams > 1:
            for i in range(numpackedstreams):
                self.packed_indexes.append(self._read64Bit(file))

    def getUnpackSize(self):
        # The folder's final output size is that of the last output stream
        # not consumed by a bind pair.
        if not self.unpacksizes:
            return 0
        r = list(range(len(self.unpacksizes)))
        r.reverse()
        for i in r:
            if self.findOutBindPair(i):
                return self.unpacksizes[i]
        raise TypeError('not found')

    def findInBindPair(self, index):
        # Index of the bind pair whose input is `index`, or -1.
        for idx in range(len(self.bindpairs)):
            a, b = self.bindpairs[idx]
            if a == index:
                return idx
        return -1

    def findOutBindPair(self, index):
        # Index of the bind pair whose output is `index`, or -1.
        for idx in range(len(self.bindpairs)):
            a, b = self.bindpairs[idx]
            if b == index:
                return idx
        return -1
class Digests(Base):
    """ holds a list of checksums """

    def __init__(self, file, count):
        # A defined-bit vector followed by one CRC32 per defined entry.
        self.defined = self._readBoolean(file, count, checkall=1)
        self.crcs = [unpack('<L', file.read(4))[0] for x in range(count)]


# The on-disk layout of folder digests is identical.
UnpackDigests = Digests
class UnpackInfo(Base):
    """ combines multiple folders """

    def __init__(self, file):
        id = file.read(1)
        if id != PROPERTY_FOLDER:
            raise FormatError('folder id expected but %s found' % repr(id))
        self.numfolders = self._read64Bit(file)
        self.folders = []
        # external == 0x00 means the folder records follow inline;
        # 0x01 means they live in a separate data stream.
        external = file.read(1)
        if external == unhexlify('00'):
            self.folders = [Folder(file) for x in range(self.numfolders)]
        elif external == unhexlify('01'):
            self.datastreamidx = self._read64Bit(file)
        else:
            raise FormatError('0x00 or 0x01 expected but %s found' % repr(external))
        id = file.read(1)
        if id != PROPERTY_CODERS_UNPACK_SIZE:
            raise FormatError('coders unpack size id expected but %s found' % repr(id))
        # One unpacked size per coder output stream, per folder.
        for folder in self.folders:
            folder.unpacksizes = [self._read64Bit(file) for x in range(folder.totalout)]
        id = file.read(1)
        if id == PROPERTY_CRC:
            digests = UnpackDigests(file, self.numfolders)
            for idx in range(self.numfolders):
                folder = self.folders[idx]
                folder.digestdefined = digests.defined[idx]
                folder.crc = digests.crcs[idx]
            id = file.read(1)
        if id != PROPERTY_END:
            raise FormatError('end id expected but %s found' % repr(id))
class SubstreamsInfo(Base):
    """ defines the substreams of a folder """

    def __init__(self, file, numfolders, folders):
        self.digests = []
        self.digestsdefined = []
        id = file.read(1)
        if id == PROPERTY_NUM_UNPACK_STREAM:
            self.numunpackstreams = [self._read64Bit(file) for x in range(numfolders)]
            id = file.read(1)
        else:
            # Default: one substream per folder.
            self.numunpackstreams = []
            for idx in range(numfolders):
                self.numunpackstreams.append(1)
        if id == PROPERTY_SIZE:
            # Sizes are stored for all but the last substream of each
            # folder; the last one is derived from the folder total.
            sum = 0
            self.unpacksizes = []
            for i in range(len(self.numunpackstreams)):
                for j in range(1, self.numunpackstreams[i]):
                    size = self._read64Bit(file)
                    self.unpacksizes.append(size)
                    sum += size
                self.unpacksizes.append(folders[i].getUnpackSize() - sum)
            id = file.read(1)
        # Digests are stored only for substreams whose CRC is not already
        # known from the folder record.
        numdigests = 0
        numdigeststotal = 0
        for i in range(numfolders):
            numsubstreams = self.numunpackstreams[i]
            if numsubstreams != 1 or not folders[i].digestdefined:
                numdigests += numsubstreams
            numdigeststotal += numsubstreams
        if id == PROPERTY_CRC:
            digests = Digests(file, numdigests)
            didx = 0
            for i in range(numfolders):
                folder = folders[i]
                numsubstreams = self.numunpackstreams[i]
                if numsubstreams == 1 and folder.digestdefined:
                    self.digestsdefined.append(True)
                    self.digests.append(folder.crc)
                else:
                    for j in range(numsubstreams):
                        self.digestsdefined.append(digests.defined[didx])
                        self.digests.append(digests.crcs[didx])
                        didx += 1
            id = file.read(1)
        if id != PROPERTY_END:
            raise FormatError('end id expected but %r found' % id)
        if not self.digestsdefined:
            self.digestsdefined = [False] * numdigeststotal
            self.digests = [0] * numdigeststotal
class StreamsInfo(Base):
    """ informations about compressed streams """

    def __init__(self, file):
        # Each section is optional; its presence is announced by its ID.
        id = file.read(1)
        if id == PROPERTY_PACK_INFO:
            self.packinfo = PackInfo(file)
            id = file.read(1)
        if id == PROPERTY_UNPACK_INFO:
            self.unpackinfo = UnpackInfo(file)
            id = file.read(1)
        if id == PROPERTY_SUBSTREAMS_INFO:
            # NOTE(review): assumes PROPERTY_UNPACK_INFO was present --
            # self.unpackinfo is otherwise unset here.
            self.substreamsinfo = SubstreamsInfo(file, self.unpackinfo.numfolders, self.unpackinfo.folders)
            id = file.read(1)
        if id != PROPERTY_END:
            raise FormatError('end id expected but %s found' % repr(id))
class FilesInfo(Base):
    """ holds file properties """

    def _readTimes(self, file, files, name):
        # Read a defined-bit vector, then one FILETIME per defined file,
        # stored into files[i][name] (None when undefined).
        defined = self._readBoolean(file, len(files), checkall=1)
        # NOTE: the "external" flag is currently ignored, should be 0x00
        external = file.read(1)
        for i in range(len(files)):
            if defined[i]:
                files[i][name] = ArchiveTimestamp(self._readReal64Bit(file)[0])
            else:
                files[i][name] = None

    def __init__(self, file):
        self.numfiles = self._read64Bit(file)
        self.files = [{'emptystream': False} for x in range(self.numfiles)]
        numemptystreams = 0
        # Property records: (type, size, payload) until PROPERTY_END.
        while True:
            typ = self._read64Bit(file)
            if typ > 255:
                raise FormatError('invalid type, must be below 256, is %d' % typ)
            typ = pack('B', typ)
            if typ == PROPERTY_END:
                break
            size = self._read64Bit(file)
            buffer = BytesIO(file.read(size))
            if typ == PROPERTY_EMPTY_STREAM:
                isempty = self._readBoolean(buffer, self.numfiles)
                list(map(lambda x, y: x.update({'emptystream': y}), self.files, isempty))
                for x in isempty:
                    if x: numemptystreams += 1
                emptyfiles = [False] * numemptystreams
                antifiles = [False] * numemptystreams
            elif typ == PROPERTY_EMPTY_FILE:
                emptyfiles = self._readBoolean(buffer, numemptystreams)
            elif typ == PROPERTY_ANTI:
                antifiles = self._readBoolean(buffer, numemptystreams)
            elif typ == PROPERTY_NAME:
                external = buffer.read(1)
                if external != unhexlify('00'):
                    self.dataindex = self._read64Bit(buffer)
                    # XXX: evaluate external
                    raise NotImplementedError
                # Names are NUL-terminated UTF-16-LE strings, one per file.
                for f in self.files:
                    name = ''
                    while True:
                        ch = buffer.read(2)
                        if ch == unhexlify('0000'):
                            f['filename'] = name
                            break
                        name += ch.decode('utf-16')
            elif typ == PROPERTY_CREATION_TIME:
                self._readTimes(buffer, self.files, 'creationtime')
            elif typ == PROPERTY_LAST_ACCESS_TIME:
                self._readTimes(buffer, self.files, 'lastaccesstime')
            elif typ == PROPERTY_LAST_WRITE_TIME:
                self._readTimes(buffer, self.files, 'lastwritetime')
            elif typ == PROPERTY_ATTRIBUTES:
                defined = self._readBoolean(buffer, self.numfiles, checkall=1)
                for i in range(self.numfiles):
                    f = self.files[i]
                    if defined[i]:
                        f['attributes'] = unpack('<L', buffer.read(4))[0]
                    else:
                        f['attributes'] = None
            else:
                raise FormatError('invalid type %r' % (typ))
class Header(Base):
    """ the archive header """

    def __init__(self, file):
        # Optional sections in fixed order, each announced by its ID.
        id = file.read(1)
        if id == PROPERTY_ARCHIVE_PROPERTIES:
            # NOTE(review): ArchiveProperties is not defined in this file's
            # visible portion -- this branch would raise NameError if hit.
            self.properties = ArchiveProperties(file)
            id = file.read(1)
        if id == PROPERTY_ADDITIONAL_STREAMS_INFO:
            self.additional_streams = StreamsInfo(file)
            id = file.read(1)
        if id == PROPERTY_MAIN_STREAMS_INFO:
            self.main_streams = StreamsInfo(file)
            id = file.read(1)
        if id == PROPERTY_FILES_INFO:
            self.files = FilesInfo(file)
            id = file.read(1)
        if id != PROPERTY_END:
            raise FormatError('end id expected but %s found' % (repr(id)))
class ArchiveFile(Base):
    """ wrapper around a file in the archive """

    def __init__(self, info, start, src_start, size, folder, archive, maxsize=None):
        # info:       dict of file properties (copied onto the instance)
        # start:      offset of this file inside the folder's unpacked data
        # src_start:  offset of the folder's packed data inside the archive
        # size:       unpacked size of this file
        # folder:     the Folder (coder chain) this file belongs to
        # archive:    the owning Archive7z (provides _file and password)
        self.digest = None
        self._archive = archive
        self._file = archive._file
        self._start = start
        self._src_start = src_start
        self._folder = folder
        self.size = size
        # maxsize is only valid for solid archives
        self._maxsize = maxsize
        for k, v in info.items():
            setattr(self, k, v)
        self.reset()
        # Dispatch table: coder method ID (or prefix of it) -> reader name.
        self._decoders = {
            COMPRESSION_METHOD_COPY: '_read_copy',
            COMPRESSION_METHOD_LZMA: '_read_lzma',
            COMPRESSION_METHOD_MISC_ZIP: '_read_zip',
            COMPRESSION_METHOD_MISC_BZIP: '_read_bzip',
            COMPRESSION_METHOD_7Z_AES256_SHA256: '_read_7z_aes256_sha256',
        }

    def _is_encrypted(self):
        # True when any coder in the chain is the 7z AES-256-SHA-256 one.
        return COMPRESSION_METHOD_7Z_AES256_SHA256 in [x['method'] for x in self._folder.coders]

    def reset(self):
        self.pos = 0

    def read(self):
        """Decode and return this file's data by running every coder in turn."""
        if not self._folder.coders:
            raise TypeError("file has no coder informations")
        data = None
        for coder in self._folder.coders:
            method = coder['method']
            decoder = None
            # Method IDs are matched by longest prefix: strip trailing
            # bytes until a known decoder is found.
            while method and decoder is None:
                decoder = self._decoders.get(method, None)
                method = method[:-1]
            if decoder is None:
                raise UnsupportedCompressionMethodError(repr(coder['method']))
            data = getattr(self, decoder)(coder, data)
        return data

    def _read_copy(self, coder, input):
        # "Copy" coder: the data is stored verbatim.
        if not input:
            self._file.seek(self._src_start)
            input = self._file.read(self.uncompressed)
        return input[self._start:self._start+self.size]

    def _read_from_decompressor(self, coder, decompressor, input, checkremaining=False, with_cache=False):
        """Feed packed data through `decompressor` and slice out this file.

        checkremaining: pass an output limit to decompress() where supported.
        with_cache:     for solid archives, cache the decompressor state so
                        the next file does not re-decompress from the start.
        """
        data = ''
        idx = 0
        cnt = 0
        properties = coder.get('properties', None)
        if properties:
            # Prime the decompressor with the coder's property blob.
            decompressor.decompress(properties)
        total = self.compressed
        if not input and total is None:
            # Streaming mode: read packed data block-by-block until enough
            # unpacked bytes (start of file + its size) are produced.
            remaining = self._start+self.size
            out = BytesIO()
            cache = getattr(self._folder, '_decompress_cache', None)
            if cache is not None:
                data, pos, decompressor = cache
                out.write(data)
                remaining -= len(data)
                self._file.seek(pos)
            else:
                self._file.seek(self._src_start)
            checkremaining = checkremaining and not self._folder.solid
            while remaining > 0:
                data = self._file.read(READ_BLOCKSIZE)
                if checkremaining or (with_cache and len(data) < READ_BLOCKSIZE):
                    tmp = decompressor.decompress(data, remaining)
                else:
                    tmp = decompressor.decompress(data)
                assert len(tmp) > 0
                out.write(tmp)
                remaining -= len(tmp)
            data = out.getvalue()
            if with_cache and self._folder.solid:
                # don't decompress start of solid archive for next file
                # TODO: limit size of cached data
                self._folder._decompress_cache = (data, self._file.tell(), decompressor)
        else:
            # One-shot mode: the packed size is known, read it all at once.
            if not input:
                self._file.seek(self._src_start)
                input = self._file.read(total)
            if checkremaining:
                data = decompressor.decompress(input, self._start+self.size)
            else:
                data = decompressor.decompress(input)
        return data[self._start:self._start+self.size]

    def _read_lzma(self, coder, input):
        dec = pylzma.decompressobj(maxlength=self._start+self.size)
        try:
            return self._read_from_decompressor(coder, dec, input, checkremaining=True, with_cache=True)
        except ValueError:
            # Corrupt LZMA data on an encrypted folder usually means the
            # preceding AES step used a wrong password.
            if self._is_encrypted():
                raise WrongPasswordError('invalid password')
            raise

    def _read_zip(self, coder, input):
        # Raw deflate stream (no zlib header), hence wbits=-15.
        dec = zlib.decompressobj(-15)
        return self._read_from_decompressor(coder, dec, input, checkremaining=True)

    def _read_bzip(self, coder, input):
        dec = bz2.BZ2Decompressor()
        return self._read_from_decompressor(coder, dec, input)

    # BUG FIX: this method was named `read_7z_aes256_sha256` (no leading
    # underscore), but the _decoders table dispatches on the string
    # '_read_7z_aes256_sha256', so getattr() raised AttributeError for
    # every AES-encrypted stream. Renamed to match the table.
    def _read_7z_aes256_sha256(self, coder, input):
        if not self._archive.password:
            raise NoPasswordGivenError()
        # TODO: this needs some sanity checks
        # Properties layout: flags byte, optional sizes byte, salt, IV.
        firstbyte = ord(coder['properties'][0])
        numcyclespower = firstbyte & 0x3f
        if firstbyte & 0xc0 != 0:
            saltsize = (firstbyte >> 7) & 1
            ivsize = (firstbyte >> 6) & 1
            secondbyte = ord(coder['properties'][1])
            saltsize += (secondbyte >> 4)
            ivsize += (secondbyte & 0x0f)
            assert len(coder['properties']) == 2+saltsize+ivsize
            salt = coder['properties'][2:2+saltsize]
            iv = coder['properties'][2+saltsize:2+saltsize+ivsize]
            assert len(salt) == saltsize
            assert len(iv) == ivsize
            assert numcyclespower <= 24
            if ivsize < 16:
                # Zero-pad the IV to the AES block size.
                iv += '\x00'*(16-ivsize)
        else:
            salt = iv = ''
        password = self._archive.password.encode('utf-16-le')
        key = pylzma.calculate_key(password, numcyclespower, salt=salt)
        cipher = pylzma.AESDecrypt(key, iv=iv)
        if not input:
            self._file.seek(self._src_start)
            uncompressed_size = self.uncompressed
            if uncompressed_size & 0x0f:
                # we need a multiple of 16 bytes
                uncompressed_size += 16 - (uncompressed_size & 0x0f)
            input = self._file.read(uncompressed_size)
        result = cipher.decrypt(input)
        return result

    # Backward-compatible alias for the previously mis-spelled public name.
    read_7z_aes256_sha256 = _read_7z_aes256_sha256

    def checkcrc(self):
        """Re-read the file and verify it against the stored CRC (if any)."""
        if self.digest is None:
            return True
        self.reset()
        data = self.read()
        return super(ArchiveFile, self).checkcrc(self.digest, data)
# XXX global state
# Parsed AES coder parameters, filled in by SetDecoderProperties2() and
# read back by the hash-dumping code in Archive7z below.
iv = None
ivSize = None
Salt = None
NumCyclesPower = None
SaltSize = None


def SetDecoderProperties2(data):
    # Parse a 7z AES-256-SHA-256 coder property blob (flags byte, optional
    # sizes byte, salt, IV) into the module-level globals above.
    # Returns a truthy string on success, None on a bad NumCyclesPower.
    # NOTE(review): str(bytearray-slice) only yields the raw bytes on
    # Python 2; on Python 3 it would produce "bytearray(b'...')" -- this
    # function assumes Python 2, like the print statements further down.
    global iv, ivSize, Salt, NumCyclesPower, SaltSize
    pos = 0
    data = bytearray(data)
    firstByte = data[pos]
    pos = pos + 1
    NumCyclesPower = firstByte & 0x3F;
    if NumCyclesPower > 24:
        # print "Bad NumCyclesPower value"
        return None
    if ((firstByte & 0xC0) == 0):
        # XXX
        # No salt/IV present at all.
        return "S_OK"
    SaltSize = (firstByte >> 7) & 1;
    ivSize = (firstByte >> 6) & 1;
    secondByte = data[pos]
    pos = pos + 1
    SaltSize += (secondByte >> 4);
    ivSize += (secondByte & 0x0F);
    # get salt
    Salt = data[pos:pos+SaltSize]
    Salt = str(Salt)
    pos = pos + SaltSize
    # get iv
    iv = data[pos:pos+ivSize]
    iv = str(iv)
    if len(iv) < 16:
        # Zero-pad the IV to the AES block size.
        iv = iv + "\x00" * (16 - len(iv))
    return "OK"
class Archive7z(Base):
    """ the archive itself """
    def __init__(self, file, password=None):
        # Parse the 7z signature header, then seek to and read the
        # "next header" (the archive metadata block near the end of
        # the file).  For archives with an encrypted header this code
        # does not decrypt: it prints a john-the-ripper style
        # "$7z$..." hash line for each folder and returns early (see
        # the XXX notes below).  Plain archives fall through to the
        # normal file-table parsing.
        self._file = file
        self.password = password
        self.header = file.read(len(MAGIC_7Z))
        if self.header != MAGIC_7Z:
            raise FormatError('not a 7z file')
        self.version = unpack('BB', file.read(2))
        self.startheadercrc = unpack('<L', file.read(4))[0]
        # the next-header offset/size are variable-length integers;
        # their raw bytes are folded into the running CRC checked below
        self.nextheaderofs, data = self._readReal64Bit(file)
        crc = crc32(data)
        self.nextheadersize, data = self._readReal64Bit(file)
        crc = crc32(data, crc)
        data = file.read(4)
        self.nextheadercrc = unpack('<L', data)[0]
        crc = crc32(data, crc) & 0xffffffff
        if crc != self.startheadercrc:
            raise FormatError('invalid header data')
        self.afterheader = file.tell()
        file.seek(self.nextheaderofs, 1)
        buffer = BytesIO(file.read(self.nextheadersize))
        if not self.checkcrc(self.nextheadercrc, buffer.getvalue()):
            raise FormatError('invalid header data')
        while True:
            id = buffer.read(1)
            if not id or id == PROPERTY_HEADER:
                break
            if id != PROPERTY_ENCODED_HEADER:
                raise TypeError('Unknown field: %r' % (id))
            # ReadAndDecodePackedStreams (7zIn.cpp)
            streams = StreamsInfo(buffer)
            file.seek(self.afterheader + 0)
            data = bytes('', 'ascii')
            for folder in streams.unpackinfo.folders:
                file.seek(streams.packinfo.packpos, 1)
                props = folder.coders[0]['properties']
                # decode properties
                if SetDecoderProperties2(props):
                    # derive keys
                    # password = "password".encode('utf-16-le')
                    # print NumCyclesPower, Salt, password
                    # key = pylzma.calculate_key(password, NumCyclesPower, salt=Salt)
                    # cipher = pylzma.AESDecrypt(key, iv=str(iv))
                    global Salt
                    if len(Salt) == 0:
                        Salt = "\x11\x22" # fake salt
                    for idx in range(len(streams.packinfo.packsizes)):
                        tmp = file.read(streams.packinfo.packsizes[idx])
                        fname = os.path.basename(self._file.name)
                        # emit one crackable hash line per packed stream
                        # (7z2john output format)
                        print "%s:$7z$0$%s$%s$%s$%s$%s$%s$%s$%s$%s" % (fname,
                            NumCyclesPower, SaltSize, binascii.hexlify(Salt),
                            ivSize, binascii.hexlify(iv), folder.crc, len(tmp),
                            folder.unpacksizes[idx], binascii.hexlify(tmp))
                        # print binascii.hexlify(tmp)
                        # result = cipher.decrypt(tmp)
                        # print folder.unpacksizes
                        # print folder.coders
                        # XXX we don't now how to handle unpacksizes of size > 1
                        # XXX we need to locate correct data and pass it to correct decompressor
                        # XXX correct decompressor can be located from folder.coders
                        # data = result # for checksum check
                        size = folder.unpacksizes[idx] # for checksum check
                        if len(folder.unpacksizes) > 1:
                            sys.stderr.write("%s : multiple unpacksizes found, not supported fully yet!\n" % fname)
                    # print binascii.hexlify(result)
                    # flds = Folder(BytesIO(result))
                    # print flds.coders
                    # print flds.packed_indexes, flds.totalout
                    # XXX return can't be right
                    return
                # else:
                #     for idx in range(len(streams.packinfo.packsizes)):
                #         tmp = file.read(streams.packinfo.packsizes[idx])
                #         data += pylzma.decompress(props+tmp, maxlength=folder.unpacksizes[idx])
                #
                #     if folder.digestdefined:
                #         if not self.checkcrc(folder.crc, data[0:size]):
                #             raise FormatError('invalid block data')
                #     # XXX return can't be right
                #     return
            # XXX this part is not done yet
            sys.stderr.write("%s : 7-Zip files without header encryption are *not* supported yet!\n" % (file.name))
            return
        # plain (unencrypted) header: parse the file/stream tables
        buffer = BytesIO(file.read())
        id = buffer.read(1)
        self.files = []
        if not id:
            # empty archive
            self.solid = False
            self.numfiles = 0
            self.filenames = []
            return
        xx = FilesInfo(buffer)
        self.header = Header(buffer)
        files = self.header.files
        folders = self.header.main_streams.unpackinfo.folders
        packinfo = self.header.main_streams.packinfo
        subinfo = self.header.main_streams.substreamsinfo
        packsizes = packinfo.packsizes
        self.solid = packinfo.numstreams == 1
        if hasattr(subinfo, 'unpacksizes'):
            unpacksizes = subinfo.unpacksizes
        else:
            unpacksizes = [x.unpacksizes[0] for x in folders]
        # walk the file table, tracking position within the current
        # folder (pos) and the absolute source offset (src_pos)
        fidx = 0
        obidx = 0
        src_pos = self.afterheader
        pos = 0
        folder_start = 0
        folder_pos = src_pos
        maxsize = (self.solid and packinfo.packsizes[0]) or None
        for idx in range(files.numfiles):
            info = files.files[idx]
            if info['emptystream']:
                continue
            folder = folders[fidx]
            folder.solid = subinfo.numunpackstreams[fidx] > 1
            maxsize = (folder.solid and packinfo.packsizes[fidx]) or None
            if folder.solid:
                # file is part of solid archive
                info['compressed'] = None
            elif obidx < len(packsizes):
                # file is compressed
                info['compressed'] = packsizes[obidx]
            else:
                # file is not compressed
                info['compressed'] = unpacksizes[obidx]
            info['uncompressed'] = unpacksizes[obidx]
            file = ArchiveFile(info, pos, src_pos, unpacksizes[obidx], folder, self, maxsize=maxsize)
            if subinfo.digestsdefined[obidx]:
                file.digest = subinfo.digests[obidx]
            self.files.append(file)
            if folder.solid:
                pos += unpacksizes[obidx]
            else:
                src_pos += info['compressed']
            obidx += 1
            # advance to the next folder once all of its streams are used
            if idx >= subinfo.numunpackstreams[fidx]+folder_start:
                folder_pos += packinfo.packsizes[fidx]
                src_pos = folder_pos
                folder_start = idx
                fidx += 1
        self.numfiles = len(self.files)
        # NOTE(review): map() returns an iterator on Python 3; this
        # file's print statements mark it as Python 2, where this is a list.
        self.filenames = map(lambda x: x.filename, self.files)
    # interface like TarFile
    def getmember(self, name):
        # Return the ArchiveFile whose filename matches, or None.
        # XXX: store files in dictionary
        for f in self.files:
            if f.filename == name:
                return f
        return None
    def getmembers(self):
        # All ArchiveFile entries, in archive order.
        return self.files
    def getnames(self):
        # Filenames only, parallel to getmembers().
        return self.filenames
    def list(self, verbose=True):
        # Print a TarFile.list()-style listing to stdout.
        print ('total %d files in %sarchive' % (self.numfiles, (self.solid and 'solid ') or ''))
        if not verbose:
            print ('\n'.join(self.filenames))
            return
        for f in self.files:
            extra = (f.compressed and '%10d ' % (f.compressed)) or ' '
            print ('%10d%s%.8x %s' % (f.size, extra, f.digest, f.filename))
if __name__ == '__main__':
    # With no archives given, show usage (the loop below is then a
    # no-op).  Parsing each archive prints one hash line per folder.
    if len(sys.argv) < 2:
        sys.stdout.write("Usage: %s < encrypted 7-Zip files >\n" % sys.argv[0])
    for filename in sys.argv[1:]:
        f = Archive7z(open(filename, 'rb'))
|
{
"pile_set_name": "Github"
}
|
<!DOCTYPE html>
<html lang="en">
<head>
<link href="/css/video-js.css" rel="stylesheet">
<!-- If you'd like to support IE8 -->
<script src="/js/videojs-ie8.min.js"></script>
<!-- Global site tag (gtag.js) - Google Analytics -->
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-108121762-1"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'UA-108121762-1');
</script>
<title>Star Wars: The Clone Wars (2008 - 2015) Full Episodes | ItSaturday.com</title>
<meta charset="utf-8">
<link rel="stylesheet" href="/css/bootstrap.min.css">
<link rel="stylesheet" type="text/css" href="/css/main.css">
<link rel="stylesheet" type="text/css" href="https://maxcdn.bootstrapcdn.com/font-awesome/4.5.0/css/font-awesome.min.css">
<link rel="stylesheet" type="text/css" href="/css/bootcomplete.css">
<script src="/js/jquery.min.js"></script>
<script src="/js/jquery-ui-1.12.1.custom/jquery-ui.min.js"></script>
<script src="/js/Lettering.js-master/jquery.lettering.js"></script>
<script src="/js/jquery_lazyload.js" type="text/javascript"></script>
<script src="https://npmcdn.com/tether@1.2.4/dist/js/tether.min.js"></script>
<script src="/js/bootstrap.min.js"></script>
<script src="/js/EasyAutocomplete-1.3.5/jquery.easy-autocomplete.js"></script>
<link rel="stylesheet" href="/js/EasyAutocomplete-1.3.5/easy-autocomplete.min.css">
<meta name="keywords" content=""/>
<meta property="og:title" content="Star Wars: The Clone Wars (2008 - 2015) Full Episodes"/>
<meta property="og:description" content=""/>
<meta property="og:type" content=""/>
<meta property="og:url" content="http://www.itsaturday.com/Star-Wars-The-Clone-Wars-S6-Ep13---Sacrifice.html"/>
<meta property="og:image" content="http://www.itsaturday.com/images/seriesImages/2023/Star-Wars-The-Clone-Wars-2008--2015-Full-Episodes.jpg"/>
<meta property="og:site_name" content="http://www.itsaturday.com"/>
<meta name="twitter:card" content="summary">
<meta name="twitter:site" content="@">
<meta name="twitter:url" content="http://www.itsaturday.com/Star-Wars-The-Clone-Wars-S6-Ep13---Sacrifice.html">
<meta name="twitter:title" content="Star Wars: The Clone Wars (2008 - 2015) Full Episodes">
<meta name="twitter:description" content="">
<meta name="twitter:image" content="http://www.itsaturday.com/images/seriesImages/2023/Star-Wars-The-Clone-Wars-2008--2015-Full-Episodes.jpg">
<meta property="fb:app_id" content="?"/>
<meta name="verify-v1" content="?"/>
<meta itemprop="url" content="http://www.itsaturday.com/Star-Wars-The-Clone-Wars-S6-Ep13---Sacrifice.html"/>
<link rel="image_src" href="http://www.itsaturday.com/images/seriesImages/2023/Star-Wars-The-Clone-Wars-2008--2015-Full-Episodes.jpg"/>
<link rel="videothumbnail" href="http://www.itsaturday.com/images/seriesImages/2023/Star-Wars-The-Clone-Wars-2008--2015-Full-Episodes.jpg" type="image/jpeg"/>
<script>
// Initialize image lazy-loading once the DOM is ready.
// Fixed: $("document") is a tag-name selector that matches no
// elements; it only worked through jQuery's deprecated fallback of
// treating .ready() on any collection as a document-ready hook.
// $(document) is the supported form.
$(document).ready(function () {
    lazyload();
});
</script>
</head>
<body>
<div class="header container">
<div class="row hundred">
<div class="float-left" style="width: 240px; margin-left: 20px;">
<h1 class="h1" id="toggle">
<h1 id="logo">
<a href="/" class="link-unstyled">
<span class="char1">I</span><span class="char2">t</span><span class="char3 spacer-char">S</span><span
class="char4">a</span><span class="char5">t</span><span class="char6">u</span><span
class="char7">r</span><span class="char8">d</span><span class="char9">a</span><span
class="char3">y</span><span class="char2">!</span>
</a>
</h1>
</h1>
</div>
<form action="/search/" method="get"><div class="float-left row" style="margin-top: 15px;" >
<div class="searchboxwrapper col-12" >
<input id="search-input" type="text" class="searchbox rounded" style="" placeholder="Series, Episode, Year..." autocomplete="off" name="q"/>
<input class="searchsubmit rounded" type="submit" id="searchsubmit" value="">
</div>
</div>
</form>
</div>
</div>
<script>
</script>
<script>
$(document).ready(function () {
    // Autocomplete for the header search box: suggestions are fetched
    // from _search.php as JSON via POST and rendered as links.
    var options = {
        url: function(phrase) {
            return "_search.php?q=" + phrase + "&format=json";
        },
        getValue: "text",
        template: {
            type: "links",
            fields: {
                link: "website-link"
            }
        },
        ajaxSettings: {
            dataType: "json",
            method: "POST",
            data: {
                dataType: "json"
            }
        },
        // send the current input value as the POST "phrase" field
        preparePostData: function (data) {
            data.phrase = $("#search-input").val();
            return data;
        },
        // wait 400 ms after the last keystroke before querying
        requestDelay: 400
    };
    $("#search-input").easyAutocomplete(options);
    // NOTE(review): no typeahead plugin script is included on this
    // page — this call presumably throws; confirm before relying on
    // any statement after it.
    $('input.typeahead').typeahead(options);
    // Lettering.js demos: split headings into per-char/word/line spans
    $("#demo1 h1").lettering();
    $("#demo2 h1").lettering('words');
    $("#demo3 p").lettering('lines');
    $("#demo4 h1").lettering('words').children("span").lettering();
    $("#demo5 h1").lettering().children("span").css({
        'display': 'inline-block',
        '-webkit-transform': 'rotate(-25deg)'
    });
});
</script>
<script>
// Animate the site logo when clicked: a jQuery UI "shake" effect
// followed by a wiggle.
// NOTE(review): no wiggle plugin script is visible in this page's
// includes — confirm $.fn.wiggle exists before relying on it.
$('#toggle').click(function () {
    $("#toggle").effect("shake");
    $('#toggle').wiggle({
        waggle: 5, // amount of wiggle
        duration: 2, // how long to wiggle (in seconds)
        interval: 200, // how often to waggle (in milliseconds)
        wiggleCallback: function (elem) {
            // callback whenever the element is wiggled
        }
    });
});
</script>
<div class="sidebar col-3 rounded-bottom">
<h1 class="h3">Star Wars: The Clone Wars </h1>
<div class="list-group"><div class="bg-success rounded highlight"><span class="link-unstyled"><i class="fa fa-play-circle fa-fw"></i>S1 Ep1 – Ambush</span></div><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S1-Ep2---Rising-Malevolence.html">S1 Ep2 – Rising Malevolence</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S1-Ep3---Shadow-of-Malevolence.html">S1 Ep3 – Shadow of Malevolence</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S1-Ep4---Destroy-Malevolence.html">S1 Ep4 – Destroy Malevolence</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S1-Ep5---Rookies.html">S1 Ep5 – Rookies</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S1-Ep6---Downfall-of-a-Droid.html">S1 Ep6 – Downfall of a Droid</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S1-Ep7---Duel-of-the-Droids.html">S1 Ep7 – Duel of the Droids</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S1-Ep8---Bombad-Jedi.html">S1 Ep8 – Bombad Jedi</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" 
href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S1-Ep9---Cloak-of-Darkness.html">S1 Ep9 – Cloak of Darkness</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S1-Ep10---The-Lair-of-General-Grievous.html">S1 Ep10 – The Lair of General Grievous</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S1-Ep11---Dooku-Captured.html">S1 Ep11 – Dooku Captured</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S1-Ep12---The-Gungan-General.html">S1 Ep12 – The Gungan General</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S1-Ep13---Jedi-Crash.html">S1 Ep13 – Jedi Crash</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S1-Ep14---Defenders-of-Peace.html">S1 Ep14 – Defenders of Peace</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S1-Ep15---Trespass.html">S1 Ep15 – Trespass</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S1-Ep16---The-Hidden-Enemy.html">S1 Ep16 – The Hidden Enemy</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" 
href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S1-Ep17---Blue-Shadow-Virus.html">S1 Ep17 – Blue Shadow Virus</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S1-Ep18---Mystery-of-a-Thousand-Moons.html">S1 Ep18 – Mystery of a Thousand Moons</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S1-Ep19---Storm-Over-Ryloth.html">S1 Ep19 – Storm Over Ryloth</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S1-Ep20---Innocents-of-Ryloth.html">S1 Ep20 – Innocents of Ryloth</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S1-Ep21---Liberty-on-Ryloth.html">S1 Ep21 – Liberty on Ryloth</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S1-Ep22---Hostage-Crisis.html">S1 Ep22 – Hostage Crisis</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S2-Ep1-2---Holocron-Heist--Cargo-of-Doom.html">S2 Ep1-2 – Holocron Heist ~ Cargo of Doom</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S2-Ep3---Children-of-the-Force.html">S2 Ep3 – Children of the Force</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" 
href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S2-Ep4---Senate-Spy.html">S2 Ep4 – Senate Spy</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S2-Ep5---Landing-at-Point-Rain.html">S2 Ep5 – Landing at Point Rain</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S2-Ep6---Weapons-Factory.html">S2 Ep6 – Weapons Factory</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S2-Ep7---Legacy-of-Terror.html">S2 Ep7 – Legacy of Terror</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S2-Ep8---Brain-Invaders.html">S2 Ep8 – Brain Invaders</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S2-Ep9-10---Grievous-Intrigue--The-Deserter.html">S2 Ep9-10 – Grievous Intrigue ~ The Deserter</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S2-Ep11---Lightsaber-Lost.html">S2 Ep11 – Lightsaber Lost</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S2-Ep12---The-Mandalore-Plot.html">S2 Ep12 – The Mandalore Plot</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" 
href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S2-Ep13---Voyage-of-Temptation.html">S2 Ep13 – Voyage of Temptation</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S2-Ep14---Duchess-of-Mandalore.html">S2 Ep14 – Duchess of Mandalore</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S2-Ep15---Senate-Murders.html">S2 Ep15 – Senate Murders</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S2-Ep16---Cat-and-Mouse.html">S2 Ep16 – Cat and Mouse</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S2-Ep17---Bounty-Hunters.html">S2 Ep17 – Bounty Hunters</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S2-Ep18---The-Zillo-Beast.html">S2 Ep18 – The Zillo Beast</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S2-Ep19---The-Zillo-Beast-Strikes-Back.html">S2 Ep19 – The Zillo Beast Strikes Back</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S2-Ep20---Death-Trap.html">S2 Ep20 – Death Trap</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" 
href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S2-Ep21-22---R2-Come-Home--Lethal-Trackdown.html">S2 Ep21-22 – R2 Come Home ~ Lethal Trackdown</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S3-Ep1-2---Clone-Cadets---ARC-Troopers.html">S3 Ep1-2 – Clone Cadets ~ ARC Troopers</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S3-Ep3---Supply-Lines.html">S3 Ep3 – Supply Lines</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S3-Ep4---Sphere-of-Influence.html">S3 Ep4 – Sphere of Influence</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S3-Ep5---Corruption.html">S3 Ep5 – Corruption</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S3-Ep6---The-Academy.html">S3 Ep6 – The Academy</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S3-Ep7---Assassin.html">S3 Ep7 – Assassin</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S3-Ep8---Evil-Plans.html">S3 Ep8 – Evil Plans</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" 
href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S3-Ep9---Hunt-for-Ziro.html">S3 Ep9 – Hunt for Ziro</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S3-Ep10---Heroes-on-Both-Sides.html">S3 Ep10 – Heroes on Both Sides</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S3-Ep11---Pursuit-of-Peace.html">S3 Ep11 – Pursuit of Peace</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S3-Ep12---Nightsisters.html">S3 Ep12 – Nightsisters</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S3-Ep13---Monster.html">S3 Ep13 – Monster</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S3-Ep14---Witches-of-the-Mist.html">S3 Ep14 – Witches of the Mist</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S3-Ep15---Overlords.html">S3 Ep15 – Overlords</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S3-Ep16---Altar-of-Mortis.html">S3 Ep16 – Altar of Mortis</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S3-Ep17---Ghosts-of-Mortis.html">S3 Ep17 – Ghosts of 
Mortis</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S3-Ep18---The-Citadel.html">S3 Ep18 – The Citadel</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S3-Ep19---Counter-Attack.html">S3 Ep19 – Counter Attack</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S3-Ep20---Citadel-Rescue.html">S3 Ep20 – Citadel Rescue</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S3-Ep21---Padawan-Lost.html">S3 Ep21 – Padawan Lost</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S3-Ep22---Wookiee-Hunt.html">S3 Ep22 – Wookiee Hunt</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S4-Ep1-2---Water-War--Gungan-Attack.html">S4 Ep1-2 – Water War ~ Gungan Attack</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S4-Ep3---Prisoners.html">S4 Ep3 – Prisoners</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S4-Ep4---Shadow-Warrior.html">S4 Ep4 – Shadow Warrior</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" 
href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S4-Ep5---Mercy-Mission.html">S4 Ep5 – Mercy Mission</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S4-Ep6---Nomad-Droids.html">S4 Ep6 – Nomad Droids</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S4-Ep7---Darkness-on-Umbara.html">S4 Ep7 – Darkness on Umbara</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S4-Ep8---The-General.html">S4 Ep8 – The General</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S4-Ep9---Plan-of-Dissent.html">S4 Ep9 – Plan of Dissent</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S4-Ep10---Carnage-of-Krell.html">S4 Ep10 – Carnage of Krell</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S4-Ep11---Kidnapped.html">S4 Ep11 – Kidnapped</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S4-Ep12---Slaves-of-the-Republic.html">S4 Ep12 – Slaves of the Republic</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S4-Ep13---Escape-from-Kadavo.html">S4 Ep13 – Escape 
from Kadavo</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S4-Ep14---A-Friend-in-Need.html">S4 Ep14 – A Friend in Need</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S4-Ep15---Deception.html">S4 Ep15 – Deception</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S4-Ep16---Friends-and-Enemies.html">S4 Ep16 – Friends and Enemies</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S4-Ep17---The-Box.html">S4 Ep17 – The Box</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S4-Ep18---Crisis-on-Naboo.html">S4 Ep18 – Crisis on Naboo</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S4-Ep19---Massacre.html">S4 Ep19 – Massacre</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S4-Ep20---Bounty.html">S4 Ep20 – Bounty</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S4-Ep21---Brothers.html">S4 Ep21 – Brothers</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" 
href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S4-Ep22---Revenge.html">S4 Ep22 – Revenge</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S5-Ep1---Revival.html">S5 Ep1 – Revival</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S5-Ep2---A-War-on-Two-Fronts.html">S5 Ep2 – A War on Two Fronts</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S5-Ep3---Front-Runners.html">S5 Ep3 – Front Runners</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S5-Ep4---The-Soft-War.html">S5 Ep4 – The Soft War</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S5-Ep5---Tipping-Points.html">S5 Ep5 – Tipping Points</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S5-Ep6---The-Gathering.html">S5 Ep6 – The Gathering</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S5-Ep7---A-Test-of-Strength.html">S5 Ep7 – A Test of Strength</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S5-Ep8---Bound-for-Rescue.html">S5 Ep8 – Bound for Rescue</a><div 
class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S5-Ep9---A-Necessary-Bond.html">S5 Ep9 – A Necessary Bond</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S5-Ep10---Secret-Weapons.html">S5 Ep10 – Secret Weapons</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S5-Ep11---A-Sunny-Day-in-the-Void.html">S5 Ep11 – A Sunny Day in the Void</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S5-Ep12---Missing-in-Action.html">S5 Ep12 – Missing in Action</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S5-Ep13---Point-of-No-Return.html">S5 Ep13 – Point of No Return</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S5-Ep14---Eminence.html">S5 Ep14 – Eminence</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S5-Ep15---Shades-of-Reason.html">S5 Ep15 – Shades of Reason</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S5-Ep16---The-Lawless.html">S5 Ep16 – The Lawless</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" 
href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S5-Ep17---Sabotage.html">S5 Ep17 – Sabotage</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S5-Ep18---The-Jedi-Who-Knew-Too-Much.html">S5 Ep18 – The Jedi Who Knew Too Much</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S5-Ep19---To-Catch-a-Jedi.html">S5 Ep19 – To Catch a Jedi</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S5-Ep20---The-Wrong-Jedi.html">S5 Ep20 – The Wrong Jedi</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S6-Ep1---The-Unknown.html">S6 Ep1 – The Unknown</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S6-Ep2---Conspiracy.html">S6 Ep2 – Conspiracy</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S6-Ep3---Fugitive.html">S6 Ep3 – Fugitive</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S6-Ep4---Orders.html">S6 Ep4 – Orders</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S6-Ep5---An-Old-Friend.html">S6 Ep5 – An Old Friend</a><div class="half-rule"></div><a 
class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S6-Ep6---The-Rise-of-Clovis.html">S6 Ep6 – The Rise of Clovis</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S6-Ep7---Crisis-at-the-Heart.html">S6 Ep7 – Crisis at the Heart</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S6-Ep8---The-Disappeared-Part-1.html">S6 Ep8 – The Disappeared: Part 1</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S6-Ep9---The-Disappeared-Part-2.html">S6 Ep9 – The Disappeared: Part 2</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S6-Ep10---The-Lost-One.html">S6 Ep10 – The Lost One</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S6-Ep11---Voices.html">S6 Ep11 – Voices</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S6-Ep12---Destiny.html">S6 Ep12 – Destiny</a><div class="half-rule"></div><a class="link-group-item list-group-item-padding text-white" href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S6-Ep13---Sacrifice.html">S6 Ep13 – Sacrifice</a><div class="half-rule"></div><a target='_blank' href=https://www.fastemoji.com/>😂 😍 ❤ 😀 😎 🎉 😊 👍 💩 👩 👩🔬 😕 🌸 🐝 🐶 🤦🏽♀️ 😻 😸 😺 😹 ☕ 🍕 🍷 ❤️💜 💦 🤷🏽 💻 📆 🔥</a>
</div>
</div>
<header><div><div class="float-right"> <span class="enter-fullscreen black-bg text-white highlight rounded-bottom h6" role="button">fullscreen <i class="fa fa-expand"></i></span> <a href="/Star-Wars-The-Clone-Wars-2008---2015-Full-Episodes/Star-Wars-The-Clone-Wars-S1-Ep2---Rising-Malevolence.html" class="black-bg text-white highlight rounded-bottom h6">next episode <i class="fa fa-chevron-circle-right"></i></a></div>
</div><div style="margin-left: 10px;"><h1 class="h3 bold"> Star Wars: The Clone Wars S1 Ep1 – Ambush</h1></div><div style="padding-left: 10px;"> <a href="https://www.cool90s.com/advanced_search_result.php?keywords=Star+Wars%3A+The+Clone+Wars+&search_in_description=1&x=0&y=0" target="_blank"><img src="/images/dvd-icon.png" height="16"> Get on high quality DVD for yourself or as a gift</a></div><div style="padding-left: 10px;"> 🌸 <a href="https://www.fastemoji.com" target="_blank">Get the latest emojis in 1-Click copy-paste</a></div> </header>
<div class="row">
<div class="col-9 float-right">
<video id="my-video" class="video-js" controls autoplay preload="auto" style="width: 100%"
poster="http://www.itsaturday.com/images/seriesImages/2023/Star-Wars-The-Clone-Wars-2008--2015-Full-Episodes.jpg.jpg" data-setup="{}">
<source src="/watch.php?url=https://2.bp.blogspot.com/hjnLR8T4GAG_QcNcpn71AUsBENDpqwAoH5vAJLU3_-AHUu16V_PArdw1sWS1obYg_y0MExXc1UCOqn01_-jPUaYzTWkGIjYHmu_5qat8uORB1nUnsgJI_r5bTTrvN5K1DhOTvNjb=m18&extra=.mp4" type='video/mp4'>
<p class="vjs-no-js">
To view this video please enable JavaScript, and consider upgrading to a web browser that
<a href="http://videojs.com/html5-video-support/" target="_blank">supports HTML5 video</a>
</p>
</video>
<script type="text/javascript">
$('.enter-fullscreen').click(function(e) {
e.preventDefault();
$('.vjs-play-control').click();
$('.vjs-fullscreen-control').click();
});
</script>
<script src="/js/video.js"></script>
<blockquote class="blockquote bg-faded">
<img class="img-fluid img-thumbnail rounded-circle float-right col-6 col-md-4 col-lg-3" src="/images/seriesImages/2023/Star-Wars-The-Clone-Wars-2008--2015-Full-Episodes.jpg">
<strong>Star Wars: The Clone Wars</strong> (2008 – 2015) full episodes watch cartoons online.<br />
Synopsis: The first weekly TV series from Lucasfilm Animation chronicles the adventures of Anakin Skywalker, Yoda, Obi-Wan Kenobi and other popular characters from the “Star Wars” universe during the violent Clone Wars, as dwindling numbers of Jedi knights struggle to restore peace.<br />
Creator: George Lucas<br />
Stars: Tom Kane, Dee Bradley Baker, Matt Lanter<br />
More information: <a href="http://www.imdb.com/title/tt0458290/?ref_=ttep_ep_tt" target="_blank">IMDB</a>, <a href="https://en.wikipedia.org/wiki/Star_Wars:_The_Clone_Wars_(2008_TV_series)" target="_blank">Wikipedia</a></p>
</blockquote>
<!-- Composite Start -->
<div id="M285456ScriptRootC169701">
<div id="M285456PreloadC169701">
</div>
<script>
(function(){
var D=new Date(),d=document,b='body',ce='createElement',ac='appendChild',st='style',ds='display',n='none',gi='getElementById';
var i=d[ce]('iframe');i[st][ds]=n;d[gi]("M285456ScriptRootC169701")[ac](i);try{var iw=i.contentWindow.document;iw.open();iw.writeln("<ht"+"ml><bo"+"dy></bo"+"dy></ht"+"ml>");iw.close();var c=iw[b];}
catch(e){var iw=d;var c=d[gi]("M285456ScriptRootC169701");}var dv=iw[ce]('div');dv.id="MG_ID";dv[st][ds]=n;dv.innerHTML=169701;c[ac](dv);
var s=iw[ce]('script');s.async='async';s.defer='defer';s.charset='utf-8';s.src="//jsc.mgid.com/i/t/itsaturday.com.169701.js?t="+D.getYear()+D.getMonth()+D.getDate()+D.getHours();c[ac](s);})();
</script>
</div>
<!-- Composite End -->
</div>
<div class="col-3 float-right">
<!-- Composite Start -->
<div id="M285456ScriptRootC168411">
<div id="M285456PreloadC168411">
</div>
<script>
(function(){
var D=new Date(),d=document,b='body',ce='createElement',ac='appendChild',st='style',ds='display',n='none',gi='getElementById';
var i=d[ce]('iframe');i[st][ds]=n;d[gi]("M285456ScriptRootC168411")[ac](i);try{var iw=i.contentWindow.document;iw.open();iw.writeln("<ht"+"ml><bo"+"dy></bo"+"dy></ht"+"ml>");iw.close();var c=iw[b];}
catch(e){var iw=d;var c=d[gi]("M285456ScriptRootC168411");}var dv=iw[ce]('div');dv.id="MG_ID";dv[st][ds]=n;dv.innerHTML=168411;c[ac](dv);
var s=iw[ce]('script');s.async='async';s.defer='defer';s.charset='utf-8';s.src="//jsc.mgid.com/i/t/itsaturday.com.168411.js?t="+D.getYear()+D.getMonth()+D.getDate()+D.getHours();c[ac](s);})();
</script>
</div>
<!-- Composite End -->
</div>
</div>
</div>
<span style='color:white'>2023</span>
|
{
"pile_set_name": "Github"
}
|
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This header declares the namespace google::protobuf::protobuf_unittest in order to expose
// any problems with the generated class names. We use this header to ensure
// unittest.cc will declare the namespace prior to other includes, while obeying
// normal include ordering.
//
// When generating a class name of "foo.Bar" we must ensure we prefix the class
// name with "::", in case the namespace google::protobuf::foo exists. We intentionally
// trigger that case here by declaring google::protobuf::protobuf_unittest.
//
// See ClassName in helpers.h for more details.
#ifndef GOOGLE_PROTOBUF_COMPILER_CPP_UNITTEST_H__
#define GOOGLE_PROTOBUF_COMPILER_CPP_UNITTEST_H__
namespace google {
namespace protobuf {
// Intentionally empty: merely declaring this namespace forces the generated
// code to fully qualify class names (see the header comment above).
namespace protobuf_unittest {}
}  // namespace protobuf
}  // namespace google
#endif  // GOOGLE_PROTOBUF_COMPILER_CPP_UNITTEST_H__
|
{
"pile_set_name": "Github"
}
|
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Config.h>
#include <ATen/cuda/CUDAConfig.h>
#if !AT_CUDNN_ENABLED()
namespace at { namespace native {
// See Note [ATen preprocessor philosophy]

// Stubs compiled when ATen is built without cuDNN: they keep the symbols
// linkable but fail loudly if called at runtime.
Tensor cudnn_grid_sampler_forward(
const Tensor& input_t, const Tensor& grid_t) {
AT_ERROR("cudnn_grid_sampler_forward: ATen not compiled with cuDNN support");
}

// Runtime-error stub; see comment on the forward stub above.
std::tuple<Tensor, Tensor> cudnn_grid_sampler_backward(
const Tensor& input_t, const Tensor& grid_t,
const Tensor& grad_output_t) {
AT_ERROR("cudnn_grid_sampler_backward: ATen not compiled with cuDNN support");
}
}} // namespace at::native
#else // AT_CUDNN_ENABLED
#include <ATen/cudnn/Descriptors.h>
#include <ATen/cudnn/Types.h>
#include <ATen/cudnn/Utils.h>
#include <ATen/cuda/Exceptions.h>
#include <ATen/TensorUtils.h>
// TODO: descriptor checking
namespace at { namespace native {
namespace {
// Fills a cuDNN spatial-transformer (sampler) descriptor from a tensor's
// sizes. cuDNN expects exactly 4 dimensions here; any dimension beyond
// the tensor's rank is left as 0 in the size array.
void setSamplerDescriptor(SpatialTransformerDescriptor& desc, cudnnDataType_t dataType, const at::Tensor& tensor)
{
  int inputSize[4] = {0};
  // Bound the copy at 4 entries: callers are expected to pass 4-d tensors
  // (enforced via checkDim at the call sites), but a higher-rank tensor
  // must not overflow the fixed-size stack buffer.
  for (int i = 0; i < tensor.dim() && i < 4; ++i) {
    inputSize[i] = (int) tensor.size(i);
  }
  desc.set(dataType, 4, inputSize);
}
// Validates the sampler grid against `input`: grid must be a contiguous
// 4-d tensor shaped (N, H, W, 2) whose batch size N matches the input's.
void checkGridSize(CheckedFrom c, TensorArg grid, TensorArg input)
{
// Grid coordinates are normalized to [-1, 1]: -1 is the left-most pixel,
// 1 the right-most (0 the center pixel). Values outside that range are
// ignored by the sampler.
checkContiguous(c, grid);
checkDim(c, grid, 4);
// TODO: Maybe more user friendly to report where the expected size
// came from
checkSize(c, grid, 0, input->size(0));
// Last dimension holds the two sampling coordinates per output pixel.
checkSize(c, grid, 3, 2);
}
} // namespace
// Samples `input_t` (N, C, H_in, W_in) at the normalized coordinates given
// by `grid_t` (N, H_out, W_out, 2) using cuDNN's spatial-transformer
// sampler; returns a tensor of shape (N, C, H_out, W_out).
Tensor cudnn_grid_sampler_forward(
const Tensor& input_t, const Tensor& grid_t)
{
// Wrap as TensorArgs so the check* helpers can produce readable errors;
// both tensors are made contiguous as cuDNN requires.
TensorArg input{ contiguousIfZeroInStrides(input_t), "input", 1 },
grid{ grid_t.contiguous(), "grid", 2 };
CheckedFrom c = "cudnn_grid_sampler_forward";
checkAllSameGPU(c, {input, grid});
checkAllSameType(c, {input, grid});
checkGridSize(c, grid, input);
checkDim(c, input, 4);
// Output: batch and channels from the input, spatial extent from the grid.
auto output_t = at::empty({0}, input->options());
output_t.resize_({input->size(0), input->size(1), grid->size(1), grid->size(2)});
TensorDescriptor idesc{ *input }; // input descriptor
TensorDescriptor odesc{ output_t }; // output descriptor
SpatialTransformerDescriptor desc; // sampler descriptor
auto handle = getCudnnHandle();
auto dataType = getCudnnDataType(*input);
// The sampler descriptor is sized from the OUTPUT tensor: it describes the
// sampling result's geometry, not the input's.
setSamplerDescriptor(desc, dataType, output_t);
// Scaling factors for the cuDNN call: out = 1*sample(input) + 0*out.
Constant one(dataType, 1);
Constant zero(dataType, 0);
AT_CUDNN_CHECK(cudnnSpatialTfSamplerForward(
handle, desc.desc(),
&one, idesc.desc(), input->data_ptr(),
grid->data_ptr(),
&zero, odesc.desc(), output_t.data_ptr()
));
return output_t;
}
// NB: CuDNN does not support output mask; you always get both
// gradients.
// Computes gradients of the grid sampler w.r.t. both the input and the
// grid, given `grad_output_t` of the forward result's shape. Returns
// (grad_input, grad_output-shaped grad_grid) as a tuple.
std::tuple<Tensor, Tensor> cudnn_grid_sampler_backward(
const Tensor& input_t, const Tensor& grid_t,
const Tensor& grad_output_t)
{
TensorArg input{ contiguousIfZeroInStrides(input_t), "input", 1 },
grid{ grid_t.contiguous(), "grid", 2 },
grad_output{ contiguousIfZeroInStrides(grad_output_t), "grad_output", 3 };
CheckedFrom c = "cudnn_grid_sampler_backward";
// NOTE(review): unlike the forward pass, there is no checkAllSameType
// here — presumably dtype agreement is guaranteed by autograd; confirm.
checkAllSameGPU(c, {input, grad_output, grid});
checkGridSize(c, grid, input);
checkDim(c, input, 4);
checkDim(c, grad_output, 4);
// Gradients take the shape of the tensors they differentiate.
auto grad_input_t = at::empty({0}, input->options());
grad_input_t.resize_(input->sizes());
auto grad_grid_t = at::empty({0}, grid->options());
grad_grid_t.resize_(grid->sizes());
TensorDescriptor idesc{ *input }; // input descriptor
TensorDescriptor odesc{ *grad_output }; // grad_output descriptor
TensorDescriptor gdesc{ grad_input_t }; // grad_input descriptor
SpatialTransformerDescriptor desc; // sampler descriptor
auto handle = getCudnnHandle();
auto dataType = getCudnnDataType(*input);
// Sampler geometry is that of the forward output, i.e. grad_output here.
setSamplerDescriptor(desc, dataType, *grad_output);
// Scaling factors: grads are written fresh (beta = 0), not accumulated.
Constant one(dataType, 1);
Constant zero(dataType, 0);
AT_CUDNN_CHECK(cudnnSpatialTfSamplerBackward(
handle, desc.desc(),
&one, idesc.desc(), input->data_ptr(),
&zero, gdesc.desc(), grad_input_t.data_ptr(),
&one, odesc.desc(), grad_output->data_ptr(),
// intriguingly, the outputs don't need descriptors
grid->data_ptr(),
&zero, grad_grid_t.data_ptr()
));
return std::tuple<Tensor, Tensor>{ grad_input_t, grad_grid_t };
}
}} // namespace at::native
#endif
|
{
"pile_set_name": "Github"
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.