text
stringlengths
2
1.04M
meta
dict
using namespace Crystal::Graphics; using namespace Crystal::Shader; TextureObject::TextureObject(const Image& image, const int id) { create(image, id); } void TextureObject::create(const Image& image, const int id) { this->id = id; this->width = image.getWidth(); this->height = image.getHeight(); glActiveTexture(GL_TEXTURE0 + id); glGenTextures(1, &texHandle); send(image); } void TextureObject::bind() const { glActiveTexture(GL_TEXTURE0 + id); glBindTexture(GL_TEXTURE_2D, texHandle); } void TextureObject::unbind() const { glBindTexture(GL_TEXTURE_2D, 0); glActiveTexture(GL_TEXTURE0); } TextureObject::TextureObject(const Imagef& image, const int id) { create(image, id); } void TextureObject::create(const Imagef& image, const int id) { this->id = id; this->width = image.getWidth(); this->height = image.getHeight(); glActiveTexture(GL_TEXTURE0 + id); glGenTextures(1, &texHandle); send(image); } void TextureObject::send(const Image& image) { glBindTexture(GL_TEXTURE_2D, texHandle); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, image.getWidth(), image.getHeight(), 0, GL_RGBA, GL_UNSIGNED_BYTE, image.getValues().data()); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); //glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP); //glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP); glBindTexture(GL_TEXTURE_2D, 0); assert(GL_NO_ERROR == glGetError()); } void TextureObject::send(const Imagef& image) { glBindTexture(GL_TEXTURE_2D, texHandle); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, image.getWidth(), image.getHeight(), 0, GL_RGBA, GL_FLOAT, image.getValues().data()); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); //glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP); //glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP); glBindTexture(GL_TEXTURE_2D, 0); assert(GL_NO_ERROR == 
glGetError()); }
{ "content_hash": "bf6c3c9ce19862603b041860bf3b3973", "timestamp": "", "source": "github", "line_count": 81, "max_line_length": 134, "avg_line_length": 26.40740740740741, "alnum_prop": 0.6989247311827957, "repo_name": "SatoshiMabuchi/Crystal", "id": "5c9bb2064e1ad496289dcc684eb66bc82d8e76e2", "size": "2188", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Shader/TextureObject.cpp", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "6423" }, { "name": "C", "bytes": "1287805" }, { "name": "C++", "bytes": "4392272" }, { "name": "CMake", "bytes": "26945" }, { "name": "CSS", "bytes": "16267" }, { "name": "GLSL", "bytes": "1386" }, { "name": "HTML", "bytes": "7686479" }, { "name": "JavaScript", "bytes": "3422" }, { "name": "Makefile", "bytes": "4193" }, { "name": "Objective-C", "bytes": "72703" }, { "name": "Objective-C++", "bytes": "30376" }, { "name": "Python", "bytes": "1328" }, { "name": "Shell", "bytes": "2254" } ], "symlink_target": "" }
package prometheus import ( "fmt" "hash/fnv" "math" "sort" "sync" "time" "github.com/coreos/mantle/Godeps/_workspace/src/github.com/beorn7/perks/quantile" "github.com/coreos/mantle/Godeps/_workspace/src/github.com/golang/protobuf/proto" dto "github.com/coreos/mantle/Godeps/_workspace/src/github.com/prometheus/client_model/go" ) // quantileLabel is used for the label that defines the quantile in a // summary. const quantileLabel = "quantile" // A Summary captures individual observations from an event or sample stream and // summarizes them in a manner similar to traditional summary statistics: 1. sum // of observations, 2. observation count, 3. rank estimations. // // A typical use-case is the observation of request latencies. By default, a // Summary provides the median, the 90th and the 99th percentile of the latency // as rank estimations. // // Note that the rank estimations cannot be aggregated in a meaningful way with // the Prometheus query language (i.e. you cannot average or add them). If you // need aggregatable quantiles (e.g. you want the 99th percentile latency of all // queries served across all instances of a service), consider the Histogram // metric type. See the Prometheus documentation for more details. // // To create Summary instances, use NewSummary. type Summary interface { Metric Collector // Observe adds a single observation to the summary. Observe(float64) } var ( // DefObjectives are the default Summary quantile values. DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} errQuantileLabelNotAllowed = fmt.Errorf( "%q is not allowed as label name in summaries", quantileLabel, ) ) // Default values for SummaryOpts. const ( // DefMaxAge is the default duration for which observations stay // relevant. DefMaxAge time.Duration = 10 * time.Minute // DefAgeBuckets is the default number of buckets used to calculate the // age of observations. 
DefAgeBuckets = 5 // DefBufCap is the standard buffer size for collecting Summary observations. DefBufCap = 500 ) // SummaryOpts bundles the options for creating a Summary metric. It is // mandatory to set Name and Help to a non-empty string. All other fields are // optional and can safely be left at their zero value. type SummaryOpts struct { // Namespace, Subsystem, and Name are components of the fully-qualified // name of the Summary (created by joining these components with // "_"). Only Name is mandatory, the others merely help structuring the // name. Note that the fully-qualified name of the Summary must be a // valid Prometheus metric name. Namespace string Subsystem string Name string // Help provides information about this Summary. Mandatory! // // Metrics with the same fully-qualified name must have the same Help // string. Help string // ConstLabels are used to attach fixed labels to this // Summary. Summaries with the same fully-qualified name must have the // same label names in their ConstLabels. // // Note that in most cases, labels have a value that varies during the // lifetime of a process. Those labels are usually managed with a // SummaryVec. ConstLabels serve only special purposes. One is for the // special case where the value of a label does not change during the // lifetime of a process, e.g. if the revision of the running binary is // put into a label. Another, more advanced purpose is if more than one // Collector needs to collect Summaries with the same fully-qualified // name. In that case, those Summaries must differ in the values of // their ConstLabels. See the Collector examples. // // If the value of a label never changes (not even between binaries), // that label most likely should not be a label at all (but part of the // metric name). ConstLabels Labels // Objectives defines the quantile rank estimates with their respective // absolute error. 
If Objectives[q] = e, then the value reported // for q will be the φ-quantile value for some φ between q-e and q+e. // The default value is DefObjectives. Objectives map[float64]float64 // MaxAge defines the duration for which an observation stays relevant // for the summary. Must be positive. The default value is DefMaxAge. MaxAge time.Duration // AgeBuckets is the number of buckets used to exclude observations that // are older than MaxAge from the summary. A higher number has a // resource penalty, so only increase it if the higher resolution is // really required. For very high observation rates, you might want to // reduce the number of age buckets. With only one age bucket, you will // effectively see a complete reset of the summary each time MaxAge has // passed. The default value is DefAgeBuckets. AgeBuckets uint32 // BufCap defines the default sample stream buffer size. The default // value of DefBufCap should suffice for most uses. If there is a need // to increase the value, a multiple of 500 is recommended (because that // is the internal buffer size of the underlying package // "github.com/bmizerany/perks/quantile"). BufCap uint32 } // TODO: Great fuck-up with the sliding-window decay algorithm... The Merge // method of perk/quantile is actually not working as advertised - and it might // be unfixable, as the underlying algorithm is apparently not capable of // merging summaries in the first place. To avoid using Merge, we are currently // adding observations to _each_ age bucket, i.e. the effort to add a sample is // essentially multiplied by the number of age buckets. When rotating age // buckets, we empty the previous head stream. On scrape time, we simply take // the quantiles from the head stream (no merging required). Result: More effort // on observation time, less effort on scrape time, which is exactly the // opposite of what we try to accomplish, but at least the results are correct. 
// // The quite elegant previous contraption to merge the age buckets efficiently // on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0) // can't be used anymore. // NewSummary creates a new Summary based on the provided SummaryOpts. func NewSummary(opts SummaryOpts) Summary { return newSummary( NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, nil, opts.ConstLabels, ), opts, ) } func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { if len(desc.variableLabels) != len(labelValues) { panic(errInconsistentCardinality) } for _, n := range desc.variableLabels { if n == quantileLabel { panic(errQuantileLabelNotAllowed) } } for _, lp := range desc.constLabelPairs { if lp.GetName() == quantileLabel { panic(errQuantileLabelNotAllowed) } } if len(opts.Objectives) == 0 { opts.Objectives = DefObjectives } if opts.MaxAge < 0 { panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge)) } if opts.MaxAge == 0 { opts.MaxAge = DefMaxAge } if opts.AgeBuckets == 0 { opts.AgeBuckets = DefAgeBuckets } if opts.BufCap == 0 { opts.BufCap = DefBufCap } s := &summary{ desc: desc, objectives: opts.Objectives, sortedObjectives: make([]float64, 0, len(opts.Objectives)), labelPairs: makeLabelPairs(desc, labelValues), hotBuf: make([]float64, 0, opts.BufCap), coldBuf: make([]float64, 0, opts.BufCap), streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets), } s.headStreamExpTime = time.Now().Add(s.streamDuration) s.hotBufExpTime = s.headStreamExpTime for i := uint32(0); i < opts.AgeBuckets; i++ { s.streams = append(s.streams, s.newStream()) } s.headStream = s.streams[0] for qu := range s.objectives { s.sortedObjectives = append(s.sortedObjectives, qu) } sort.Float64s(s.sortedObjectives) s.Init(s) // Init self-collection. return s } type summary struct { SelfCollector bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime. mtx sync.Mutex // Protects every other moving part. // Lock bufMtx before mtx if both are needed. 
desc *Desc objectives map[float64]float64 sortedObjectives []float64 labelPairs []*dto.LabelPair sum float64 cnt uint64 hotBuf, coldBuf []float64 streams []*quantile.Stream streamDuration time.Duration headStream *quantile.Stream headStreamIdx int headStreamExpTime, hotBufExpTime time.Time } func (s *summary) Desc() *Desc { return s.desc } func (s *summary) Observe(v float64) { s.bufMtx.Lock() defer s.bufMtx.Unlock() now := time.Now() if now.After(s.hotBufExpTime) { s.asyncFlush(now) } s.hotBuf = append(s.hotBuf, v) if len(s.hotBuf) == cap(s.hotBuf) { s.asyncFlush(now) } } func (s *summary) Write(out *dto.Metric) error { sum := &dto.Summary{} qs := make([]*dto.Quantile, 0, len(s.objectives)) s.bufMtx.Lock() s.mtx.Lock() // Swap bufs even if hotBuf is empty to set new hotBufExpTime. s.swapBufs(time.Now()) s.bufMtx.Unlock() s.flushColdBuf() sum.SampleCount = proto.Uint64(s.cnt) sum.SampleSum = proto.Float64(s.sum) for _, rank := range s.sortedObjectives { var q float64 if s.headStream.Count() == 0 { q = math.NaN() } else { q = s.headStream.Query(rank) } qs = append(qs, &dto.Quantile{ Quantile: proto.Float64(rank), Value: proto.Float64(q), }) } s.mtx.Unlock() if len(qs) > 0 { sort.Sort(quantSort(qs)) } sum.Quantile = qs out.Summary = sum out.Label = s.labelPairs return nil } func (s *summary) newStream() *quantile.Stream { return quantile.NewTargeted(s.objectives) } // asyncFlush needs bufMtx locked. func (s *summary) asyncFlush(now time.Time) { s.mtx.Lock() s.swapBufs(now) // Unblock the original goroutine that was responsible for the mutation // that triggered the compaction. But hold onto the global non-buffer // state mutex until the operation finishes. go func() { s.flushColdBuf() s.mtx.Unlock() }() } // rotateStreams needs mtx AND bufMtx locked. 
func (s *summary) maybeRotateStreams() { for !s.hotBufExpTime.Equal(s.headStreamExpTime) { s.headStream.Reset() s.headStreamIdx++ if s.headStreamIdx >= len(s.streams) { s.headStreamIdx = 0 } s.headStream = s.streams[s.headStreamIdx] s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration) } } // flushColdBuf needs mtx locked. func (s *summary) flushColdBuf() { for _, v := range s.coldBuf { for _, stream := range s.streams { stream.Insert(v) } s.cnt++ s.sum += v } s.coldBuf = s.coldBuf[0:0] s.maybeRotateStreams() } // swapBufs needs mtx AND bufMtx locked, coldBuf must be empty. func (s *summary) swapBufs(now time.Time) { if len(s.coldBuf) != 0 { panic("coldBuf is not empty") } s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf // hotBuf is now empty and gets new expiration set. for now.After(s.hotBufExpTime) { s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration) } } type quantSort []*dto.Quantile func (s quantSort) Len() int { return len(s) } func (s quantSort) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s quantSort) Less(i, j int) bool { return s[i].GetQuantile() < s[j].GetQuantile() } // SummaryVec is a Collector that bundles a set of Summaries that all share the // same Desc, but have different values for their variable labels. This is used // if you want to count the same thing partitioned by various dimensions // (e.g. HTTP request latencies, partitioned by status code and method). Create // instances with NewSummaryVec. type SummaryVec struct { MetricVec } // NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and // partitioned by the given label names. At least one label name must be // provided. 
func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, labelNames, opts.ConstLabels, ) return &SummaryVec{ MetricVec: MetricVec{ children: map[uint64]Metric{}, desc: desc, hash: fnv.New64a(), newMetric: func(lvs ...string) Metric { return newSummary(desc, opts, lvs...) }, }, } } // GetMetricWithLabelValues replaces the method of the same name in // MetricVec. The difference is that this method returns a Summary and not a // Metric so that no type conversion is required. func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) { metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) if metric != nil { return metric.(Summary), err } return nil, err } // GetMetricWith replaces the method of the same name in MetricVec. The // difference is that this method returns a Summary and not a Metric so that no // type conversion is required. func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) { metric, err := m.MetricVec.GetMetricWith(labels) if metric != nil { return metric.(Summary), err } return nil, err } // WithLabelValues works as GetMetricWithLabelValues, but panics where // GetMetricWithLabelValues would have returned an error. By not returning an // error, WithLabelValues allows shortcuts like // myVec.WithLabelValues("404", "GET").Observe(42.21) func (m *SummaryVec) WithLabelValues(lvs ...string) Summary { return m.MetricVec.WithLabelValues(lvs...).(Summary) } // With works as GetMetricWith, but panics where GetMetricWithLabels would have // returned an error. 
By not returning an error, With allows shortcuts like // myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21) func (m *SummaryVec) With(labels Labels) Summary { return m.MetricVec.With(labels).(Summary) } type constSummary struct { desc *Desc count uint64 sum float64 quantiles map[float64]float64 labelPairs []*dto.LabelPair } func (s *constSummary) Desc() *Desc { return s.desc } func (s *constSummary) Write(out *dto.Metric) error { sum := &dto.Summary{} qs := make([]*dto.Quantile, 0, len(s.quantiles)) sum.SampleCount = proto.Uint64(s.count) sum.SampleSum = proto.Float64(s.sum) for rank, q := range s.quantiles { qs = append(qs, &dto.Quantile{ Quantile: proto.Float64(rank), Value: proto.Float64(q), }) } if len(qs) > 0 { sort.Sort(quantSort(qs)) } sum.Quantile = qs out.Summary = sum out.Label = s.labelPairs return nil } // NewConstSummary returns a metric representing a Prometheus summary with fixed // values for the count, sum, and quantiles. As those parameters cannot be // changed, the returned value does not implement the Summary interface (but // only the Metric interface). Users of this package will not have much use for // it in regular operations. However, when implementing custom Collectors, it is // useful as a throw-away metric that is generated on the fly to send it to // Prometheus in the Collect method. // // quantiles maps ranks to quantile values. For example, a median latency of // 0.23s and a 99th percentile latency of 0.56s would be expressed as: // map[float64]float64{0.5: 0.23, 0.99: 0.56} // // NewConstSummary returns an error if the length of labelValues is not // consistent with the variable labels in Desc. 
func NewConstSummary( desc *Desc, count uint64, sum float64, quantiles map[float64]float64, labelValues ...string, ) (Metric, error) { if len(desc.variableLabels) != len(labelValues) { return nil, errInconsistentCardinality } return &constSummary{ desc: desc, count: count, sum: sum, quantiles: quantiles, labelPairs: makeLabelPairs(desc, labelValues), }, nil } // MustNewConstSummary is a version of NewConstSummary that panics where // NewConstMetric would have returned an error. func MustNewConstSummary( desc *Desc, count uint64, sum float64, quantiles map[float64]float64, labelValues ...string, ) Metric { m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...) if err != nil { panic(err) } return m }
{ "content_hash": "f09f3b116c19c458f8393964415d64d7", "timestamp": "", "source": "github", "line_count": 527, "max_line_length": 91, "avg_line_length": 30.15180265654649, "alnum_prop": 0.7200755191944619, "repo_name": "mischief/mantle", "id": "7dc2498e4ddc7611e87116f75e2c5ba2bc19164b", "size": "16485", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/summary.go", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Go", "bytes": "340535" }, { "name": "Shell", "bytes": "1724" } ], "symlink_target": "" }
/* Written by Ulrich Drepper <drepper@gnu.ai.mit.edu> */ /* Hacked to work with BusyBox by Alfred M. Szmidt <ams@trillian.itslinux.org> */ #ifndef _MD5_H #define _MD5_H 1 /* Some useful definitions */ #define FALSE ((int) 1) #define TRUE ((int) 0) #include <stdio.h> #include <errno.h> #include <ctype.h> #include <getopt.h> /* It turns out that libc5 doesn't have this in its headers * even though it is actually in the lib. Force it to work #if ! defined __GLIBC__ && ! defined __UCLIBC__ #define getline __getline extern _IO_ssize_t getline __P ((char **, size_t *, FILE *)); #endif */ #include <sys/types.h> #include <stdlib.h> #include <string.h> //#include <endian.h> #if defined HAVE_LIMITS_H || _LIBC # include <limits.h> #endif /* The following contortions are an attempt to use the C preprocessor to determine an unsigned integral type that is 32 bits wide. An alternative approach is to use autoconf's AC_CHECK_SIZEOF macro, but doing that would require that the configure script compile and *run* the resulting executable. Locally running cross-compiled executables is usually not possible. */ #ifdef _LIBC # include <sys/types.h> typedef u_int32_t md5_uint32; #else # if defined __STDC__ && __STDC__ # define UINT_MAX_32_BITS 4294967295U # else # define UINT_MAX_32_BITS 0xFFFFFFFF # endif /* If UINT_MAX isn't defined, assume it's a 32-bit type. This should be valid for all systems GNU cares about because that doesn't include 16-bit systems, and only modern systems (that certainly have <limits.h>) have 64+-bit integral types. */ # ifndef UINT_MAX # define UINT_MAX UINT_MAX_32_BITS # endif # if UINT_MAX == UINT_MAX_32_BITS typedef unsigned int md5_uint32; # else # if USHRT_MAX == UINT_MAX_32_BITS typedef unsigned short md5_uint32; # else # if ULONG_MAX == UINT_MAX_32_BITS typedef unsigned long md5_uint32; # else /* The following line is intended to evoke an error. Using #error is not portable enough. */ "Cannot determine unsigned 32-bit data type." 
# endif # endif # endif #endif #undef __P #if defined (__STDC__) && __STDC__ #define __P(x) x #else #define __P(x) () #endif /* Structure to save state of computation between the single steps. */ struct md5_ctx { md5_uint32 A; md5_uint32 B; md5_uint32 C; md5_uint32 D; md5_uint32 total[2]; md5_uint32 buflen; char buffer[128]; }; /* * The following three functions are build up the low level used in * the functions `md5_stream' and `md5_buffer'. */ /* Initialize structure containing state of computation. (RFC 1321, 3.3: Step 3) */ extern void md5_init_ctx __P ((struct md5_ctx *ctx)); /* Starting with the result of former calls of this function (or the initialization function update the context for the next LEN bytes starting at BUFFER. It is necessary that LEN is a multiple of 64!!! */ extern void md5_process_block __P ((const void *buffer, size_t len, struct md5_ctx *ctx)); /* Starting with the result of former calls of this function (or the initialization function update the context for the next LEN bytes starting at BUFFER. It is NOT required that LEN is a multiple of 64. */ extern void md5_process_bytes __P ((const void *buffer, size_t len, struct md5_ctx *ctx)); /* Process the remaining bytes in the buffer and put result from CTX in first 16 bytes following RESBUF. The result is always in little endian byte order, so that a byte-wise output yields to the wanted ASCII representation of the message digest. IMPORTANT: On some systems it is required that RESBUF is correctly aligned for a 32 bits value. */ extern void *md5_finish_ctx __P ((struct md5_ctx *ctx, void *resbuf)); /* Put result from CTX in first 16 bytes following RESBUF. The result is always in little endian byte order, so that a byte-wise output yields to the wanted ASCII representation of the message digest. IMPORTANT: On some systems it is required that RESBUF is correctly aligned for a 32 bits value. 
*/ extern void *md5_read_ctx __P ((const struct md5_ctx *ctx, void *resbuf)); /* Compute MD5 message digest for bytes read from STREAM. The resulting message digest number will be written into the 16 bytes beginning at RESBLOCK. */ extern int md5_stream __P ((FILE *stream, void *resblock)); /* Compute MD5 message digest for LEN bytes beginning at BUFFER. The result is always in little endian byte order, so that a byte-wise output yields to the wanted ASCII representation of the message digest. */ extern void *md5_buffer __P ((const char *buffer, size_t len, void *resblock)); #endif
{ "content_hash": "97b003f3e3645ac5565fcb253d3c482d", "timestamp": "", "source": "github", "line_count": 151, "max_line_length": 81, "avg_line_length": 30.960264900662253, "alnum_prop": 0.7024598930481284, "repo_name": "EDACC/edacc_jobserver", "id": "8c3ad25f25c5b0b22a003a670ea8cd20ba872ed2", "size": "5571", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "src/md5sum.h", "mode": "33261", "license": "mit", "language": [ { "name": "C", "bytes": "17345" }, { "name": "C++", "bytes": "38933" }, { "name": "Perl", "bytes": "2288" }, { "name": "R", "bytes": "1016" } ], "symlink_target": "" }
//****************************** // Written by Peter Golde // Copyright (c) 2004-2005, Wintellect // // Use and restribution of this code is subject to the license agreement // contained in the file "License.txt" accompanying this file. //****************************** using System; using System.Collections.Generic; using NUnit.Framework; namespace Wintellect.PowerCollections.Tests { // A class for testing the "Hash" class. [TestFixture] public class HashTests { internal Hash<TestItem> hash; internal class DataComparer : System.Collections.Generic.IEqualityComparer<TestItem> { public bool Equals(TestItem x, TestItem y) { return string.Equals(x.key, y.key); } public int GetHashCode(TestItem obj) { return obj.key.GetHashCode(); } } /// <summary> /// Insert a key and print/validate the hash. /// </summary> /// <param name="key"></param> private void InsertPrintValidate(string key) { InsertPrintValidate(key, 0, true); } private void InsertPrintValidate(string key, int data) { InsertPrintValidate(key, data, true); } private void InsertPrintValidate(string key, int data, bool replaceOnDuplicate) { TestItem oldData; hash.Insert(new TestItem(key, data), replaceOnDuplicate, out oldData); #if DEBUG hash.Print(); hash.Validate(); #endif //DEBUG } private void InsertPrintValidate(string key, int data, bool replaceOnDuplicate, int expectedoldData) { TestItem oldData; hash.Insert(new TestItem(key, data), replaceOnDuplicate, out oldData); #if DEBUG hash.Print(); hash.Validate(); #endif //DEBUG Assert.AreEqual(expectedoldData, oldData.data); } /// <summary> /// Insert a key and validate the hash. 
/// </summary> /// <param name="key"></param> private void InsertValidate(string key) { InsertValidate(key, 0, true); } private void InsertValidate(string key, int data) { InsertValidate(key, data, true); } private void InsertValidate(string key, int data, bool replaceOnDuplicate) { TestItem oldData; hash.Insert(new TestItem(key, data), replaceOnDuplicate, out oldData); #if DEBUG hash.Validate(); #endif //DEBUG } private void InsertValidate(string key, int data, bool replaceOnDuplicate, int expectedOldData) { TestItem oldData; hash.Insert(new TestItem(key, data), replaceOnDuplicate, out oldData); #if DEBUG hash.Validate(); #endif //DEBUG Assert.AreEqual(expectedOldData, oldData.data); } /// <summary> /// Delete a key, check the data in the deleted key, print and validate. /// </summary> /// <param name="key">Key to delete.</param> /// <param name="data">Expected data in the deleted key.</param> private void DeletePrintValidate(string key, int data) { TestItem itemFound; int countBefore = hash.ElementCount; bool success = hash.Delete(new TestItem(key), out itemFound); #if DEBUG hash.Print(); #endif //DEBUG Assert.IsTrue(success, "Key to delete wasn't found"); Assert.AreEqual(data, itemFound.data, "Data in deleted key was incorrect."); int countAfter = hash.ElementCount; Assert.AreEqual(countBefore - 1, countAfter, "Count of elements incorrect after deletion"); #if DEBUG hash.Validate(); #endif //DEBUG } private void FindKey(string key, int value) { TestItem itemFound; bool found = hash.Find(new TestItem(key), false, out itemFound); Assert.IsTrue(found, "Key was not found in the hash"); Assert.AreEqual(value, itemFound.data, "Wrong value found in the hash"); } private bool FindReplaceKey(string key, int newValue, int expectedOldValue) { TestItem itemFound; bool found = hash.Find(new TestItem(key, newValue), true, out itemFound); Assert.AreEqual(expectedOldValue, itemFound.data); return found; } /// <summary> /// Test creation of the hash. 
/// </summary> [Test] public void Create() { hash = new Hash<TestItem>(new DataComparer()); #if DEBUG hash.Print(); hash.Validate(); #endif //DEBUG } /// <summary> /// Insert values into hash to test the basic insertion algorithm. Validate /// and print the hash after each step. /// </summary> [Test] public void NormalInsert() { hash = new Hash<TestItem>(new DataComparer()); InsertPrintValidate("m"); InsertPrintValidate("b"); InsertPrintValidate("t"); InsertPrintValidate("o"); InsertPrintValidate("z"); InsertPrintValidate("k"); InsertPrintValidate("g"); InsertPrintValidate("a5"); InsertPrintValidate("c"); InsertPrintValidate("a2"); InsertPrintValidate("a7"); InsertPrintValidate("i"); InsertPrintValidate("h"); Assert.AreEqual(13, hash.ElementCount, "Wrong number of items in the hash."); } /// <summary> /// Insert values into hash and then find values in the hash. /// </summary> [Test] public void NormalFind() { hash = new Hash<TestItem>(new DataComparer()); InsertValidate("m", 101); FindKey("m", 101); InsertValidate("b", 102); InsertValidate("t", 103); FindKey("b", 102); FindKey("t", 103); InsertValidate("o", 104); FindKey("b", 102); InsertValidate("z", 105); InsertValidate("g", 106); FindKey("g", 106); InsertValidate("a5", 107); InsertValidate("c", 8); InsertValidate("a2", 9); FindKey("z", 105); InsertValidate("a7", 10); InsertValidate("i", 11); InsertValidate("h", 112); InsertValidate("k", 113); Assert.AreEqual(13, hash.ElementCount, "Wrong number of items in the hash."); FindKey("m", 101); FindKey("b", 102); FindKey("t", 103); FindKey("o", 104); FindKey("z", 105); FindKey("g", 106); FindKey("a5", 107); FindKey("c", 8); FindKey("a2", 9); FindKey("a7", 10); FindKey("i", 11); FindKey("h", 112); FindKey("k", 113); } /// <summary> /// Test find with the replace option.. 
/// </summary> [Test] public void FindReplace() { bool b; hash = new Hash<TestItem>(new DataComparer()); InsertValidate("m", 101); FindKey("m", 101); InsertValidate("b", 102); InsertValidate("t", 103); b = FindReplaceKey("b", 202, 102); Assert.IsTrue(b); FindKey("t", 103); InsertValidate("o", 104); FindKey("b", 202); InsertValidate("z", 105); InsertValidate("g", 106); FindKey("g", 106); b = FindReplaceKey("a5", 77, 0); Assert.IsFalse(b); b = FindReplaceKey("a5", 134, 0); Assert.IsFalse(b); b = FindReplaceKey("m", 201, 101); Assert.IsTrue(b); InsertValidate("a5", 107); InsertValidate("c", 8); InsertValidate("k", 313); InsertValidate("a2", 9); FindKey("z", 105); b = FindReplaceKey("m", 301, 201); Assert.IsTrue(b); InsertValidate("a7", 10); b = FindReplaceKey("a5", 207, 107); Assert.IsTrue(b); InsertValidate("i", 11); InsertValidate("h", 112); b = FindReplaceKey("z", 205, 105); Assert.IsTrue(b); b = FindReplaceKey("g", 206, 106); Assert.IsTrue(b); b = FindReplaceKey("g", 306, 206); Assert.IsTrue(b); b = FindReplaceKey("k", 513, 313); Assert.AreEqual(13, hash.ElementCount, "Wrong number of items in the hash."); FindKey("m", 301); FindKey("b", 202); FindKey("t", 103); FindKey("o", 104); FindKey("z", 205); FindKey("g", 306); FindKey("a5", 207); FindKey("c", 8); FindKey("a2", 9); FindKey("a7", 10); FindKey("i", 11); FindKey("h", 112); FindKey("k", 513); } /// <summary> /// Insert values into tree using "do-nothing" policy and then find values in the tree. 
/// </summary> [Test] public void DoNothingFind() { hash = new Hash<TestItem>(new DataComparer()); InsertValidate("m", 101, false, 0); FindKey("m", 101); InsertValidate("b", 102, false, 0); InsertValidate("t", 103, false, 0); InsertValidate("m", 201, false, 101); FindKey("b", 102); FindKey("t", 103); InsertValidate("o", 104, false, 0); FindKey("b", 102); InsertValidate("z", 105, false, 0); InsertValidate("g", 106, false, 0); InsertValidate("b", 202, false, 102); FindKey("g", 106); InsertValidate("g", 206, false, 106); InsertValidate("a5", 107, false, 0); InsertValidate("t", 203, false, 103); InsertValidate("c", 8, false, 0); InsertValidate("a2", 9, false, 0); FindKey("z", 105); InsertValidate("a7", 10, false, 0); InsertValidate("i", 11, false, 0); InsertValidate("h", 112, false, 0); InsertValidate("z", 205, false, 105); InsertValidate("a2", 209, false, 9); InsertValidate("c", 208, false, 8); InsertValidate("i", 211, false, 11); InsertValidate("h", 212, false, 112); InsertValidate("k", 113, false, 0); InsertValidate("m", 401, false, 101); InsertValidate("k", 213, false, 113); Assert.AreEqual(13, hash.ElementCount, "Wrong number of items in the tree."); FindKey("m", 101); FindKey("b", 102); FindKey("t", 103); FindKey("o", 104); FindKey("z", 105); FindKey("g", 106); FindKey("a5", 107); FindKey("c", 8); FindKey("a2", 9); FindKey("a7", 10); FindKey("i", 11); FindKey("h", 112); FindKey("k", 113); } /// <summary> /// Check that deletion works. 
/// </summary> [Test] public void Delete() { hash = new Hash<TestItem>(new DataComparer()); InsertPrintValidate("m", 101); DeletePrintValidate("m", 101); InsertPrintValidate("m", 101); InsertPrintValidate("b", 102); InsertPrintValidate("t", 103); DeletePrintValidate("b", 102); DeletePrintValidate("m", 101); DeletePrintValidate("t", 103); InsertPrintValidate("m", 101); InsertPrintValidate("b", 102); InsertPrintValidate("t", 103); InsertPrintValidate("o", 104); InsertPrintValidate("z", 105); InsertPrintValidate("g", 106); InsertPrintValidate("a5", 107); InsertPrintValidate("c", 8); InsertPrintValidate("a2", 9); InsertPrintValidate("a7", 10); InsertPrintValidate("i", 11); InsertPrintValidate("h", 112); InsertPrintValidate("k", 113); DeletePrintValidate("m", 101); DeletePrintValidate("b", 102); DeletePrintValidate("t", 103); DeletePrintValidate("o", 104); DeletePrintValidate("z", 105); DeletePrintValidate("h", 112); DeletePrintValidate("g", 106); DeletePrintValidate("a5", 107); DeletePrintValidate("c", 8); DeletePrintValidate("a2", 9); DeletePrintValidate("k", 113); DeletePrintValidate("a7", 10); DeletePrintValidate("i", 11); } [Test] public void DeleteNotPresent() { int dummy; Hash<int> t = new Hash<int>(EqualityComparer<int>.Default); t.Insert(3, true, out dummy); t.Insert(1, true, out dummy); t.Insert(5, true, out dummy); t.Insert(3, true, out dummy); t.Insert(2, true, out dummy); t.Insert(2, true, out dummy); t.Insert(3, true, out dummy); t.Insert(4, true, out dummy); bool b; int d; b = t.Delete(1, out d); Assert.IsTrue(b); #if DEBUG t.Print(); t.Validate(); #endif //DEBUG b = t.Delete(1, out d); Assert.IsFalse(b); #if DEBUG t.Print(); t.Validate(); #endif //DEBUG b = t.Delete(int.MinValue, out d); Assert.IsFalse(b); #if DEBUG t.Print(); t.Validate(); #endif //DEBUG b = t.Delete(3, out d); Assert.IsTrue(b); #if DEBUG t.Print(); t.Validate(); #endif //DEBUG b = t.Delete(3, out d); Assert.IsFalse(b); #if DEBUG t.Print(); t.Validate(); #endif //DEBUG } /// <summary> 
/// Insert values into tree and enumerate then to test enumeration. /// </summary> [Test] public void Enumerate() { hash = new Hash<TestItem>(new DataComparer()); InsertValidate("m"); InsertValidate("b"); InsertValidate("t"); InsertValidate("o"); InsertValidate("p"); InsertValidate("g"); InsertValidate("a5"); InsertValidate("c"); InsertValidate("a2"); InsertValidate("a7"); InsertValidate("i"); InsertValidate("h"); InsertValidate("o"); InsertValidate("l"); InsertValidate("k"); InsertValidate("c"); string[] keys = new string[] { "a2", "a5", "a7", "b", "c", "g", "h", "i", "k", "l", "m", "o", "p", "t" }; foreach (TestItem item in hash) { int index; index = Array.IndexOf(keys, item.key); Assert.IsTrue(index >= 0, "key not found in array"); keys[index] = null; } } const int LENGTH = 500; // length of each random array of values. const int ITERATIONS = 30; // number of iterations /// <summary> /// Create a random array of values. /// </summary> /// <param name="seed">Seed for random number generators</param> /// <param name="length">Length of array</param> /// <param name="max">Maximum value of number. Should be much /// greater than length.</param> /// <param name="allowDups">Whether to allow duplicate elements.</param> /// <returns></returns> private int[] CreateRandomArray(int seed, int length, int max, bool allowDups) { Random rand = new Random(seed); int[] a = new int[length]; for (int i = 0; i < a.Length; ++i) a[i] = -1; for (int el = 0; el < a.Length; ++el) { int value; do { value = rand.Next(max); } while (!allowDups && Array.IndexOf(a, value) >= 0); a[el] = value; } return a; } /// <summary> /// Insert all the elements of an integer array into the tree. The /// values in the tree are the indexes of the array. 
/// </summary> /// <param name="a">Array of values to insert.</param> private void InsertArray(int[] a) { TestItem dummy; for (int i = 0; i < a.Length; ++i) { string s = StringFromInt(a[i]); hash.Insert(new TestItem(s, i), true, out dummy); #if DEBUG if (i % 50 == 0) hash.Validate(); #endif //DEBUG } #if DEBUG hash.Validate(); #endif //DEBUG } private string StringFromInt(int i) { return string.Format("e{0}", i); } /// <summary> /// Insert LENGTH items in random order into the tree and validate /// it. Do this ITER times. /// </summary> [Test] public void InsertRandom() { for (int iter = 0; iter < ITERATIONS; ++iter) { hash = new Hash<TestItem>(new DataComparer()); int[] a = CreateRandomArray(iter, LENGTH, LENGTH * 10, false); InsertArray(a); #if DEBUG hash.Validate(); #endif //DEBUG Assert.AreEqual(LENGTH, hash.ElementCount, "Wrong number of items in the tree."); } } /// <summary> /// Insert LENGTH items in random order into the tree and then find them all /// Do this ITER times. /// </summary> [Test] public void FindRandom() { for (int iter = 0; iter < ITERATIONS; ++iter) { hash = new Hash<TestItem>(new DataComparer()); int[] a = CreateRandomArray(iter + 1000, LENGTH, LENGTH * 10, false); InsertArray(a); #if DEBUG hash.Validate(); #endif //DEBUG Assert.AreEqual(LENGTH, hash.ElementCount, "Wrong number of items in the hash."); for (int el = 0; el < a.Length; ++el) { FindKey(StringFromInt(a[el]), el); } } } /// <summary> /// Insert LENGTH items in random order into the tree and then enumerate them. /// Do this ITER times. 
/// </summary> [Test] public void EnumerateRandom() { for (int iter = 0; iter < ITERATIONS / 10; ++iter) { hash = new Hash<TestItem>(new DataComparer()); int[] a = CreateRandomArray(iter + 1000, LENGTH, LENGTH * 10, false); InsertArray(a); #if DEBUG hash.Validate(); #endif //DEBUG Assert.AreEqual(LENGTH, hash.ElementCount, "Wrong number of items in the hash."); foreach (TestItem item in hash) { int index = -1; for (int i = 0; i < a.Length; ++i) if (StringFromInt(a[i]) == item.key) index = i; Assert.IsTrue(index >= 0); Assert.IsTrue(index == item.data); a[index] = -1; } foreach (int i in a) Assert.AreEqual(-1, i); } } /// <summary> /// Insert and delete items from the tree at random, finally removing all /// the items that are in the tree. Validate the tree after each step. /// </summary> [Test] public void DeleteRandom() { for (int iter = 0; iter < ITERATIONS / 10; ++iter) { hash = new Hash<TestItem>(new DataComparer()); bool[] a = new bool[LENGTH]; int[] value = new int[LENGTH]; Random rand = new Random(iter + 5000); TestItem itemFound; for (int i = 0; i < LENGTH * 10; ++i) { int v = rand.Next(LENGTH); string key = StringFromInt(v); if (a[v] && rand.Next(4) != 0) { // Already in the hash. Make sure we can find it, then delete it. bool b = hash.Find(new TestItem(key), false, out itemFound); Assert.IsTrue(b, "Couldn't find key in hash"); Assert.AreEqual(value[v], itemFound.data, "Data is incorrect"); b = hash.Delete(new TestItem(key), out itemFound); Assert.IsTrue(b, "Couldn't delete key in hash"); Assert.AreEqual(value[v], itemFound.data, "Data is incorrect"); #if DEBUG if (i % 50 == 0) hash.Validate(); #endif //DEBUG a[v] = false; value[v] = 0; } else if (i < LENGTH * 7) { // Add it. value[v] = rand.Next(10000) + 1; bool b = hash.Find(new TestItem(key), false, out itemFound); Assert.AreEqual(a[v], b); TestItem dummy; b = hash.Insert(new TestItem(key, value[v]), true, out dummy); Assert.AreEqual(a[v], ! 
b); #if DEBUG if (i % 50 == 0) hash.Validate(); #endif //DEBUG a[v] = true; } } for (int v = 0; v < LENGTH; ++v) { string key = StringFromInt(v); if (a[v]) { // Already in the hash. Make sure we can find it, then delete it. bool b = hash.Find(new TestItem(key), false, out itemFound); Assert.IsTrue(b, "Couldn't find key in hash"); Assert.AreEqual(value[v], itemFound.data, "Data is incorrect"); b = hash.Delete(new TestItem(key), out itemFound); Assert.IsTrue(b, "Couldn't delete key in hash"); Assert.AreEqual(value[v], itemFound.data, "Data is incorrect"); #if DEBUG if (v % 50 == 0) hash.Validate(); #endif //DEBUG a[v] = false; } } } #if DEBUG hash.Validate(); #endif //DEBUG } [Test] public void Clone() { hash = new Hash<TestItem>(new DataComparer()); InsertValidate("foo", 3); InsertValidate("bar", 4); InsertValidate("bingo", 5); InsertValidate("biff", 6); InsertValidate("zip", 7); InsertValidate("zap", 8); Hash<TestItem> clone = hash.Clone(null); #if DEBUG clone.Validate(); #endif //DEBUG InsertValidate("a", 51); InsertValidate("b", 52); InsertValidate("c", 53); InsertValidate("d", 54); #if DEBUG clone.Validate(); #endif //DEBUG Assert.AreEqual(6, clone.ElementCount); string[] s_array = { "bar", "biff", "bingo", "foo", "zap", "zip" }; int i = 0; foreach (TestItem item in clone) { int index = Array.IndexOf(s_array, item.key); Assert.IsTrue(index >= 0); Assert.AreEqual(s_array[index], item.key); s_array[index] = null; ++i; } Assert.AreEqual(6, i); hash = new Hash<TestItem>(new DataComparer()); clone = hash.Clone(null); Assert.IsTrue(hash.ElementCount == 0 && clone.ElementCount == 0); #if DEBUG clone.Validate(); #endif //DEBUG } [Test] public void GrowShrink() { Hash<double> hash1 = new Hash<double>(EqualityComparer<double>.Default); double dummy; Random r = new Random(13); for (int i = 0; i < 1000; ++i) { bool b = hash1.Insert(r.NextDouble(), true, out dummy); Assert.IsTrue(b); } #if DEBUG hash1.PrintStats(); hash1.Validate(); #endif //DEBUG 
Assert.IsTrue(hash1.SlotCount == 2048); r = new Random(13); for (int i = 0; i < 600; ++i) { bool b = hash1.Delete(r.NextDouble(), out dummy); Assert.IsTrue(b); } #if DEBUG hash1.PrintStats(); hash1.Validate(); #endif //DEBUG Assert.IsTrue(hash1.SlotCount == 1024); for (int i = 0; i < 380; ++i) { bool b = hash1.Delete(r.NextDouble(), out dummy); Assert.IsTrue(b); } #if DEBUG hash1.PrintStats(); hash1.Validate(); #endif //DEBUG Assert.IsTrue(hash1.SlotCount == 64); for (int i = 0; i < 20; ++i) { bool b = hash1.Delete(r.NextDouble(), out dummy); Assert.IsTrue(b); } #if DEBUG hash1.PrintStats(); hash1.Validate(); #endif //DEBUG Assert.IsTrue(hash1.SlotCount == 0); hash1.Insert(4.5, true, out dummy); #if DEBUG hash1.PrintStats(); hash1.Validate(); #endif //DEBUG Assert.IsTrue(hash1.SlotCount == 16); } [Test] public void LoadFactor() { Hash<double> hash1 = new Hash<double>(EqualityComparer<double>.Default); double dummy; Random r = new Random(13); for (int i = 0; i < 600; ++i) { bool b = hash1.Insert(r.NextDouble(), true, out dummy); Assert.IsTrue(b); } #if DEBUG hash1.PrintStats(); hash1.Validate(); #endif //DEBUG Assert.IsTrue(hash1.SlotCount == 1024); hash1.LoadFactor = 0.55F; Assert.AreEqual(0.55F, hash1.LoadFactor); #if DEBUG hash1.PrintStats(); hash1.Validate(); #endif //DEBUG Assert.IsTrue(hash1.SlotCount == 2048); hash1.LoadFactor = 0.9F; Assert.AreEqual(0.9F, hash1.LoadFactor); #if DEBUG hash1.PrintStats(); hash1.Validate(); #endif //DEBUG Assert.IsTrue(hash1.SlotCount == 1024); } } }
{ "content_hash": "0221a1acb9899898b3d19e1a4c28a6d2", "timestamp": "", "source": "github", "line_count": 817, "max_line_length": 117, "avg_line_length": 32.98408812729498, "alnum_prop": 0.48530503191331453, "repo_name": "Vyara/Telerik-High-Quality-Code-Homeworks", "id": "7abed246dcaa88cb97e18af89c90285448970c23", "size": "26950", "binary": false, "copies": "13", "ref": "refs/heads/master", "path": "02. Code Formatting/Task 1. Formatting C#/PowerCollections/Source/UnitTests/HashTests.cs", "mode": "33188", "license": "mit", "language": [ { "name": "C#", "bytes": "2479063" }, { "name": "JavaScript", "bytes": "2887" } ], "symlink_target": "" }
# Backfills User#name from User#login for every user whose name is blank.
class InitializeUserNames < ActiveRecord::Migration
  # Walk all users; any user without a name gets their login copied in.
  def self.up
    User.find(:all).each do |user|
      next unless user.name.blank?
      user.name = user.login
      user.save
    end
  end

  # One-way backfill: there is nothing sensible to undo.
  def self.down
  end
end
{ "content_hash": "f2a82a854956f39c56a4e76bba18955b", "timestamp": "", "source": "github", "line_count": 11, "max_line_length": 57, "avg_line_length": 18.272727272727273, "alnum_prop": 0.6368159203980099, "repo_name": "Robert-123/echowaves", "id": "cc8b347c4476adae5decae92f6186a445b94f709", "size": "201", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "db/migrate/20081024034115_initialize_user_names.rb", "mode": "33261", "license": "mit", "language": [], "symlink_target": "" }
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/init.h> #include <linux/module.h> #include <linux/if_arp.h> #include <linux/if_vlan.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/jhash.h> #include <linux/delay.h> #include <linux/time.h> #include <linux/etherdevice.h> #include <linux/genetlink.h> #include <linux/kernel.h> #include <linux/kthread.h> #include <linux/mutex.h> #include <linux/percpu.h> #include <linux/rcupdate.h> #include <linux/tcp.h> #include <linux/udp.h> #include <linux/ethtool.h> #include <linux/wait.h> #include <asm/div64.h> #include <linux/highmem.h> #include <linux/netfilter_bridge.h> #include <linux/netfilter_ipv4.h> #include <linux/inetdevice.h> #include <linux/list.h> #include <linux/openvswitch.h> #include <linux/rculist.h> #include <linux/dmi.h> #include <linux/workqueue.h> #include <net/genetlink.h> #include "datapath.h" #include "flow.h" #include "vport-internal_dev.h" /** * DOC: Locking: * * Writes to device state (add/remove datapath, port, set operations on vports, * etc.) are protected by RTNL. * * Writes to other state (flow table modifications, set miscellaneous datapath * parameters, etc.) are protected by genl_mutex. The RTNL lock nests inside * genl_mutex. * * Reads are protected by RCU. * * There are a few special cases (mostly stats) that have their own * synchronization but they nest under all of above and don't interact with * each other. */ /* Global list of datapaths to enable dumping them all out. * Protected by genl_mutex. 
*/
static LIST_HEAD(dps);

/* Interval between opportunistic rehashes of the datapath flow tables. */
#define REHASH_FLOW_INTERVAL (10 * 60 * HZ)

static void rehash_flow_table(struct work_struct *work);
static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);

static struct vport *new_vport(const struct vport_parms *);
static int queue_gso_packets(int dp_ifindex, struct sk_buff *,
			     const struct dp_upcall_info *);
static int queue_userspace_packet(int dp_ifindex, struct sk_buff *,
				  const struct dp_upcall_info *);

/* Looks up the datapath whose local internal device has ifindex
 * 'dp_ifindex'; returns NULL if there is no such device or it is not an
 * internal vport.
 *
 * Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
static struct datapath *get_dp(int dp_ifindex)
{
	struct datapath *dp = NULL;
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(&init_net, dp_ifindex);
	if (dev) {
		struct vport *vport = ovs_internal_dev_get_vport(dev);
		if (vport)
			dp = vport->dp;
	}
	rcu_read_unlock();

	return dp;
}

/* Returns the name of 'dp''s local (OVSP_LOCAL) vport.
 *
 * Must be called with rcu_read_lock or RTNL lock. */
const char *ovs_dp_name(const struct datapath *dp)
{
	struct vport *vport = rcu_dereference_rtnl(dp->ports[OVSP_LOCAL]);
	return vport->ops->get_name(vport);
}

/* Returns the ifindex of 'dp''s local vport, or 0 if it has none. */
static int get_dpifindex(struct datapath *dp)
{
	struct vport *local;
	int ifindex;

	rcu_read_lock();
	local = rcu_dereference(dp->ports[OVSP_LOCAL]);
	if (local)
		ifindex = local->ops->get_ifindex(local);
	else
		ifindex = 0;
	rcu_read_unlock();

	return ifindex;
}

/* RCU callback that frees 'dp' and its flow table and per-CPU stats after
 * all readers are done with it. */
static void destroy_dp_rcu(struct rcu_head *rcu)
{
	struct datapath *dp = container_of(rcu, struct datapath, rcu);

	ovs_flow_tbl_destroy((__force struct flow_table *)dp->table);
	free_percpu(dp->stats_percpu);
	kfree(dp);
}

/* Creates a vport from 'parms' and, on success, publishes it in its
 * datapath's port array and port list.
 *
 * Called with RTNL lock and genl_lock. */
static struct vport *new_vport(const struct vport_parms *parms)
{
	struct vport *vport;

	vport = ovs_vport_add(parms);
	if (!IS_ERR(vport)) {
		struct datapath *dp = parms->dp;

		rcu_assign_pointer(dp->ports[parms->port_no], vport);
		list_add(&vport->node, &dp->port_list);
	}

	return vport;
}

/* Unpublishes vport 'p' from its datapath, then destroys it.
 *
 * Called with RTNL lock. */
void ovs_dp_detach_port(struct vport *p)
{
	ASSERT_RTNL();

	/* First drop references to device. */
	list_del(&p->node);
	rcu_assign_pointer(p->dp->ports[p->port_no], NULL);

	/* Then destroy it. */
	ovs_vport_del(p);
}

/* Receive path: extracts a flow key from 'skb', looks it up in the flow
 * table, and either executes the flow's actions or sends a MISS upcall to
 * userspace.  Consumes 'skb' in every path and updates the per-CPU
 * hit/miss counters.
 *
 * Must be called with rcu_read_lock. */
void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
	struct datapath *dp = p->dp;
	struct sw_flow *flow;
	struct dp_stats_percpu *stats;
	struct sw_flow_key key;
	u64 *stats_counter;
	int error;
	int key_len;

	stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

	/* Extract flow from 'skb' into 'key'. */
	error = ovs_flow_extract(skb, p->port_no, &key, &key_len);
	if (unlikely(error)) {
		kfree_skb(skb);
		return;
	}

	/* Look up flow. */
	flow = ovs_flow_tbl_lookup(rcu_dereference(dp->table), &key, key_len);
	if (unlikely(!flow)) {
		/* No match: hand the packet to userspace as a MISS upcall. */
		struct dp_upcall_info upcall;

		upcall.cmd = OVS_PACKET_CMD_MISS;
		upcall.key = &key;
		upcall.userdata = NULL;
		upcall.pid = p->upcall_pid;
		ovs_dp_upcall(dp, skb, &upcall);
		consume_skb(skb);
		stats_counter = &stats->n_missed;
		goto out;
	}

	OVS_CB(skb)->flow = flow;

	stats_counter = &stats->n_hit;
	ovs_flow_used(OVS_CB(skb)->flow, skb);
	ovs_execute_actions(dp, skb);

out:
	/* Update datapath statistics.
*/ u64_stats_update_begin(&stats->sync); (*stats_counter)++; u64_stats_update_end(&stats->sync); } static struct genl_family dp_packet_genl_family = { .id = GENL_ID_GENERATE, .hdrsize = sizeof(struct ovs_header), .name = OVS_PACKET_FAMILY, .version = OVS_PACKET_VERSION, .maxattr = OVS_PACKET_ATTR_MAX }; int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb, const struct dp_upcall_info *upcall_info) { struct dp_stats_percpu *stats; int dp_ifindex; int err; if (upcall_info->pid == 0) { err = -ENOTCONN; goto err; } dp_ifindex = get_dpifindex(dp); if (!dp_ifindex) { err = -ENODEV; goto err; } if (!skb_is_gso(skb)) err = queue_userspace_packet(dp_ifindex, skb, upcall_info); else err = queue_gso_packets(dp_ifindex, skb, upcall_info); if (err) goto err; return 0; err: stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id()); u64_stats_update_begin(&stats->sync); stats->n_lost++; u64_stats_update_end(&stats->sync); return err; } static int queue_gso_packets(int dp_ifindex, struct sk_buff *skb, const struct dp_upcall_info *upcall_info) { struct dp_upcall_info later_info; struct sw_flow_key later_key; struct sk_buff *segs, *nskb; int err; segs = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM); if (IS_ERR(skb)) return PTR_ERR(skb); /* Queue all of the segments. */ skb = segs; do { err = queue_userspace_packet(dp_ifindex, skb, upcall_info); if (err) break; if (skb == segs && skb_shinfo(skb)->gso_type & SKB_GSO_UDP) { /* The initial flow key extracted by ovs_flow_extract() * in this case is for a first fragment, so we need to * properly mark later fragments. */ later_key = *upcall_info->key; later_key.ip.frag = OVS_FRAG_TYPE_LATER; later_info = *upcall_info; later_info.key = &later_key; upcall_info = &later_info; } } while ((skb = skb->next)); /* Free all of the segments. 
*/ skb = segs; do { nskb = skb->next; if (err) kfree_skb(skb); else consume_skb(skb); } while ((skb = nskb)); return err; } static int queue_userspace_packet(int dp_ifindex, struct sk_buff *skb, const struct dp_upcall_info *upcall_info) { struct ovs_header *upcall; struct sk_buff *nskb = NULL; struct sk_buff *user_skb; /* to be queued to userspace */ struct nlattr *nla; unsigned int len; int err; if (vlan_tx_tag_present(skb)) { nskb = skb_clone(skb, GFP_ATOMIC); if (!nskb) return -ENOMEM; nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb)); if (!nskb) return -ENOMEM; nskb->vlan_tci = 0; skb = nskb; } if (nla_attr_size(skb->len) > USHRT_MAX) { err = -EFBIG; goto out; } len = sizeof(struct ovs_header); len += nla_total_size(skb->len); len += nla_total_size(FLOW_BUFSIZE); if (upcall_info->cmd == OVS_PACKET_CMD_ACTION) len += nla_total_size(8); user_skb = genlmsg_new(len, GFP_ATOMIC); if (!user_skb) { err = -ENOMEM; goto out; } upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family, 0, upcall_info->cmd); upcall->dp_ifindex = dp_ifindex; nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY); ovs_flow_to_nlattrs(upcall_info->key, user_skb); nla_nest_end(user_skb, nla); if (upcall_info->userdata) nla_put_u64(user_skb, OVS_PACKET_ATTR_USERDATA, nla_get_u64(upcall_info->userdata)); nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len); skb_copy_and_csum_dev(skb, nla_data(nla)); err = genlmsg_unicast(&init_net, user_skb, upcall_info->pid); out: kfree_skb(nskb); return err; } /* Called with genl_mutex. 
*/ static int flush_flows(int dp_ifindex) { struct flow_table *old_table; struct flow_table *new_table; struct datapath *dp; dp = get_dp(dp_ifindex); if (!dp) return -ENODEV; old_table = genl_dereference(dp->table); new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS); if (!new_table) return -ENOMEM; rcu_assign_pointer(dp->table, new_table); ovs_flow_tbl_deferred_destroy(old_table); return 0; } static int validate_actions(const struct nlattr *attr, const struct sw_flow_key *key, int depth); static int validate_sample(const struct nlattr *attr, const struct sw_flow_key *key, int depth) { const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1]; const struct nlattr *probability, *actions; const struct nlattr *a; int rem; memset(attrs, 0, sizeof(attrs)); nla_for_each_nested(a, attr, rem) { int type = nla_type(a); if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type]) return -EINVAL; attrs[type] = a; } if (rem) return -EINVAL; probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY]; if (!probability || nla_len(probability) != sizeof(u32)) return -EINVAL; actions = attrs[OVS_SAMPLE_ATTR_ACTIONS]; if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN)) return -EINVAL; return validate_actions(actions, key, depth + 1); } static int validate_tp_port(const struct sw_flow_key *flow_key) { if (flow_key->eth.type == htons(ETH_P_IP)) { if (flow_key->ipv4.tp.src && flow_key->ipv4.tp.dst) return 0; } else if (flow_key->eth.type == htons(ETH_P_IPV6)) { if (flow_key->ipv6.tp.src && flow_key->ipv6.tp.dst) return 0; } return -EINVAL; } static int validate_set(const struct nlattr *a, const struct sw_flow_key *flow_key) { const struct nlattr *ovs_key = nla_data(a); int key_type = nla_type(ovs_key); /* There can be only one key in a action */ if (nla_total_size(nla_len(ovs_key)) != nla_len(a)) return -EINVAL; if (key_type > OVS_KEY_ATTR_MAX || nla_len(ovs_key) != ovs_key_lens[key_type]) return -EINVAL; switch (key_type) { const struct ovs_key_ipv4 *ipv4_key; case OVS_KEY_ATTR_PRIORITY: case 
OVS_KEY_ATTR_ETHERNET: break; case OVS_KEY_ATTR_IPV4: if (flow_key->eth.type != htons(ETH_P_IP)) return -EINVAL; if (!flow_key->ipv4.addr.src || !flow_key->ipv4.addr.dst) return -EINVAL; ipv4_key = nla_data(ovs_key); if (ipv4_key->ipv4_proto != flow_key->ip.proto) return -EINVAL; if (ipv4_key->ipv4_frag != flow_key->ip.frag) return -EINVAL; break; case OVS_KEY_ATTR_TCP: if (flow_key->ip.proto != IPPROTO_TCP) return -EINVAL; return validate_tp_port(flow_key); case OVS_KEY_ATTR_UDP: if (flow_key->ip.proto != IPPROTO_UDP) return -EINVAL; return validate_tp_port(flow_key); default: return -EINVAL; } return 0; } static int validate_userspace(const struct nlattr *attr) { static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = { [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 }, [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_U64 }, }; struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1]; int error; error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX, attr, userspace_policy); if (error) return error; if (!a[OVS_USERSPACE_ATTR_PID] || !nla_get_u32(a[OVS_USERSPACE_ATTR_PID])) return -EINVAL; return 0; } static int validate_actions(const struct nlattr *attr, const struct sw_flow_key *key, int depth) { const struct nlattr *a; int rem, err; if (depth >= SAMPLE_ACTION_DEPTH) return -EOVERFLOW; nla_for_each_nested(a, attr, rem) { /* Expected argument lengths, (u32)-1 for variable length. 
*/ static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = { [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32), [OVS_ACTION_ATTR_USERSPACE] = (u32)-1, [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan), [OVS_ACTION_ATTR_POP_VLAN] = 0, [OVS_ACTION_ATTR_SET] = (u32)-1, [OVS_ACTION_ATTR_SAMPLE] = (u32)-1 }; const struct ovs_action_push_vlan *vlan; int type = nla_type(a); if (type > OVS_ACTION_ATTR_MAX || (action_lens[type] != nla_len(a) && action_lens[type] != (u32)-1)) return -EINVAL; switch (type) { case OVS_ACTION_ATTR_UNSPEC: return -EINVAL; case OVS_ACTION_ATTR_USERSPACE: err = validate_userspace(a); if (err) return err; break; case OVS_ACTION_ATTR_OUTPUT: if (nla_get_u32(a) >= DP_MAX_PORTS) return -EINVAL; break; case OVS_ACTION_ATTR_POP_VLAN: break; case OVS_ACTION_ATTR_PUSH_VLAN: vlan = nla_data(a); if (vlan->vlan_tpid != htons(ETH_P_8021Q)) return -EINVAL; if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT))) return -EINVAL; break; case OVS_ACTION_ATTR_SET: err = validate_set(a, key); if (err) return err; break; case OVS_ACTION_ATTR_SAMPLE: err = validate_sample(a, key, depth); if (err) return err; break; default: return -EINVAL; } } if (rem > 0) return -EINVAL; return 0; } static void clear_stats(struct sw_flow *flow) { flow->used = 0; flow->tcp_flags = 0; flow->packet_count = 0; flow->byte_count = 0; } static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) { struct ovs_header *ovs_header = info->userhdr; struct nlattr **a = info->attrs; struct sw_flow_actions *acts; struct sk_buff *packet; struct sw_flow *flow; struct datapath *dp; struct ethhdr *eth; int len; int err; int key_len; err = -EINVAL; if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] || !a[OVS_PACKET_ATTR_ACTIONS] || nla_len(a[OVS_PACKET_ATTR_PACKET]) < ETH_HLEN) goto err; len = nla_len(a[OVS_PACKET_ATTR_PACKET]); packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL); err = -ENOMEM; if (!packet) goto err; skb_reserve(packet, NET_IP_ALIGN); 
memcpy(__skb_put(packet, len), nla_data(a[OVS_PACKET_ATTR_PACKET]), len); skb_reset_mac_header(packet); eth = eth_hdr(packet); /* Normally, setting the skb 'protocol' field would be handled by a * call to eth_type_trans(), but it assumes there's a sending * device, which we may not have. */ if (ntohs(eth->h_proto) >= 1536) packet->protocol = eth->h_proto; else packet->protocol = htons(ETH_P_802_2); /* Build an sw_flow for sending this packet. */ flow = ovs_flow_alloc(); err = PTR_ERR(flow); if (IS_ERR(flow)) goto err_kfree_skb; err = ovs_flow_extract(packet, -1, &flow->key, &key_len); if (err) goto err_flow_free; err = ovs_flow_metadata_from_nlattrs(&flow->key.phy.priority, &flow->key.phy.in_port, a[OVS_PACKET_ATTR_KEY]); if (err) goto err_flow_free; err = validate_actions(a[OVS_PACKET_ATTR_ACTIONS], &flow->key, 0); if (err) goto err_flow_free; flow->hash = ovs_flow_hash(&flow->key, key_len); acts = ovs_flow_actions_alloc(a[OVS_PACKET_ATTR_ACTIONS]); err = PTR_ERR(acts); if (IS_ERR(acts)) goto err_flow_free; rcu_assign_pointer(flow->sf_acts, acts); OVS_CB(packet)->flow = flow; packet->priority = flow->key.phy.priority; rcu_read_lock(); dp = get_dp(ovs_header->dp_ifindex); err = -ENODEV; if (!dp) goto err_unlock; local_bh_disable(); err = ovs_execute_actions(dp, packet); local_bh_enable(); rcu_read_unlock(); ovs_flow_free(flow); return err; err_unlock: rcu_read_unlock(); err_flow_free: ovs_flow_free(flow); err_kfree_skb: kfree_skb(packet); err: return err; } static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = { [OVS_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC }, [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED }, [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED }, }; static struct genl_ops dp_packet_genl_ops[] = { { .cmd = OVS_PACKET_CMD_EXECUTE, .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. 
*/ .policy = packet_policy, .doit = ovs_packet_cmd_execute } }; static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats) { int i; struct flow_table *table = genl_dereference(dp->table); stats->n_flows = ovs_flow_tbl_count(table); stats->n_hit = stats->n_missed = stats->n_lost = 0; for_each_possible_cpu(i) { const struct dp_stats_percpu *percpu_stats; struct dp_stats_percpu local_stats; unsigned int start; percpu_stats = per_cpu_ptr(dp->stats_percpu, i); do { start = u64_stats_fetch_begin_bh(&percpu_stats->sync); local_stats = *percpu_stats; } while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start)); stats->n_hit += local_stats.n_hit; stats->n_missed += local_stats.n_missed; stats->n_lost += local_stats.n_lost; } } static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = { [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED }, [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED }, [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG }, }; static struct genl_family dp_flow_genl_family = { .id = GENL_ID_GENERATE, .hdrsize = sizeof(struct ovs_header), .name = OVS_FLOW_FAMILY, .version = OVS_FLOW_VERSION, .maxattr = OVS_FLOW_ATTR_MAX }; static struct genl_multicast_group ovs_dp_flow_multicast_group = { .name = OVS_FLOW_MCGROUP }; /* Called with genl_lock. 
*/ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp, struct sk_buff *skb, u32 pid, u32 seq, u32 flags, u8 cmd) { const int skb_orig_len = skb->len; const struct sw_flow_actions *sf_acts; struct ovs_flow_stats stats; struct ovs_header *ovs_header; struct nlattr *nla; unsigned long used; u8 tcp_flags; int err; sf_acts = rcu_dereference_protected(flow->sf_acts, lockdep_genl_is_held()); ovs_header = genlmsg_put(skb, pid, seq, &dp_flow_genl_family, flags, cmd); if (!ovs_header) return -EMSGSIZE; ovs_header->dp_ifindex = get_dpifindex(dp); nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY); if (!nla) goto nla_put_failure; err = ovs_flow_to_nlattrs(&flow->key, skb); if (err) goto error; nla_nest_end(skb, nla); spin_lock_bh(&flow->lock); used = flow->used; stats.n_packets = flow->packet_count; stats.n_bytes = flow->byte_count; tcp_flags = flow->tcp_flags; spin_unlock_bh(&flow->lock); if (used) NLA_PUT_U64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)); if (stats.n_packets) NLA_PUT(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats); if (tcp_flags) NLA_PUT_U8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags); /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if * this is the first flow to be dumped into 'skb'. This is unusual for * Netlink but individual action lists can be longer than * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this. * The userspace caller can always fetch the actions separately if it * really wants them. (Most userspace callers in fact don't care.) * * This can only fail for dump operations because the skb is always * properly sized for single flows. 
*/ err = nla_put(skb, OVS_FLOW_ATTR_ACTIONS, sf_acts->actions_len, sf_acts->actions); if (err < 0 && skb_orig_len) goto error; return genlmsg_end(skb, ovs_header); nla_put_failure: err = -EMSGSIZE; error: genlmsg_cancel(skb, ovs_header); return err; } static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow) { const struct sw_flow_actions *sf_acts; int len; sf_acts = rcu_dereference_protected(flow->sf_acts, lockdep_genl_is_held()); /* OVS_FLOW_ATTR_KEY */ len = nla_total_size(FLOW_BUFSIZE); /* OVS_FLOW_ATTR_ACTIONS */ len += nla_total_size(sf_acts->actions_len); /* OVS_FLOW_ATTR_STATS */ len += nla_total_size(sizeof(struct ovs_flow_stats)); /* OVS_FLOW_ATTR_TCP_FLAGS */ len += nla_total_size(1); /* OVS_FLOW_ATTR_USED */ len += nla_total_size(8); len += NLMSG_ALIGN(sizeof(struct ovs_header)); return genlmsg_new(len, GFP_KERNEL); } static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow, struct datapath *dp, u32 pid, u32 seq, u8 cmd) { struct sk_buff *skb; int retval; skb = ovs_flow_cmd_alloc_info(flow); if (!skb) return ERR_PTR(-ENOMEM); retval = ovs_flow_cmd_fill_info(flow, dp, skb, pid, seq, 0, cmd); BUG_ON(retval < 0); return skb; } static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info) { struct nlattr **a = info->attrs; struct ovs_header *ovs_header = info->userhdr; struct sw_flow_key key; struct sw_flow *flow; struct sk_buff *reply; struct datapath *dp; struct flow_table *table; int error; int key_len; /* Extract key. */ error = -EINVAL; if (!a[OVS_FLOW_ATTR_KEY]) goto error; error = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]); if (error) goto error; /* Validate actions. 
*/ if (a[OVS_FLOW_ATTR_ACTIONS]) { error = validate_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, 0); if (error) goto error; } else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) { error = -EINVAL; goto error; } dp = get_dp(ovs_header->dp_ifindex); error = -ENODEV; if (!dp) goto error; table = genl_dereference(dp->table); flow = ovs_flow_tbl_lookup(table, &key, key_len); if (!flow) { struct sw_flow_actions *acts; /* Bail out if we're not allowed to create a new flow. */ error = -ENOENT; if (info->genlhdr->cmd == OVS_FLOW_CMD_SET) goto error; /* Expand table, if necessary, to make room. */ if (ovs_flow_tbl_need_to_expand(table)) { struct flow_table *new_table; new_table = ovs_flow_tbl_expand(table); if (!IS_ERR(new_table)) { rcu_assign_pointer(dp->table, new_table); ovs_flow_tbl_deferred_destroy(table); table = genl_dereference(dp->table); } } /* Allocate flow. */ flow = ovs_flow_alloc(); if (IS_ERR(flow)) { error = PTR_ERR(flow); goto error; } flow->key = key; clear_stats(flow); /* Obtain actions. */ acts = ovs_flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]); error = PTR_ERR(acts); if (IS_ERR(acts)) goto error_free_flow; rcu_assign_pointer(flow->sf_acts, acts); /* Put flow in bucket. */ flow->hash = ovs_flow_hash(&key, key_len); ovs_flow_tbl_insert(table, flow); reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid, info->snd_seq, OVS_FLOW_CMD_NEW); } else { /* We found a matching flow. */ struct sw_flow_actions *old_acts; struct nlattr *acts_attrs; /* Bail out if we're not allowed to modify an existing flow. * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL * because Generic Netlink treats the latter as a dump * request. We also accept NLM_F_EXCL in case that bug ever * gets fixed. */ error = -EEXIST; if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW && info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL)) goto error; /* Update actions. 
*/ old_acts = rcu_dereference_protected(flow->sf_acts, lockdep_genl_is_held()); acts_attrs = a[OVS_FLOW_ATTR_ACTIONS]; if (acts_attrs && (old_acts->actions_len != nla_len(acts_attrs) || memcmp(old_acts->actions, nla_data(acts_attrs), old_acts->actions_len))) { struct sw_flow_actions *new_acts; new_acts = ovs_flow_actions_alloc(acts_attrs); error = PTR_ERR(new_acts); if (IS_ERR(new_acts)) goto error; rcu_assign_pointer(flow->sf_acts, new_acts); ovs_flow_deferred_free_acts(old_acts); } reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid, info->snd_seq, OVS_FLOW_CMD_NEW); /* Clear stats. */ if (a[OVS_FLOW_ATTR_CLEAR]) { spin_lock_bh(&flow->lock); clear_stats(flow); spin_unlock_bh(&flow->lock); } } if (!IS_ERR(reply)) genl_notify(reply, genl_info_net(info), info->snd_pid, ovs_dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL); else netlink_set_err(init_net.genl_sock, 0, ovs_dp_flow_multicast_group.id, PTR_ERR(reply)); return 0; error_free_flow: ovs_flow_free(flow); error: return error; } static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info) { struct nlattr **a = info->attrs; struct ovs_header *ovs_header = info->userhdr; struct sw_flow_key key; struct sk_buff *reply; struct sw_flow *flow; struct datapath *dp; struct flow_table *table; int err; int key_len; if (!a[OVS_FLOW_ATTR_KEY]) return -EINVAL; err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]); if (err) return err; dp = get_dp(ovs_header->dp_ifindex); if (!dp) return -ENODEV; table = genl_dereference(dp->table); flow = ovs_flow_tbl_lookup(table, &key, key_len); if (!flow) return -ENOENT; reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid, info->snd_seq, OVS_FLOW_CMD_NEW); if (IS_ERR(reply)) return PTR_ERR(reply); return genlmsg_reply(reply, info); } static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info) { struct nlattr **a = info->attrs; struct ovs_header *ovs_header = info->userhdr; struct sw_flow_key key; struct sk_buff *reply; struct sw_flow *flow; 
struct datapath *dp; struct flow_table *table; int err; int key_len; if (!a[OVS_FLOW_ATTR_KEY]) return flush_flows(ovs_header->dp_ifindex); err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]); if (err) return err; dp = get_dp(ovs_header->dp_ifindex); if (!dp) return -ENODEV; table = genl_dereference(dp->table); flow = ovs_flow_tbl_lookup(table, &key, key_len); if (!flow) return -ENOENT; reply = ovs_flow_cmd_alloc_info(flow); if (!reply) return -ENOMEM; ovs_flow_tbl_remove(table, flow); err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_pid, info->snd_seq, 0, OVS_FLOW_CMD_DEL); BUG_ON(err < 0); ovs_flow_deferred_free(flow); genl_notify(reply, genl_info_net(info), info->snd_pid, ovs_dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL); return 0; } static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) { struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh)); struct datapath *dp; struct flow_table *table; dp = get_dp(ovs_header->dp_ifindex); if (!dp) return -ENODEV; table = genl_dereference(dp->table); for (;;) { struct sw_flow *flow; u32 bucket, obj; bucket = cb->args[0]; obj = cb->args[1]; flow = ovs_flow_tbl_next(table, &bucket, &obj); if (!flow) break; if (ovs_flow_cmd_fill_info(flow, dp, skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, NLM_F_MULTI, OVS_FLOW_CMD_NEW) < 0) break; cb->args[0] = bucket; cb->args[1] = obj; } return skb->len; } static struct genl_ops dp_flow_genl_ops[] = { { .cmd = OVS_FLOW_CMD_NEW, .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ .policy = flow_policy, .doit = ovs_flow_cmd_new_or_set }, { .cmd = OVS_FLOW_CMD_DEL, .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ .policy = flow_policy, .doit = ovs_flow_cmd_del }, { .cmd = OVS_FLOW_CMD_GET, .flags = 0, /* OK for unprivileged users. 
*/ .policy = flow_policy, .doit = ovs_flow_cmd_get, .dumpit = ovs_flow_cmd_dump }, { .cmd = OVS_FLOW_CMD_SET, .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ .policy = flow_policy, .doit = ovs_flow_cmd_new_or_set, }, }; static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = { [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 }, [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 }, }; static struct genl_family dp_datapath_genl_family = { .id = GENL_ID_GENERATE, .hdrsize = sizeof(struct ovs_header), .name = OVS_DATAPATH_FAMILY, .version = OVS_DATAPATH_VERSION, .maxattr = OVS_DP_ATTR_MAX }; static struct genl_multicast_group ovs_dp_datapath_multicast_group = { .name = OVS_DATAPATH_MCGROUP }; static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb, u32 pid, u32 seq, u32 flags, u8 cmd) { struct ovs_header *ovs_header; struct ovs_dp_stats dp_stats; int err; ovs_header = genlmsg_put(skb, pid, seq, &dp_datapath_genl_family, flags, cmd); if (!ovs_header) goto error; ovs_header->dp_ifindex = get_dpifindex(dp); rcu_read_lock(); err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp)); rcu_read_unlock(); if (err) goto nla_put_failure; get_dp_stats(dp, &dp_stats); NLA_PUT(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats); return genlmsg_end(skb, ovs_header); nla_put_failure: genlmsg_cancel(skb, ovs_header); error: return -EMSGSIZE; } static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 pid, u32 seq, u8 cmd) { struct sk_buff *skb; int retval; skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb) return ERR_PTR(-ENOMEM); retval = ovs_dp_cmd_fill_info(dp, skb, pid, seq, 0, cmd); if (retval < 0) { kfree_skb(skb); return ERR_PTR(retval); } return skb; } /* Called with genl_mutex and optionally with RTNL lock also. 
*/ static struct datapath *lookup_datapath(struct ovs_header *ovs_header, struct nlattr *a[OVS_DP_ATTR_MAX + 1]) { struct datapath *dp; if (!a[OVS_DP_ATTR_NAME]) dp = get_dp(ovs_header->dp_ifindex); else { struct vport *vport; rcu_read_lock(); vport = ovs_vport_locate(nla_data(a[OVS_DP_ATTR_NAME])); dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL; rcu_read_unlock(); } return dp ? dp : ERR_PTR(-ENODEV); } static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info) { struct nlattr **a = info->attrs; struct vport_parms parms; struct sk_buff *reply; struct datapath *dp; struct vport *vport; int err; err = -EINVAL; if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID]) goto err; rtnl_lock(); err = -ENODEV; if (!try_module_get(THIS_MODULE)) goto err_unlock_rtnl; err = -ENOMEM; dp = kzalloc(sizeof(*dp), GFP_KERNEL); if (dp == NULL) goto err_put_module; INIT_LIST_HEAD(&dp->port_list); /* Allocate table. */ err = -ENOMEM; rcu_assign_pointer(dp->table, ovs_flow_tbl_alloc(TBL_MIN_BUCKETS)); if (!dp->table) goto err_free_dp; dp->stats_percpu = alloc_percpu(struct dp_stats_percpu); if (!dp->stats_percpu) { err = -ENOMEM; goto err_destroy_table; } /* Set up our datapath device. 
*/ parms.name = nla_data(a[OVS_DP_ATTR_NAME]); parms.type = OVS_VPORT_TYPE_INTERNAL; parms.options = NULL; parms.dp = dp; parms.port_no = OVSP_LOCAL; parms.upcall_pid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]); vport = new_vport(&parms); if (IS_ERR(vport)) { err = PTR_ERR(vport); if (err == -EBUSY) err = -EEXIST; goto err_destroy_percpu; } reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW); err = PTR_ERR(reply); if (IS_ERR(reply)) goto err_destroy_local_port; list_add_tail(&dp->list_node, &dps); rtnl_unlock(); genl_notify(reply, genl_info_net(info), info->snd_pid, ovs_dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL); return 0; err_destroy_local_port: ovs_dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL])); err_destroy_percpu: free_percpu(dp->stats_percpu); err_destroy_table: ovs_flow_tbl_destroy(genl_dereference(dp->table)); err_free_dp: kfree(dp); err_put_module: module_put(THIS_MODULE); err_unlock_rtnl: rtnl_unlock(); err: return err; } static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info) { struct vport *vport, *next_vport; struct sk_buff *reply; struct datapath *dp; int err; rtnl_lock(); dp = lookup_datapath(info->userhdr, info->attrs); err = PTR_ERR(dp); if (IS_ERR(dp)) goto exit_unlock; reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_DEL); err = PTR_ERR(reply); if (IS_ERR(reply)) goto exit_unlock; list_for_each_entry_safe(vport, next_vport, &dp->port_list, node) if (vport->port_no != OVSP_LOCAL) ovs_dp_detach_port(vport); list_del(&dp->list_node); ovs_dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL])); /* rtnl_unlock() will wait until all the references to devices that * are pending unregistration have been dropped. We do it here to * ensure that any internal devices (which contain DP pointers) are * fully destroyed before freeing the datapath. 
*/ rtnl_unlock(); call_rcu(&dp->rcu, destroy_dp_rcu); module_put(THIS_MODULE); genl_notify(reply, genl_info_net(info), info->snd_pid, ovs_dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL); return 0; exit_unlock: rtnl_unlock(); return err; } static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info) { struct sk_buff *reply; struct datapath *dp; int err; dp = lookup_datapath(info->userhdr, info->attrs); if (IS_ERR(dp)) return PTR_ERR(dp); reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW); if (IS_ERR(reply)) { err = PTR_ERR(reply); netlink_set_err(init_net.genl_sock, 0, ovs_dp_datapath_multicast_group.id, err); return 0; } genl_notify(reply, genl_info_net(info), info->snd_pid, ovs_dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL); return 0; } static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info) { struct sk_buff *reply; struct datapath *dp; dp = lookup_datapath(info->userhdr, info->attrs); if (IS_ERR(dp)) return PTR_ERR(dp); reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW); if (IS_ERR(reply)) return PTR_ERR(reply); return genlmsg_reply(reply, info); } static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) { struct datapath *dp; int skip = cb->args[0]; int i = 0; list_for_each_entry(dp, &dps, list_node) { if (i >= skip && ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, NLM_F_MULTI, OVS_DP_CMD_NEW) < 0) break; i++; } cb->args[0] = i; return skb->len; } static struct genl_ops dp_datapath_genl_ops[] = { { .cmd = OVS_DP_CMD_NEW, .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ .policy = datapath_policy, .doit = ovs_dp_cmd_new }, { .cmd = OVS_DP_CMD_DEL, .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ .policy = datapath_policy, .doit = ovs_dp_cmd_del }, { .cmd = OVS_DP_CMD_GET, .flags = 0, /* OK for unprivileged users. 
*/ .policy = datapath_policy, .doit = ovs_dp_cmd_get, .dumpit = ovs_dp_cmd_dump }, { .cmd = OVS_DP_CMD_SET, .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ .policy = datapath_policy, .doit = ovs_dp_cmd_set, }, }; static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = { [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 }, [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) }, [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 }, [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 }, [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 }, [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED }, }; static struct genl_family dp_vport_genl_family = { .id = GENL_ID_GENERATE, .hdrsize = sizeof(struct ovs_header), .name = OVS_VPORT_FAMILY, .version = OVS_VPORT_VERSION, .maxattr = OVS_VPORT_ATTR_MAX }; struct genl_multicast_group ovs_dp_vport_multicast_group = { .name = OVS_VPORT_MCGROUP }; /* Called with RTNL lock or RCU read lock. */ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb, u32 pid, u32 seq, u32 flags, u8 cmd) { struct ovs_header *ovs_header; struct ovs_vport_stats vport_stats; int err; ovs_header = genlmsg_put(skb, pid, seq, &dp_vport_genl_family, flags, cmd); if (!ovs_header) return -EMSGSIZE; ovs_header->dp_ifindex = get_dpifindex(vport->dp); NLA_PUT_U32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no); NLA_PUT_U32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type); NLA_PUT_STRING(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)); NLA_PUT_U32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid); ovs_vport_get_stats(vport, &vport_stats); NLA_PUT(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats), &vport_stats); err = ovs_vport_get_options(vport, skb); if (err == -EMSGSIZE) goto error; return genlmsg_end(skb, ovs_header); nla_put_failure: err = -EMSGSIZE; error: genlmsg_cancel(skb, ovs_header); return err; } /* Called with RTNL lock or RCU read lock. 
*/ struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 pid, u32 seq, u8 cmd) { struct sk_buff *skb; int retval; skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!skb) return ERR_PTR(-ENOMEM); retval = ovs_vport_cmd_fill_info(vport, skb, pid, seq, 0, cmd); if (retval < 0) { kfree_skb(skb); return ERR_PTR(retval); } return skb; } /* Called with RTNL lock or RCU read lock. */ static struct vport *lookup_vport(struct ovs_header *ovs_header, struct nlattr *a[OVS_VPORT_ATTR_MAX + 1]) { struct datapath *dp; struct vport *vport; if (a[OVS_VPORT_ATTR_NAME]) { vport = ovs_vport_locate(nla_data(a[OVS_VPORT_ATTR_NAME])); if (!vport) return ERR_PTR(-ENODEV); if (ovs_header->dp_ifindex && ovs_header->dp_ifindex != get_dpifindex(vport->dp)) return ERR_PTR(-ENODEV); return vport; } else if (a[OVS_VPORT_ATTR_PORT_NO]) { u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]); if (port_no >= DP_MAX_PORTS) return ERR_PTR(-EFBIG); dp = get_dp(ovs_header->dp_ifindex); if (!dp) return ERR_PTR(-ENODEV); vport = rcu_dereference_rtnl(dp->ports[port_no]); if (!vport) return ERR_PTR(-ENOENT); return vport; } else return ERR_PTR(-EINVAL); } static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info) { struct nlattr **a = info->attrs; struct ovs_header *ovs_header = info->userhdr; struct vport_parms parms; struct sk_buff *reply; struct vport *vport; struct datapath *dp; u32 port_no; int err; err = -EINVAL; if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] || !a[OVS_VPORT_ATTR_UPCALL_PID]) goto exit; rtnl_lock(); dp = get_dp(ovs_header->dp_ifindex); err = -ENODEV; if (!dp) goto exit_unlock; if (a[OVS_VPORT_ATTR_PORT_NO]) { port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]); err = -EFBIG; if (port_no >= DP_MAX_PORTS) goto exit_unlock; vport = rtnl_dereference(dp->ports[port_no]); err = -EBUSY; if (vport) goto exit_unlock; } else { for (port_no = 1; ; port_no++) { if (port_no >= DP_MAX_PORTS) { err = -EFBIG; goto exit_unlock; } vport = 
rtnl_dereference(dp->ports[port_no]); if (!vport) break; } } parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]); parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]); parms.options = a[OVS_VPORT_ATTR_OPTIONS]; parms.dp = dp; parms.port_no = port_no; parms.upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]); vport = new_vport(&parms); err = PTR_ERR(vport); if (IS_ERR(vport)) goto exit_unlock; reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq, OVS_VPORT_CMD_NEW); if (IS_ERR(reply)) { err = PTR_ERR(reply); ovs_dp_detach_port(vport); goto exit_unlock; } genl_notify(reply, genl_info_net(info), info->snd_pid, ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL); exit_unlock: rtnl_unlock(); exit: return err; } static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info) { struct nlattr **a = info->attrs; struct sk_buff *reply; struct vport *vport; int err; rtnl_lock(); vport = lookup_vport(info->userhdr, a); err = PTR_ERR(vport); if (IS_ERR(vport)) goto exit_unlock; err = 0; if (a[OVS_VPORT_ATTR_TYPE] && nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) err = -EINVAL; if (!err && a[OVS_VPORT_ATTR_OPTIONS]) err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]); if (!err && a[OVS_VPORT_ATTR_UPCALL_PID]) vport->upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]); reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq, OVS_VPORT_CMD_NEW); if (IS_ERR(reply)) { netlink_set_err(init_net.genl_sock, 0, ovs_dp_vport_multicast_group.id, PTR_ERR(reply)); goto exit_unlock; } genl_notify(reply, genl_info_net(info), info->snd_pid, ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL); exit_unlock: rtnl_unlock(); return err; } static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info) { struct nlattr **a = info->attrs; struct sk_buff *reply; struct vport *vport; int err; rtnl_lock(); vport = lookup_vport(info->userhdr, a); err = PTR_ERR(vport); if (IS_ERR(vport)) goto exit_unlock; if (vport->port_no == 
OVSP_LOCAL) { err = -EINVAL; goto exit_unlock; } reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq, OVS_VPORT_CMD_DEL); err = PTR_ERR(reply); if (IS_ERR(reply)) goto exit_unlock; ovs_dp_detach_port(vport); genl_notify(reply, genl_info_net(info), info->snd_pid, ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL); exit_unlock: rtnl_unlock(); return err; } static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info) { struct nlattr **a = info->attrs; struct ovs_header *ovs_header = info->userhdr; struct sk_buff *reply; struct vport *vport; int err; rcu_read_lock(); vport = lookup_vport(ovs_header, a); err = PTR_ERR(vport); if (IS_ERR(vport)) goto exit_unlock; reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq, OVS_VPORT_CMD_NEW); err = PTR_ERR(reply); if (IS_ERR(reply)) goto exit_unlock; rcu_read_unlock(); return genlmsg_reply(reply, info); exit_unlock: rcu_read_unlock(); return err; } static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) { struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh)); struct datapath *dp; u32 port_no; int retval; dp = get_dp(ovs_header->dp_ifindex); if (!dp) return -ENODEV; rcu_read_lock(); for (port_no = cb->args[0]; port_no < DP_MAX_PORTS; port_no++) { struct vport *vport; vport = rcu_dereference(dp->ports[port_no]); if (!vport) continue; if (ovs_vport_cmd_fill_info(vport, skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, NLM_F_MULTI, OVS_VPORT_CMD_NEW) < 0) break; } rcu_read_unlock(); cb->args[0] = port_no; retval = skb->len; return retval; } static void rehash_flow_table(struct work_struct *work) { struct datapath *dp; genl_lock(); list_for_each_entry(dp, &dps, list_node) { struct flow_table *old_table = genl_dereference(dp->table); struct flow_table *new_table; new_table = ovs_flow_tbl_rehash(old_table); if (!IS_ERR(new_table)) { rcu_assign_pointer(dp->table, new_table); ovs_flow_tbl_deferred_destroy(old_table); } } genl_unlock(); 
schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL); } static struct genl_ops dp_vport_genl_ops[] = { { .cmd = OVS_VPORT_CMD_NEW, .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ .policy = vport_policy, .doit = ovs_vport_cmd_new }, { .cmd = OVS_VPORT_CMD_DEL, .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ .policy = vport_policy, .doit = ovs_vport_cmd_del }, { .cmd = OVS_VPORT_CMD_GET, .flags = 0, /* OK for unprivileged users. */ .policy = vport_policy, .doit = ovs_vport_cmd_get, .dumpit = ovs_vport_cmd_dump }, { .cmd = OVS_VPORT_CMD_SET, .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ .policy = vport_policy, .doit = ovs_vport_cmd_set, }, }; struct genl_family_and_ops { struct genl_family *family; struct genl_ops *ops; int n_ops; struct genl_multicast_group *group; }; static const struct genl_family_and_ops dp_genl_families[] = { { &dp_datapath_genl_family, dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops), &ovs_dp_datapath_multicast_group }, { &dp_vport_genl_family, dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops), &ovs_dp_vport_multicast_group }, { &dp_flow_genl_family, dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops), &ovs_dp_flow_multicast_group }, { &dp_packet_genl_family, dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops), NULL }, }; static void dp_unregister_genl(int n_families) { int i; for (i = 0; i < n_families; i++) genl_unregister_family(dp_genl_families[i].family); } static int dp_register_genl(void) { int n_registered; int err; int i; n_registered = 0; for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) { const struct genl_family_and_ops *f = &dp_genl_families[i]; err = genl_register_family_with_ops(f->family, f->ops, f->n_ops); if (err) goto error; n_registered++; if (f->group) { err = genl_register_mc_group(f->family, f->group); if (err) goto error; } } return 0; error: dp_unregister_genl(n_registered); return err; } static int __init dp_init(void) { struct sk_buff *dummy_skb; int err; 
BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb)); pr_info("Open vSwitch switching datapath\n"); err = ovs_flow_init(); if (err) goto error; err = ovs_vport_init(); if (err) goto error_flow_exit; err = register_netdevice_notifier(&ovs_dp_device_notifier); if (err) goto error_vport_exit; err = dp_register_genl(); if (err < 0) goto error_unreg_notifier; schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL); return 0; error_unreg_notifier: unregister_netdevice_notifier(&ovs_dp_device_notifier); error_vport_exit: ovs_vport_exit(); error_flow_exit: ovs_flow_exit(); error: return err; } static void dp_cleanup(void) { cancel_delayed_work_sync(&rehash_flow_wq); rcu_barrier(); dp_unregister_genl(ARRAY_SIZE(dp_genl_families)); unregister_netdevice_notifier(&ovs_dp_device_notifier); ovs_vport_exit(); ovs_flow_exit(); } module_init(dp_init); module_exit(dp_cleanup); MODULE_DESCRIPTION("Open vSwitch switching datapath"); MODULE_LICENSE("GPL");
{ "content_hash": "715b14d06e2755a10e371bef3e9ceaf9", "timestamp": "", "source": "github", "line_count": 1903, "max_line_length": 80, "avg_line_length": 24.105097214923806, "alnum_prop": 0.6574162888036275, "repo_name": "michelborgess/RealOne-Victara-Kernel", "id": "e66341ec455c3d7c588680bd829e044b7d7752ce", "size": "46582", "binary": false, "copies": "4310", "ref": "refs/heads/master", "path": "net/openvswitch/datapath.c", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "ASP", "bytes": "4528" }, { "name": "Assembly", "bytes": "8953302" }, { "name": "Awk", "bytes": "18610" }, { "name": "C", "bytes": "458831037" }, { "name": "C++", "bytes": "4466976" }, { "name": "Groff", "bytes": "22788" }, { "name": "Lex", "bytes": "40798" }, { "name": "Makefile", "bytes": "1273896" }, { "name": "Objective-C", "bytes": "751522" }, { "name": "Perl", "bytes": "357940" }, { "name": "Python", "bytes": "32666" }, { "name": "Scilab", "bytes": "21433" }, { "name": "Shell", "bytes": "123788" }, { "name": "SourcePawn", "bytes": "4856" }, { "name": "UnrealScript", "bytes": "20838" }, { "name": "Yacc", "bytes": "83091" } ], "symlink_target": "" }
#ifndef _PASSENGER_LOGGING_SERVER_H_ #define _PASSENGER_LOGGING_SERVER_H_ #include <oxt/system_calls.hpp> #include <oxt/macros.hpp> #include <boost/shared_ptr.hpp> #include <string> #include <ostream> #include <sstream> #include <map> #include <ev++.h> #include <sys/types.h> #include <sys/time.h> #include <sys/stat.h> #include <grp.h> #include <cstring> #include <ctime> #include <cassert> #include "DataStoreId.h" #include "RemoteSender.h" #include "ChangeNotifier.h" #include "../EventedMessageServer.h" #include "../MessageReadersWriters.h" #include "../StaticString.h" #include "../Exceptions.h" #include "../MessageChannel.h" #include "../Constants.h" #include "../Utils.h" #include "../Utils/MD5.h" #include "../Utils/IOUtils.h" #include "../Utils/StrIntUtils.h" namespace Passenger { using namespace std; using namespace boost; using namespace oxt; class LoggingServer: public EventedMessageServer { private: static const int MAX_LOG_SINK_CACHE_SIZE = 512; static const int GARBAGE_COLLECTION_TIMEOUT = 1.25 * 60 * 60; // 1 hour 15 minutes struct LogSink; typedef shared_ptr<LogSink> LogSinkPtr; typedef map<string, LogSinkPtr> LogSinkCache; struct LogSink { LoggingServer *server; /** * Marks how many times this LogSink is currently opened, i.e. the * number of Transaction objects currently referencing this LogSink. * @invariant * (opened == 0) == (this LogSink is in LoggingServer.inactiveLogSinks) */ int opened; /** Last time this LogSink hit an open count of 0. */ ev_tstamp lastUsed; /** Last time data was actually written to the underlying storage device. */ ev_tstamp lastFlushed; /** * This LogSink's iterator inside LoggingServer.logSinkCache. */ LogSinkCache::iterator cacheIterator; /** * This LogSink's iterator inside LoggingServer.inactiveLogSinks. * Only valid when opened == 0. 
*/ list<LogSinkPtr>::iterator inactiveLogSinksIterator; LogSink(LoggingServer *_server) { server = _server; opened = 0; lastUsed = ev_now(server->getLoop()); lastFlushed = 0; } virtual ~LogSink() { // We really want to flush() here but can't call virtual // functions in destructor. :( } virtual bool isRemote() const { return false; } virtual void append(const DataStoreId &dataStoreId, const StaticString &data) = 0; virtual void flush() { } virtual void dump(ostream &stream) const { } }; struct LogFile: public LogSink { static const unsigned int BUFFER_CAPACITY = 8 * 1024; string filename; FileDescriptor fd; char buffer[BUFFER_CAPACITY]; unsigned int bufferSize; /** * Contains every (groupName, nodeName, category) tuple for * which their data is currently buffered in this sink. */ set<DataStoreId> dataStoreIds; LogFile(LoggingServer *server, const string &filename, mode_t filePermissions) : LogSink(server) { int ret; bufferSize = 0; this->filename = filename; fd = syscalls::open(filename.c_str(), O_CREAT | O_WRONLY | O_APPEND, filePermissions); if (fd == -1) { int e = errno; throw FileSystemException("Cannnot open file", e, filename); } do { ret = fchmod(fd, filePermissions); } while (ret == -1 && errno == EINTR); } virtual ~LogFile() { flush(); } void notifyChanges() { if (server->changeNotifier != NULL) { set<DataStoreId>::const_iterator it; set<DataStoreId>::const_iterator end = dataStoreIds.end(); for (it = dataStoreIds.begin(); it != dataStoreIds.end(); it++) { server->changeNotifier->changed(*it); } } dataStoreIds.clear(); } virtual void append(const DataStoreId &dataStoreId, const StaticString &data) { if (server->changeNotifier != NULL) { dataStoreIds.insert(dataStoreId); } if (bufferSize + data.size() > BUFFER_CAPACITY) { StaticString data2[2]; data2[0] = StaticString(buffer, bufferSize); data2[1] = data; gatheredWrite(fd, data2, 2); lastFlushed = ev_now(server->getLoop()); bufferSize = 0; notifyChanges(); } else { memcpy(buffer + bufferSize, data.data(), 
data.size()); bufferSize += data.size(); } } virtual void flush() { if (bufferSize > 0) { lastFlushed = ev_now(server->getLoop()); MessageChannel(fd).writeRaw(StaticString(buffer, bufferSize)); bufferSize = 0; notifyChanges(); } } virtual void dump(ostream &stream) const { stream << " Log file: file=" << filename << ", " "opened=" << opened << ", " "age=" << long(ev_now(server->getLoop()) - lastUsed) << "\n"; } }; typedef shared_ptr<LogFile> LogFilePtr; struct RemoteSink: public LogSink { /* RemoteSender compresses the data with zlib before sending it * to the server. Even including Base64 and URL encoding overhead, * this compresses the data to about 25% of its original size. * Therefore we set a buffer capacity of a little less than 4 times * the TCP maximum segment size so that we can send as much * data as possible to the server in a single TCP segment. * With the "little less" we take into account: * - HTTPS overhead. This can be as high as 2 KB. * - The fact that RemoteSink.append() might try to flush the * current buffer the current data. Empirical evidence has * shown that the data for a request transaction is usually * less than 5 KB. 
*/ static const unsigned int BUFFER_CAPACITY = 4 * 64 * 1024 - 16 * 1024; string unionStationKey; string nodeName; string category; char buffer[BUFFER_CAPACITY]; unsigned int bufferSize; RemoteSink(LoggingServer *server, const string &unionStationKey, const string &nodeName, const string &category) : LogSink(server) { this->unionStationKey = unionStationKey; this->nodeName = nodeName; this->category = category; this->bufferSize = 0; } virtual ~RemoteSink() { flush(); } virtual bool isRemote() const { return true; } virtual void append(const DataStoreId &dataStoreId, const StaticString &data) { if (bufferSize + data.size() > BUFFER_CAPACITY) { StaticString data2[2]; data2[0] = StaticString(buffer, bufferSize); data2[1] = data; server->remoteSender.schedule(unionStationKey, nodeName, category, data2, 2); lastFlushed = ev_now(server->getLoop()); bufferSize = 0; } else { memcpy(buffer + bufferSize, data.data(), data.size()); bufferSize += data.size(); } } virtual void flush() { if (bufferSize > 0) { lastFlushed = ev_now(server->getLoop()); StaticString data(buffer, bufferSize); server->remoteSender.schedule(unionStationKey, nodeName, category, &data, 1); bufferSize = 0; } } virtual void dump(ostream &stream) const { stream << " Remote sink: " "key=" << unionStationKey << ", " "node=" << nodeName << ", " "category=" << category << ", " "opened=" << opened << ", " "age=" << long(ev_now(server->getLoop()) - lastUsed) << ", " "bufferSize=" << bufferSize << "\n"; } }; struct Transaction { LoggingServer *server; LogSinkPtr logSink; string txnId; DataStoreId dataStoreId; unsigned int writeCount; int refcount; bool crashProtect, discarded; string data; Transaction(LoggingServer *server) { this->server = server; data.reserve(8 * 1024); } ~Transaction() { if (logSink != NULL) { if (!discarded) { logSink->append(dataStoreId, data); } server->closeLogSink(logSink); } } StaticString getGroupName() const { return dataStoreId.getGroupName(); } StaticString getNodeName() const { 
return dataStoreId.getNodeName(); } StaticString getCategory() const { return dataStoreId.getCategory(); } void discard() { data.clear(); discarded = true; } void dump(ostream &stream) const { stream << " Transaction " << txnId << ":\n"; stream << " Group : " << getGroupName() << "\n"; stream << " Node : " << getNodeName() << "\n"; stream << " Category: " << getCategory() << "\n"; stream << " Refcount: " << refcount << "\n"; } }; typedef shared_ptr<Transaction> TransactionPtr; enum ClientType { UNINITIALIZED, LOGGER, WATCHER }; struct Client: public EventedMessageClient { string nodeName; ClientType type; char nodeId[MD5_HEX_SIZE]; /** * Set of transaction IDs opened by this client. * @invariant This is a subset of the transaction IDs in the 'transactions' member. */ set<string> openTransactions; ScalarMessage dataReader; TransactionPtr currentTransaction; string currentTimestamp; Client(struct ev_loop *loop, const FileDescriptor &fd) : EventedMessageClient(loop, fd) { type = UNINITIALIZED; dataReader.setMaxSize(1024 * 128); } }; typedef shared_ptr<Client> ClientPtr; typedef map<string, TransactionPtr> TransactionMap; string dir; gid_t gid; string dirPermissions; mode_t filePermissions; RemoteSender remoteSender; ChangeNotifierPtr changeNotifier; ev::timer garbageCollectionTimer; ev::timer sinkFlushingTimer; ev::timer exitTimer; TransactionMap transactions; LogSinkCache logSinkCache; /** * @invariant * inactiveLogSinks is sorted from oldest to youngest (by lastTime member). 
* for all s in inactiveLogSinks: * s.opened == 0 * inactiveLogSinks.size() == inactiveLogSinksCount */ list<LogSinkPtr> inactiveLogSinks; int inactiveLogSinksCount; RandomGenerator randomGenerator; bool refuseNewConnections; bool exitRequested; unsigned long long exitBeginTime; void sendErrorToClient(Client *client, const string &message) { client->writeArrayMessage("error", message.c_str(), NULL); logError(client, message); } bool expectingArgumentsCount(Client *client, const vector<StaticString> &args, unsigned int size) { if (args.size() == size) { return true; } else { sendErrorToClient(client, "Invalid number of arguments"); client->disconnect(); return false; } } bool expectingLoggerType(Client *client) { if (client->type == LOGGER) { return true; } else { sendErrorToClient(client, "Client not initialized as logger"); client->disconnect(); return false; } } bool checkWhetherConnectionAreAcceptable(Client *client) { if (refuseNewConnections) { client->writeArrayMessage("server shutting down", NULL); client->disconnect(); return false; } else { return true; } } bool validTxnId(const StaticString &txnId) const { // must contain timestamp // must contain separator // must contain random id // must not be too large return !txnId.empty(); } bool validUnionStationKey(const StaticString &key) const { // must be hexadecimal // must not be too large return !key.empty(); } bool validLogContent(const StaticString &data) const { const char *current = data.c_str(); const char *end = current + data.size(); while (current < end) { char c = *current; if ((c < 1 && c > 126) || c == '\n' || c == '\r') { return false; } current++; } return true; } bool validTimestamp(const StaticString &timestamp) const { // must be hexadecimal // must not be too large return true; } bool supportedCategory(const StaticString &category) const { return category == "requests" || category == "processes" || category == "exceptions"; } time_t extractTimestamp(const StaticString &txnId) const { const 
char *timestampEnd = (const char *) memchr(txnId.c_str(), '-', txnId.size()); if (timestampEnd == NULL) { return 0; } else { time_t timestamp = hexatriToULL( StaticString(txnId.c_str(), timestampEnd - txnId.c_str()) ); return timestamp * 60; } } void appendVersionAndGroupId(string &output, const StaticString &groupName) const { md5_state_t state; md5_byte_t digest[MD5_SIZE]; char checksum[MD5_HEX_SIZE]; output.append("/1/", 3); md5_init(&state); md5_append(&state, (const md5_byte_t *) groupName.data(), groupName.size()); md5_finish(&state, digest); toHex(StaticString((const char *) digest, MD5_SIZE), checksum); output.append(checksum, MD5_HEX_SIZE); } string determineFilename(const StaticString &groupName, const char *nodeId, const StaticString &category, const StaticString &txnId = "") const { time_t timestamp; struct tm tm; char time_str[14]; if (!txnId.empty()) { timestamp = extractTimestamp(txnId); gmtime_r(&timestamp, &tm); strftime(time_str, sizeof(time_str), "%Y/%m/%d/%H", &tm); } string filename; filename.reserve(dir.size() + (3 + MD5_HEX_SIZE) // version and group ID + 1 // "/" + MD5_HEX_SIZE // node ID + 1 // "/" + category.size() + 1 // "/" + sizeof(time_str) // including null terminator, which we use as space for "/" + sizeof("log.txt") ); filename.append(dir); appendVersionAndGroupId(filename, groupName); filename.append(1, '/'); filename.append(nodeId, MD5_HEX_SIZE); filename.append(1, '/'); filename.append(category.c_str(), category.size()); if (!txnId.empty()) { filename.append(1, '/'); filename.append(time_str); filename.append("/log.txt"); } return filename; } void setupGroupAndNodeDir(const StaticString &groupName, const StaticString &nodeName, const char *nodeId) { string filename, groupDir, nodeDir; filename.append(dir); appendVersionAndGroupId(filename, groupName); groupDir = filename; filename.append("/"); filename.append(nodeId, MD5_HEX_SIZE); nodeDir = filename; createFile(groupDir + "/group_name.txt", groupName, filePermissions, 
USER_NOT_GIVEN, GROUP_NOT_GIVEN, false); if (getFileType(groupDir + "/uuid.txt") == FT_NONEXISTANT) { createFile(groupDir + "/uuid.txt", randomGenerator.generateAsciiString(24), filePermissions, USER_NOT_GIVEN, GROUP_NOT_GIVEN, false); } createFile(nodeDir + "/node_name.txt", nodeName, filePermissions, USER_NOT_GIVEN, GROUP_NOT_GIVEN, false); if (getFileType(nodeDir + "/uuid.txt") == FT_NONEXISTANT) { createFile(nodeDir + "/uuid.txt", randomGenerator.generateAsciiString(24), filePermissions, USER_NOT_GIVEN, GROUP_NOT_GIVEN, false); } } bool openLogFileWithCache(const string &filename, LogSinkPtr &theLogSink) { string cacheKey = "file:" + filename; LogSinkCache::iterator it = logSinkCache.find(cacheKey); if (it == logSinkCache.end()) { trimLogSinkCache(MAX_LOG_SINK_CACHE_SIZE - 1); makeDirTree(extractDirName(filename), dirPermissions, USER_NOT_GIVEN, gid); theLogSink.reset(new LogFile(this, filename, filePermissions)); pair<LogSinkCache::iterator, bool> p = logSinkCache.insert(make_pair(cacheKey, theLogSink)); theLogSink->cacheIterator = p.first; theLogSink->opened = 1; return false; } else { theLogSink = it->second; theLogSink->opened++; if (theLogSink->opened == 1) { inactiveLogSinks.erase(theLogSink->inactiveLogSinksIterator); inactiveLogSinksCount--; } return true; } } void openRemoteSink(const StaticString &unionStationKey, const string &nodeName, const string &category, LogSinkPtr &theLogSink) { string cacheKey = "remote:"; cacheKey.append(unionStationKey.c_str(), unionStationKey.size()); cacheKey.append(1, '\0'); cacheKey.append(nodeName); cacheKey.append(1, '\0'); cacheKey.append(category); LogSinkCache::iterator it = logSinkCache.find(cacheKey); if (it == logSinkCache.end()) { trimLogSinkCache(MAX_LOG_SINK_CACHE_SIZE - 1); theLogSink.reset(new RemoteSink(this, unionStationKey, nodeName, category)); pair<LogSinkCache::iterator, bool> p = logSinkCache.insert(make_pair(cacheKey, theLogSink)); theLogSink->cacheIterator = p.first; theLogSink->opened = 1; } else 
{ theLogSink = it->second; theLogSink->opened++; if (theLogSink->opened == 1) { inactiveLogSinks.erase(theLogSink->inactiveLogSinksIterator); inactiveLogSinksCount--; } } } /** * 'Closes' the given log sink. It's not actually deleted from memory; * instead it's marked as inactive and cached for later use. May be * deleted later when resources are low. * * No need to call this manually. Automatically called by Transaction's * destructor. */ void closeLogSink(const LogSinkPtr &logSink) { logSink->opened--; assert(logSink->opened >= 0); logSink->lastUsed = ev_now(getLoop()); if (logSink->opened == 0) { inactiveLogSinks.push_back(logSink); logSink->inactiveLogSinksIterator = inactiveLogSinks.end(); logSink->inactiveLogSinksIterator--; inactiveLogSinksCount++; trimLogSinkCache(MAX_LOG_SINK_CACHE_SIZE); } } /** Try to reduce the log sink cache size to the given size. */ void trimLogSinkCache(unsigned int size) { while (!inactiveLogSinks.empty() && logSinkCache.size() > size) { const LogSinkPtr logSink = inactiveLogSinks.front(); inactiveLogSinks.pop_front(); inactiveLogSinksCount--; logSinkCache.erase(logSink->cacheIterator); } } bool writeLogEntry(Client *client, const TransactionPtr &transaction, const StaticString &timestamp, const StaticString &data) { if (transaction->discarded) { return true; } if (OXT_UNLIKELY( !validLogContent(data) )) { if (client != NULL) { sendErrorToClient(client, "Log entry data contains an invalid character."); client->disconnect(); } return false; } if (OXT_UNLIKELY( !validTimestamp(timestamp) )) { if (client != NULL) { sendErrorToClient(client, "Log entry timestamp is invalid."); client->disconnect(); } return false; } char writeCountStr[sizeof(unsigned int) * 2 + 1]; integerToHexatri(transaction->writeCount, writeCountStr); transaction->writeCount++; transaction->data.reserve(transaction->data.size() + transaction->txnId.size() + 1 + timestamp.size() + 1 + strlen(writeCountStr) + 1 + data.size() + 1); 
transaction->data.append(transaction->txnId); transaction->data.append(" "); transaction->data.append(timestamp); transaction->data.append(" "); transaction->data.append(writeCountStr); transaction->data.append(" "); transaction->data.append(data); transaction->data.append("\n"); return true; } void writeDetachEntry(Client *client, const TransactionPtr &transaction) { char timestamp[2 * sizeof(unsigned long long) + 1]; // Must use System::getUsec() here instead of ev_now() because the // precision of the time is very important. integerToHexatri<unsigned long long>(SystemTime::getUsec(), timestamp); writeDetachEntry(client, transaction, timestamp); } void writeDetachEntry(Client *client, const TransactionPtr &transaction, const StaticString &timestamp) { writeLogEntry(client, transaction, timestamp, "DETACH"); } bool requireRights(Client *client, Account::Rights rights) { if (client->messageServer.account->hasRights(rights)) { return true; } else { P_TRACE(2, "Security error: insufficient rights to execute this command."); client->writeArrayMessage("SecurityException", "Insufficient rights to execute this command.", NULL); client->disconnect(); return false; } } bool isDirectory(const string &dir, struct dirent *entry) const { #ifdef __sun__ string path = dir; path.append("/"); path.append(entry->d_name); return getFileType(path) == FT_DIRECTORY; #else return entry->d_type == DT_DIR; #endif } bool looksLikeNumber(const char *str) const { const char *current = str; while (*current != '\0') { char c = *current; if (!(c >= '0' && c <= '9')) { return false; } current++; } return true; } bool getLastEntryInDirectory(const string &path, string &result) const { DIR *dir = opendir(path.c_str()); struct dirent *entry; vector<string> subdirs; if (dir == NULL) { int e = errno; throw FileSystemException("Cannot open directory " + path, e, path); } while ((entry = readdir(dir)) != NULL) { if (isDirectory(path, entry) && looksLikeNumber(entry->d_name)) { 
subdirs.push_back(entry->d_name); } } closedir(dir); if (subdirs.empty()) { return false; } vector<string>::const_iterator it = subdirs.begin(); vector<string>::const_iterator end = subdirs.end(); vector<string>::const_iterator largest_it = subdirs.begin(); int largest = atoi(subdirs[0]); for (it++; it != end; it++) { const string &subdir = *it; int number = atoi(subdir.c_str()); if (number > largest) { largest_it = it; largest = number; } } result = *largest_it; return true; } static void pendingDataFlushed(EventedClient *_client) { Client *client = (Client *) _client; LoggingServer *self = (LoggingServer *) client->userData; client->onPendingDataFlushed = NULL; if (OXT_UNLIKELY( client->type != WATCHER )) { P_WARN("BUG: pendingDataFlushed() called even though client type is not WATCHER."); client->disconnect(); } else if (self->changeNotifier != NULL) { self->changeNotifier->addClient(client->detach()); } else { client->disconnect(); } } /* Release all inactive log sinks that have been inactive for more than * GARBAGE_COLLECTION_TIMEOUT seconds. */ void releaseInactiveLogSinks(ev_tstamp now) { bool done = false; while (!done && !inactiveLogSinks.empty()) { const LogSinkPtr logSink = inactiveLogSinks.front(); if (now - logSink->lastUsed >= GARBAGE_COLLECTION_TIMEOUT) { inactiveLogSinks.pop_front(); inactiveLogSinksCount--; logSinkCache.erase(logSink->cacheIterator); } else { done = true; } } } void garbageCollect(ev::timer &timer, int revents) { P_DEBUG("Garbage collection time"); releaseInactiveLogSinks(ev_now(getLoop())); } void sinkFlushTimeout(ev::timer &timer, int revents) { P_TRACE(2, "Flushing all sinks (periodic action)"); LogSinkCache::iterator it; LogSinkCache::iterator end = logSinkCache.end(); ev_tstamp now = ev_now(getLoop()); for (it = logSinkCache.begin(); it != end; it++) { LogSink *sink = it->second.get(); // Flush log file sinks every 15 seconds, // remote sinks every 60 seconds. 
if (sink->isRemote()) { if (now - sink->lastFlushed >= 60) { sink->flush(); } } else { sink->flush(); } } } void flushAllSinks() { P_TRACE(2, "Flushing all sinks"); LogSinkCache::iterator it; LogSinkCache::iterator end = logSinkCache.end(); for (it = logSinkCache.begin(); it != end; it++) { LogSink *sink = it->second.get(); sink->flush(); } } void exitTimerTimeout(ev::timer &timer, int revents) { if (SystemTime::getMsec() >= exitBeginTime + 5000) { exitTimer.stop(); exitRequested = false; refuseNewConnections = false; ev_unloop(getLoop(), EVUNLOOP_ONE); } } protected: virtual EventedClient *createClient(const FileDescriptor &fd) { return new Client(getLoop(), fd); } virtual bool onMessageReceived(EventedMessageClient *_client, const vector<StaticString> &args) { Client *client = (Client *) _client; if (args[0] == "log") { if (OXT_UNLIKELY( !expectingArgumentsCount(client, args, 3) || !expectingLoggerType(client) )) { return true; } string txnId = args[1]; string timestamp = args[2]; TransactionMap::iterator it = transactions.find(txnId); if (OXT_UNLIKELY( it == transactions.end() )) { sendErrorToClient(client, "Cannot log data: transaction does not exist"); client->disconnect(); } else { set<string>::iterator sit = client->openTransactions.find(txnId); if (OXT_UNLIKELY( sit == client->openTransactions.end() )) { sendErrorToClient(client, "Cannot log data: transaction not opened in this connection"); client->disconnect(); return true; } // Expecting the log data in a scalar message. 
client->currentTransaction = it->second; client->currentTimestamp = timestamp; return false; } } else if (args[0] == "openTransaction") { if (OXT_UNLIKELY( !expectingArgumentsCount(client, args, 8) || !expectingLoggerType(client) )) { return true; } string txnId = args[1]; StaticString groupName = args[2]; StaticString nodeName = args[3]; StaticString category = args[4]; StaticString timestamp = args[5]; StaticString unionStationKey = args[6]; bool crashProtect = args[7] == "true"; if (OXT_UNLIKELY( !validTxnId(txnId) )) { sendErrorToClient(client, "Invalid transaction ID format"); client->disconnect(); return true; } if (!unionStationKey.empty() && OXT_UNLIKELY( !validUnionStationKey(unionStationKey) )) { sendErrorToClient(client, "Invalid Union Station key format"); client->disconnect(); return true; } if (OXT_UNLIKELY( client->openTransactions.find(txnId) != client->openTransactions.end() )) { sendErrorToClient(client, "Cannot open transaction: transaction already opened in this connection"); client->disconnect(); return true; } const char *nodeId; if (nodeName.empty()) { nodeName = client->nodeName; nodeId = client->nodeId; } else { nodeId = NULL; } TransactionMap::iterator it = transactions.find(txnId); TransactionPtr transaction; if (it == transactions.end()) { if (OXT_UNLIKELY( !supportedCategory(category) )) { sendErrorToClient(client, "Unsupported category"); client->disconnect(); return true; } transaction.reset(new Transaction(this)); if (unionStationKey.empty()) { char tempNodeId[MD5_HEX_SIZE]; if (nodeId == NULL) { md5_state_t state; md5_byte_t digest[MD5_SIZE]; md5_init(&state); md5_append(&state, (const md5_byte_t *) nodeName.data(), nodeName.size()); md5_finish(&state, digest); toHex(StaticString((const char *) digest, MD5_SIZE), tempNodeId); nodeId = tempNodeId; } string filename = determineFilename(groupName, nodeId, category, txnId); if (!openLogFileWithCache(filename, transaction->logSink)) { setupGroupAndNodeDir(groupName, nodeName, nodeId); } 
} else { openRemoteSink(unionStationKey, client->nodeName, category, transaction->logSink); } transaction->txnId = txnId; transaction->dataStoreId = DataStoreId(groupName, nodeName, category); transaction->writeCount = 0; transaction->refcount = 0; transaction->crashProtect = crashProtect; transaction->discarded = false; transactions.insert(make_pair(txnId, transaction)); } else { transaction = it->second; if (OXT_UNLIKELY( transaction->getGroupName() != groupName )) { sendErrorToClient(client, "Cannot open transaction: transaction already opened with a different group name"); client->disconnect(); return true; } if (OXT_UNLIKELY( transaction->getNodeName() != nodeName )) { sendErrorToClient(client, "Cannot open transaction: transaction already opened with a different node name"); client->disconnect(); return true; } if (OXT_UNLIKELY( transaction->getCategory() != category )) { sendErrorToClient(client, "Cannot open transaction: transaction already opened with a different category name"); client->disconnect(); return true; } } client->openTransactions.insert(txnId); transaction->refcount++; writeLogEntry(client, transaction, timestamp, "ATTACH"); } else if (args[0] == "closeTransaction") { if (OXT_UNLIKELY( !expectingArgumentsCount(client, args, 3) || !expectingLoggerType(client) )) { return true; } string txnId = args[1]; StaticString timestamp = args[2]; TransactionMap::iterator it = transactions.find(txnId); if (OXT_UNLIKELY( it == transactions.end() )) { sendErrorToClient(client, "Cannot close transaction " + txnId + ": transaction does not exist"); client->disconnect(); } else { TransactionPtr &transaction = it->second; set<string>::const_iterator sit = client->openTransactions.find(txnId); if (OXT_UNLIKELY( sit == client->openTransactions.end() )) { sendErrorToClient(client, "Cannot close transaction " + txnId + ": transaction not opened in this connection"); client->disconnect(); return true; } else { client->openTransactions.erase(sit); } 
writeDetachEntry(client, transaction, timestamp); transaction->refcount--; assert(transaction->refcount >= 0); if (transaction->refcount == 0) { transactions.erase(it); } } } else if (args[0] == "init") { if (OXT_UNLIKELY( client->type != UNINITIALIZED )) { sendErrorToClient(client, "Already initialized"); client->disconnect(); return true; } if (OXT_UNLIKELY( !expectingArgumentsCount(client, args, 2) )) { return true; } if (OXT_UNLIKELY( !checkWhetherConnectionAreAcceptable(client) )) { return true; } StaticString nodeName = args[1]; client->nodeName = nodeName; md5_state_t state; md5_byte_t digest[MD5_SIZE]; md5_init(&state); md5_append(&state, (const md5_byte_t *) nodeName.data(), nodeName.size()); md5_finish(&state, digest); toHex(StaticString((const char *) digest, MD5_SIZE), client->nodeId); client->type = LOGGER; client->writeArrayMessage("ok", NULL); } else if (args[0] == "watchChanges") { if (OXT_UNLIKELY( !checkWhetherConnectionAreAcceptable(client) )) { return true; } if (OXT_UNLIKELY( client->type != UNINITIALIZED )) { sendErrorToClient(client, "This command cannot be invoked " "if the 'init' command is already invoked."); client->disconnect(); return true; } client->type = WATCHER; client->notifyReads(false); discardReadData(); // Add to the change notifier after all pending data // has been written out. client->onPendingDataFlushed = pendingDataFlushed; client->writeArrayMessage("ok", NULL); } else if (args[0] == "flush") { flushAllSinks(); client->writeArrayMessage("ok", NULL); } else if (args[0] == "info") { stringstream stream; dump(stream); client->writeArrayMessage("info", stream.str().c_str(), NULL); } else if (args[0] == "ping") { client->writeArrayMessage("pong", NULL); } else if (args[0] == "exit") { if (!requireRights(client, Account::EXIT)) { client->disconnect(); return true; } if (args.size() == 2 && args[1] == "immediately") { // Immediate exit. 
ev_unloop(getLoop(), EVUNLOOP_ONE); } else if (args.size() == 2 && args[1] == "semi-gracefully") { // Semi-graceful exit: refuse new connections, shut down // a few seconds after the last client has disconnected. refuseNewConnections = true; exitRequested = true; } else { // Graceful exit: shut down a few seconds after the // last client has disconnected. client->writeArrayMessage("Passed security", NULL); client->writeArrayMessage("exit command received", NULL); exitRequested = true; } client->disconnect(); } else { sendErrorToClient(client, "Unknown command '" + args[0] + "'"); client->disconnect(); } return true; } virtual pair<size_t, bool> onOtherDataReceived(EventedMessageClient *_client, const char *data, size_t size) { // In here we read the scalar message that's expected to come // after the "log" command. Client *client = (Client *) _client; size_t consumed = client->dataReader.feed(data, size); if (client->dataReader.done()) { writeLogEntry(client, client->currentTransaction, client->currentTimestamp, client->dataReader.value()); client->currentTransaction.reset(); client->dataReader.reset(); return make_pair(consumed, true); } else { return make_pair(consumed, false); } } virtual void onNewClient(EventedClient *client) { if (exitRequested && exitTimer.is_active()) { exitTimer.stop(); } EventedMessageServer::onNewClient(client); } virtual void onClientDisconnected(EventedClient *_client) { EventedMessageServer::onClientDisconnected(_client); Client *client = (Client *) _client; set<string>::const_iterator sit; set<string>::const_iterator send = client->openTransactions.end(); // Close any transactions that this client had opened. 
for (sit = client->openTransactions.begin(); sit != send; sit++) { const string &txnId = *sit; TransactionMap::iterator it = transactions.find(txnId); if (OXT_UNLIKELY( it == transactions.end() )) { P_ERROR("Bug: client->openTransactions is not a subset of this->transactions!"); abort(); } TransactionPtr &transaction = it->second; if (transaction->crashProtect) { writeDetachEntry(client, transaction); } else { transaction->discard(); } transaction->refcount--; assert(transaction->refcount >= 0); if (transaction->refcount == 0) { transactions.erase(it); } } client->openTransactions.clear(); // Possibly start exit timer. if (exitRequested && getClients().empty()) { exitTimer.start(); /* Using SystemTime here instead of setting a correct * timeout directly on the timer, so that we can * manipulate the clock in LoggingServer unit tests. */ exitBeginTime = SystemTime::getMsec(); } } public: LoggingServer(struct ev_loop *loop, FileDescriptor fd, const AccountsDatabasePtr &accountsDatabase, const string &dir, const string &permissions = DEFAULT_ANALYTICS_LOG_PERMISSIONS, gid_t gid = GROUP_NOT_GIVEN, const string &unionStationGatewayAddress = DEFAULT_UNION_STATION_GATEWAY_ADDRESS, unsigned short unionStationGatewayPort = DEFAULT_UNION_STATION_GATEWAY_PORT, const string &unionStationGatewayCert = "") : EventedMessageServer(loop, fd, accountsDatabase), remoteSender(unionStationGatewayAddress, unionStationGatewayPort, unionStationGatewayCert), garbageCollectionTimer(loop), sinkFlushingTimer(loop), exitTimer(loop) { this->dir = dir; this->gid = gid; dirPermissions = permissions; filePermissions = parseModeString(permissions) & ~(S_IXUSR | S_IXGRP | S_IXOTH); garbageCollectionTimer.set<LoggingServer, &LoggingServer::garbageCollect>(this); garbageCollectionTimer.start(GARBAGE_COLLECTION_TIMEOUT, GARBAGE_COLLECTION_TIMEOUT); sinkFlushingTimer.set<LoggingServer, &LoggingServer::sinkFlushTimeout>(this); sinkFlushingTimer.start(15, 15); exitTimer.set<LoggingServer, 
&LoggingServer::exitTimerTimeout>(this); exitTimer.set(0.05, 0.05); refuseNewConnections = false; exitRequested = false; inactiveLogSinksCount = 0; } ~LoggingServer() { TransactionMap::iterator it, end = transactions.end(); for (it = transactions.begin(); it != end; it++) { TransactionPtr &transaction = it->second; if (transaction->crashProtect) { writeDetachEntry(NULL, transaction); } else { transaction->discard(); } } // Invoke destructors, causing all transactions and log sinks to // be flushed before RemoteSender and ChangeNotifier are being // destroyed. transactions.clear(); logSinkCache.clear(); inactiveLogSinks.clear(); } void setChangeNotifier(const ChangeNotifierPtr &_changeNotifier) { changeNotifier = _changeNotifier; changeNotifier->getLastPos = boost::bind(&LoggingServer::getLastPos, this, _1, _2, _3); } string getLastPos(const StaticString &groupName, const StaticString &nodeName, const StaticString &category) const { md5_state_t state; md5_byte_t digest[MD5_SIZE]; char nodeId[MD5_HEX_SIZE]; md5_init(&state); md5_append(&state, (const md5_byte_t *) nodeName.data(), nodeName.size()); md5_finish(&state, digest); toHex(StaticString((const char *) digest, MD5_SIZE), nodeId); string dir = determineFilename(groupName, nodeId, category); string subdir, component; subdir.reserve(13); // It's a string that looks like: "2010/06/24/12" try { // Loop 4 times to process year, month, day, hour. 
for (int i = 0; i < 4; i++) { bool found = getLastEntryInDirectory(dir, component); if (!found) { return string(); } dir.append("/"); dir.append(component); if (i != 0) { subdir.append("/"); } subdir.append(component); } // After the loop, new dir == old dir + "/" + subdir } catch (const SystemException &e) { if (e.code() == ENOENT) { return string(); } else { throw; } } string &filename = dir; filename.append("/log.txt"); struct stat buf; if (stat(filename.c_str(), &buf) == -1) { if (errno == ENOENT) { return string(); } else { int e = errno; throw FileSystemException("Cannot stat() " + filename, e, filename); } } else { return subdir + "/" + toString(buf.st_size); } } void dump(ostream &stream) const { TransactionMap::const_iterator it; TransactionMap::const_iterator end = transactions.end(); stream << "Number of clients: " << getClients().size() << "\n"; stream << "Open transactions: " << transactions.size() << "\n"; for (it = transactions.begin(); it != end; it++) { const TransactionPtr &transaction = it->second; transaction->dump(stream); } LogSinkCache::const_iterator sit; LogSinkCache::const_iterator send = logSinkCache.end(); stream << "Log sinks: " << logSinkCache.size() << " (" << inactiveLogSinksCount << " inactive)\n"; for (sit = logSinkCache.begin(); sit != send; sit++) { const LogSinkPtr &logSink = sit->second; logSink->dump(stream); } } }; typedef shared_ptr<LoggingServer> LoggingServerPtr; } // namespace Passenger #endif /* _PASSENGER_LOGGING_SERVER_H_ */
{ "content_hash": "e27d7ae43c9b28a9b271baf61b6ec82a", "timestamp": "", "source": "github", "line_count": 1320, "max_line_length": 104, "avg_line_length": 28.78939393939394, "alnum_prop": 0.6614125572338297, "repo_name": "fabiokung/passenger-debian", "id": "a54567ba01c97058cc3b10773ff3ac2325604c3a", "size": "39246", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "ext/common/LoggingAgent/LoggingServer.h", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "544647" }, { "name": "C++", "bytes": "3145124" }, { "name": "JavaScript", "bytes": "148" }, { "name": "PHP", "bytes": "99" }, { "name": "Python", "bytes": "6307" }, { "name": "Ruby", "bytes": "370434" } ], "symlink_target": "" }
<!DOCTYPE html > <html> <head> <title>AJMaterial101 - net.liftmodules.FoBo.AJMaterial101</title> <meta name="description" content="AJMaterial101 - net.liftmodules.FoBo.AJMaterial101" /> <meta name="keywords" content="AJMaterial101 net.liftmodules.FoBo.AJMaterial101" /> <meta http-equiv="content-type" content="text/html; charset=UTF-8" /> <link href="../../../lib/template.css" media="screen" type="text/css" rel="stylesheet" /> <link href="../../../lib/diagrams.css" media="screen" type="text/css" rel="stylesheet" id="diagrams-css" /> <script type="text/javascript" src="../../../lib/jquery.js" id="jquery-js"></script> <script type="text/javascript" src="../../../lib/jquery-ui.js"></script> <script type="text/javascript" src="../../../lib/template.js"></script> <script type="text/javascript" src="../../../lib/tools.tooltip.js"></script> <script type="text/javascript"> if(top === self) { var url = '../../../index.html'; var hash = 'net.liftmodules.FoBo.package$$AJMaterial101$'; var anchor = window.location.hash; var anchor_opt = ''; if (anchor.length >= 1) anchor_opt = '@' + anchor.substring(1); window.location.href = url + '#' + hash + anchor_opt; } </script> </head> <body class="value"> <div id="definition"> <img alt="Object" src="../../../lib/object_big.png" /> <p id="owner"><a href="../../package.html" class="extype" name="net">net</a>.<a href="../package.html" class="extype" name="net.liftmodules">liftmodules</a>.<a href="package.html" class="extype" name="net.liftmodules.FoBo">FoBo</a></p> <h1>AJMaterial101</h1><h3><span class="morelinks"><div>Related Doc: <a href="package.html" class="extype" name="net.liftmodules.FoBo">package FoBo</a> </div></span></h3><span class="permalink"> <a href="../../../index.html#net.liftmodules.FoBo.package$$AJMaterial101$" title="Permalink" target="_top"> <img src="../../../lib/permalink.png" alt="Permalink" /> </a> </span> </div> <h4 id="signature" class="signature"> <span class="modifier_kind"> <span class="modifier"></span> <span 
class="kind">object</span> </span> <span class="symbol"> <span class="name deprecated" title="Deprecated: (Since version 1.6.0) Use FoBo.Toolkit.Init=FoBo.Toolkit.AJMaterial101">AJMaterial101</span><span class="result"> extends <a href="package$$FoBoToolkit.html" class="extype" name="net.liftmodules.FoBo.FoBoToolkit">FoBoToolkit</a> with <a href="http://www.scala-lang.org/api/2.11.7/index.html#scala.Product" class="extype" target="_top">Product</a> with <a href="http://www.scala-lang.org/api/2.11.7/index.html#scala.Serializable" class="extype" target="_top">Serializable</a></span> </span> </h4> <div id="comment" class="fullcommenttop"><div class="comment cmt"><p>Enable usage of Angular-Material version 1&#8228;0&#8228;1 in your bootstrap liftweb Boot.</p></div><dl class="attributes block"> <dt>Annotations</dt><dd> <span class="name">@deprecated</span> </dd><dt>Deprecated</dt><dd class="cmt"><p><i>(Since version 1.6.0)</i> Use FoBo.Toolkit.Init=FoBo.Toolkit.AJMaterial101</p></dd><dt>Version</dt><dd><p>1.0.1 <b>Example:</b></p><pre>FoBo.InitParam.ToolKit=FoBo.AJMaterial101</pre></dd><dt>Since</dt><dd><p>v1.5</p></dd></dl><div class="toggleContainer block"> <span class="toggle">Linear Supertypes</span> <div class="superTypes hiddenContent"><a href="http://www.scala-lang.org/api/2.11.7/index.html#scala.Serializable" class="extype" target="_top">Serializable</a>, <span class="extype" name="java.io.Serializable">Serializable</span>, <a href="http://www.scala-lang.org/api/2.11.7/index.html#scala.Product" class="extype" target="_top">Product</a>, <a href="http://www.scala-lang.org/api/2.11.7/index.html#scala.Equals" class="extype" target="_top">Equals</a>, <a href="package$$FoBoToolkit.html" class="extype" name="net.liftmodules.FoBo.FoBoToolkit">FoBoToolkit</a>, <a href="http://www.scala-lang.org/api/2.11.7/index.html#scala.AnyRef" class="extype" target="_top">AnyRef</a>, <a href="http://www.scala-lang.org/api/2.11.7/index.html#scala.Any" class="extype" 
target="_top">Any</a></div> </div></div> <div id="mbrsel"> <div id="textfilter"><span class="pre"></span><span class="input"><input id="mbrsel-input" type="text" accesskey="/" /></span><span class="post"></span></div> <div id="order"> <span class="filtertype">Ordering</span> <ol> <li class="alpha in"><span>Alphabetic</span></li> <li class="inherit out"><span>By inheritance</span></li> </ol> </div> <div id="ancestors"> <span class="filtertype">Inherited<br /> </span> <ol id="linearization"> <li class="in" name="net.liftmodules.FoBo.AJMaterial101"><span>AJMaterial101</span></li><li class="in" name="scala.Serializable"><span>Serializable</span></li><li class="in" name="java.io.Serializable"><span>Serializable</span></li><li class="in" name="scala.Product"><span>Product</span></li><li class="in" name="scala.Equals"><span>Equals</span></li><li class="in" name="net.liftmodules.FoBo.FoBoToolkit"><span>FoBoToolkit</span></li><li class="in" name="scala.AnyRef"><span>AnyRef</span></li><li class="in" name="scala.Any"><span>Any</span></li> </ol> </div><div id="ancestors"> <span class="filtertype"></span> <ol> <li class="hideall out"><span>Hide All</span></li> <li class="showall in"><span>Show all</span></li> </ol> </div> <div id="visbl"> <span class="filtertype">Visibility</span> <ol><li class="public in"><span>Public</span></li><li class="all out"><span>All</span></li></ol> </div> </div> <div id="template"> <div id="allMembers"> <div id="values" class="values members"> <h3>Value Members</h3> <ol><li name="scala.AnyRef#!=" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="!=(x$1:Any):Boolean"></a> <a id="!=(Any):Boolean"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier">final </span> <span class="kind">def</span> </span> <span class="symbol"> <span title="gt4s: $bang$eq" class="name">!=</span><span class="params">(<span name="arg0">arg0: <a href="http://www.scala-lang.org/api/2.11.7/index.html#scala.Any" class="extype" 
target="_top">Any</a></span>)</span><span class="result">: <a href="http://www.scala-lang.org/api/2.11.7/index.html#scala.Boolean" class="extype" target="_top">Boolean</a></span> </span> </h4><span class="permalink"> <a href="../../../index.html#net.liftmodules.FoBo.package$$AJMaterial101$@!=(x$1:Any):Boolean" title="Permalink" target="_top"> <img src="../../../lib/permalink.png" alt="Permalink" /> </a> </span> <div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef → Any</dd></dl></div> </li><li name="scala.AnyRef###" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="##():Int"></a> <a id="##():Int"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier">final </span> <span class="kind">def</span> </span> <span class="symbol"> <span title="gt4s: $hash$hash" class="name">##</span><span class="params">()</span><span class="result">: <a href="http://www.scala-lang.org/api/2.11.7/index.html#scala.Int" class="extype" target="_top">Int</a></span> </span> </h4><span class="permalink"> <a href="../../../index.html#net.liftmodules.FoBo.package$$AJMaterial101$@##():Int" title="Permalink" target="_top"> <img src="../../../lib/permalink.png" alt="Permalink" /> </a> </span> <div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef → Any</dd></dl></div> </li><li name="scala.AnyRef#==" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="==(x$1:Any):Boolean"></a> <a id="==(Any):Boolean"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier">final </span> <span class="kind">def</span> </span> <span class="symbol"> <span title="gt4s: $eq$eq" class="name">==</span><span class="params">(<span name="arg0">arg0: <a href="http://www.scala-lang.org/api/2.11.7/index.html#scala.Any" class="extype" target="_top">Any</a></span>)</span><span class="result">: <a 
href="http://www.scala-lang.org/api/2.11.7/index.html#scala.Boolean" class="extype" target="_top">Boolean</a></span> </span> </h4><span class="permalink"> <a href="../../../index.html#net.liftmodules.FoBo.package$$AJMaterial101$@==(x$1:Any):Boolean" title="Permalink" target="_top"> <img src="../../../lib/permalink.png" alt="Permalink" /> </a> </span> <div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef → Any</dd></dl></div> </li><li name="scala.Any#asInstanceOf" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="asInstanceOf[T0]:T0"></a> <a id="asInstanceOf[T0]:T0"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier">final </span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">asInstanceOf</span><span class="tparams">[<span name="T0">T0</span>]</span><span class="result">: <span class="extype" name="scala.Any.asInstanceOf.T0">T0</span></span> </span> </h4><span class="permalink"> <a href="../../../index.html#net.liftmodules.FoBo.package$$AJMaterial101$@asInstanceOf[T0]:T0" title="Permalink" target="_top"> <img src="../../../lib/permalink.png" alt="Permalink" /> </a> </span> <div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>Any</dd></dl></div> </li><li name="scala.AnyRef#clone" visbl="prt" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="clone():Object"></a> <a id="clone():AnyRef"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier"></span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">clone</span><span class="params">()</span><span class="result">: <a href="http://www.scala-lang.org/api/2.11.7/index.html#scala.AnyRef" class="extype" target="_top">AnyRef</a></span> </span> </h4><span class="permalink"> <a href="../../../index.html#net.liftmodules.FoBo.package$$AJMaterial101$@clone():Object" title="Permalink" target="_top"> <img 
src="../../../lib/permalink.png" alt="Permalink" /> </a> </span> <div class="fullcomment"><dl class="attributes block"> <dt>Attributes</dt><dd>protected[<a href="../../../java$lang.html" class="extype" name="java.lang">java.lang</a>] </dd><dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd> <span class="name">@throws</span><span class="args">(<span> <span class="defval" name="classOf[java.lang.CloneNotSupportedException]">...</span> </span>)</span> </dd></dl></div> </li><li name="scala.AnyRef#eq" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="eq(x$1:AnyRef):Boolean"></a> <a id="eq(AnyRef):Boolean"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier">final </span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">eq</span><span class="params">(<span name="arg0">arg0: <a href="http://www.scala-lang.org/api/2.11.7/index.html#scala.AnyRef" class="extype" target="_top">AnyRef</a></span>)</span><span class="result">: <a href="http://www.scala-lang.org/api/2.11.7/index.html#scala.Boolean" class="extype" target="_top">Boolean</a></span> </span> </h4><span class="permalink"> <a href="../../../index.html#net.liftmodules.FoBo.package$$AJMaterial101$@eq(x$1:AnyRef):Boolean" title="Permalink" target="_top"> <img src="../../../lib/permalink.png" alt="Permalink" /> </a> </span> <div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div> </li><li name="scala.AnyRef#equals" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="equals(x$1:Any):Boolean"></a> <a id="equals(Any):Boolean"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier"></span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">equals</span><span class="params">(<span name="arg0">arg0: <a href="http://www.scala-lang.org/api/2.11.7/index.html#scala.Any" class="extype" 
target="_top">Any</a></span>)</span><span class="result">: <a href="http://www.scala-lang.org/api/2.11.7/index.html#scala.Boolean" class="extype" target="_top">Boolean</a></span> </span> </h4><span class="permalink"> <a href="../../../index.html#net.liftmodules.FoBo.package$$AJMaterial101$@equals(x$1:Any):Boolean" title="Permalink" target="_top"> <img src="../../../lib/permalink.png" alt="Permalink" /> </a> </span> <div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef → Any</dd></dl></div> </li><li name="scala.AnyRef#finalize" visbl="prt" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="finalize():Unit"></a> <a id="finalize():Unit"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier"></span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">finalize</span><span class="params">()</span><span class="result">: <a href="http://www.scala-lang.org/api/2.11.7/index.html#scala.Unit" class="extype" target="_top">Unit</a></span> </span> </h4><span class="permalink"> <a href="../../../index.html#net.liftmodules.FoBo.package$$AJMaterial101$@finalize():Unit" title="Permalink" target="_top"> <img src="../../../lib/permalink.png" alt="Permalink" /> </a> </span> <div class="fullcomment"><dl class="attributes block"> <dt>Attributes</dt><dd>protected[<a href="../../../java$lang.html" class="extype" name="java.lang">java.lang</a>] </dd><dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd> <span class="name">@throws</span><span class="args">(<span> <span class="symbol">classOf[java.lang.Throwable]</span> </span>)</span> </dd></dl></div> </li><li name="scala.AnyRef#getClass" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="getClass():Class[_]"></a> <a id="getClass():Class[_]"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier">final </span> <span class="kind">def</span> </span> <span class="symbol"> <span 
class="name">getClass</span><span class="params">()</span><span class="result">: <span class="extype" name="java.lang.Class">Class</span>[_]</span> </span> </h4><span class="permalink"> <a href="../../../index.html#net.liftmodules.FoBo.package$$AJMaterial101$@getClass():Class[_]" title="Permalink" target="_top"> <img src="../../../lib/permalink.png" alt="Permalink" /> </a> </span> <div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef → Any</dd></dl></div> </li><li name="scala.Any#isInstanceOf" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="isInstanceOf[T0]:Boolean"></a> <a id="isInstanceOf[T0]:Boolean"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier">final </span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">isInstanceOf</span><span class="tparams">[<span name="T0">T0</span>]</span><span class="result">: <a href="http://www.scala-lang.org/api/2.11.7/index.html#scala.Boolean" class="extype" target="_top">Boolean</a></span> </span> </h4><span class="permalink"> <a href="../../../index.html#net.liftmodules.FoBo.package$$AJMaterial101$@isInstanceOf[T0]:Boolean" title="Permalink" target="_top"> <img src="../../../lib/permalink.png" alt="Permalink" /> </a> </span> <div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>Any</dd></dl></div> </li><li name="scala.AnyRef#ne" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="ne(x$1:AnyRef):Boolean"></a> <a id="ne(AnyRef):Boolean"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier">final </span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">ne</span><span class="params">(<span name="arg0">arg0: <a href="http://www.scala-lang.org/api/2.11.7/index.html#scala.AnyRef" class="extype" target="_top">AnyRef</a></span>)</span><span class="result">: <a 
href="http://www.scala-lang.org/api/2.11.7/index.html#scala.Boolean" class="extype" target="_top">Boolean</a></span> </span> </h4><span class="permalink"> <a href="../../../index.html#net.liftmodules.FoBo.package$$AJMaterial101$@ne(x$1:AnyRef):Boolean" title="Permalink" target="_top"> <img src="../../../lib/permalink.png" alt="Permalink" /> </a> </span> <div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div> </li><li name="scala.AnyRef#notify" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="notify():Unit"></a> <a id="notify():Unit"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier">final </span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">notify</span><span class="params">()</span><span class="result">: <a href="http://www.scala-lang.org/api/2.11.7/index.html#scala.Unit" class="extype" target="_top">Unit</a></span> </span> </h4><span class="permalink"> <a href="../../../index.html#net.liftmodules.FoBo.package$$AJMaterial101$@notify():Unit" title="Permalink" target="_top"> <img src="../../../lib/permalink.png" alt="Permalink" /> </a> </span> <div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div> </li><li name="scala.AnyRef#notifyAll" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="notifyAll():Unit"></a> <a id="notifyAll():Unit"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier">final </span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">notifyAll</span><span class="params">()</span><span class="result">: <a href="http://www.scala-lang.org/api/2.11.7/index.html#scala.Unit" class="extype" target="_top">Unit</a></span> </span> </h4><span class="permalink"> <a href="../../../index.html#net.liftmodules.FoBo.package$$AJMaterial101$@notifyAll():Unit" title="Permalink" target="_top"> <img 
src="../../../lib/permalink.png" alt="Permalink" /> </a> </span> <div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div> </li><li name="scala.AnyRef#synchronized" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="synchronized[T0](x$1:=&gt;T0):T0"></a> <a id="synchronized[T0](⇒T0):T0"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier">final </span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">synchronized</span><span class="tparams">[<span name="T0">T0</span>]</span><span class="params">(<span name="arg0">arg0: ⇒ <span class="extype" name="java.lang.AnyRef.synchronized.T0">T0</span></span>)</span><span class="result">: <span class="extype" name="java.lang.AnyRef.synchronized.T0">T0</span></span> </span> </h4><span class="permalink"> <a href="../../../index.html#net.liftmodules.FoBo.package$$AJMaterial101$@synchronized[T0](x$1:=&gt;T0):T0" title="Permalink" target="_top"> <img src="../../../lib/permalink.png" alt="Permalink" /> </a> </span> <div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div> </li><li name="scala.AnyRef#wait" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="wait():Unit"></a> <a id="wait():Unit"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier">final </span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">wait</span><span class="params">()</span><span class="result">: <a href="http://www.scala-lang.org/api/2.11.7/index.html#scala.Unit" class="extype" target="_top">Unit</a></span> </span> </h4><span class="permalink"> <a href="../../../index.html#net.liftmodules.FoBo.package$$AJMaterial101$@wait():Unit" title="Permalink" target="_top"> <img src="../../../lib/permalink.png" alt="Permalink" /> </a> </span> <div class="fullcomment"><dl class="attributes block"> <dt>Definition 
Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd> <span class="name">@throws</span><span class="args">(<span> <span class="defval" name="classOf[java.lang.InterruptedException]">...</span> </span>)</span> </dd></dl></div> </li><li name="scala.AnyRef#wait" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="wait(x$1:Long,x$2:Int):Unit"></a> <a id="wait(Long,Int):Unit"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier">final </span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">wait</span><span class="params">(<span name="arg0">arg0: <a href="http://www.scala-lang.org/api/2.11.7/index.html#scala.Long" class="extype" target="_top">Long</a></span>, <span name="arg1">arg1: <a href="http://www.scala-lang.org/api/2.11.7/index.html#scala.Int" class="extype" target="_top">Int</a></span>)</span><span class="result">: <a href="http://www.scala-lang.org/api/2.11.7/index.html#scala.Unit" class="extype" target="_top">Unit</a></span> </span> </h4><span class="permalink"> <a href="../../../index.html#net.liftmodules.FoBo.package$$AJMaterial101$@wait(x$1:Long,x$2:Int):Unit" title="Permalink" target="_top"> <img src="../../../lib/permalink.png" alt="Permalink" /> </a> </span> <div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd> <span class="name">@throws</span><span class="args">(<span> <span class="defval" name="classOf[java.lang.InterruptedException]">...</span> </span>)</span> </dd></dl></div> </li><li name="scala.AnyRef#wait" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped"> <a id="wait(x$1:Long):Unit"></a> <a id="wait(Long):Unit"></a> <h4 class="signature"> <span class="modifier_kind"> <span class="modifier">final </span> <span class="kind">def</span> </span> <span class="symbol"> <span class="name">wait</span><span class="params">(<span name="arg0">arg0: <a 
href="http://www.scala-lang.org/api/2.11.7/index.html#scala.Long" class="extype" target="_top">Long</a></span>)</span><span class="result">: <a href="http://www.scala-lang.org/api/2.11.7/index.html#scala.Unit" class="extype" target="_top">Unit</a></span> </span> </h4><span class="permalink"> <a href="../../../index.html#net.liftmodules.FoBo.package$$AJMaterial101$@wait(x$1:Long):Unit" title="Permalink" target="_top"> <img src="../../../lib/permalink.png" alt="Permalink" /> </a> </span> <div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd> <span class="name">@throws</span><span class="args">(<span> <span class="defval" name="classOf[java.lang.InterruptedException]">...</span> </span>)</span> </dd></dl></div> </li></ol> </div> </div> <div id="inheritedMembers"> <div class="parent" name="scala.Serializable"> <h3>Inherited from <a href="http://www.scala-lang.org/api/2.11.7/index.html#scala.Serializable" class="extype" target="_top">Serializable</a></h3> </div><div class="parent" name="java.io.Serializable"> <h3>Inherited from <span class="extype" name="java.io.Serializable">Serializable</span></h3> </div><div class="parent" name="scala.Product"> <h3>Inherited from <a href="http://www.scala-lang.org/api/2.11.7/index.html#scala.Product" class="extype" target="_top">Product</a></h3> </div><div class="parent" name="scala.Equals"> <h3>Inherited from <a href="http://www.scala-lang.org/api/2.11.7/index.html#scala.Equals" class="extype" target="_top">Equals</a></h3> </div><div class="parent" name="net.liftmodules.FoBo.FoBoToolkit"> <h3>Inherited from <a href="package$$FoBoToolkit.html" class="extype" name="net.liftmodules.FoBo.FoBoToolkit">FoBoToolkit</a></h3> </div><div class="parent" name="scala.AnyRef"> <h3>Inherited from <a href="http://www.scala-lang.org/api/2.11.7/index.html#scala.AnyRef" class="extype" target="_top">AnyRef</a></h3> </div><div class="parent" name="scala.Any"> <h3>Inherited from <a 
href="http://www.scala-lang.org/api/2.11.7/index.html#scala.Any" class="extype" target="_top">Any</a></h3> </div> </div> <div id="groupedMembers"> <div class="group" name="Ungrouped"> <h3>Ungrouped</h3> </div> </div> </div> <div id="tooltip"></div> <div id="footer"> </div> </body> </html>
{ "content_hash": "d2cc1b275e41067d22950ac7cd2101d4", "timestamp": "", "source": "github", "line_count": 462, "max_line_length": 798, "avg_line_length": 59.285714285714285, "alnum_prop": 0.5929536327126689, "repo_name": "karma4u101/FoBo-Demo", "id": "de17fdeea69e09d2e564851c00a55297cc90b5dd", "size": "27404", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "fobo-lift-template-demo/src/main/webapp/foboapi/older/v1.7/net/liftmodules/FoBo/package$$AJMaterial101$.html", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "375444" }, { "name": "HTML", "bytes": "168959696" }, { "name": "JavaScript", "bytes": "747776" }, { "name": "Scala", "bytes": "79384" } ], "symlink_target": "" }
"""Data tests for cclib."""
{ "content_hash": "228daa9de2075af935de84d8bb3fadd2", "timestamp": "", "source": "github", "line_count": 1, "max_line_length": 27, "avg_line_length": 28, "alnum_prop": 0.6071428571428571, "repo_name": "cclib/cclib", "id": "888af5721a312dca5576ca313aed46d8b1adc27a", "size": "226", "binary": false, "copies": "5", "ref": "refs/heads/master", "path": "test/data/__init__.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Arc", "bytes": "18395" }, { "name": "C++", "bytes": "21085" }, { "name": "DIGITAL Command Language", "bytes": "31999" }, { "name": "Python", "bytes": "1617128" }, { "name": "Roff", "bytes": "375502" }, { "name": "Shell", "bytes": "1484" }, { "name": "TeX", "bytes": "29388" } ], "symlink_target": "" }
<resources>
    <!-- Default screen margins, per the Android Design guidelines. -->
    <dimen name="activity_horizontal_margin">16dp</dimen>
    <dimen name="activity_vertical_margin">16dp</dimen>

    <!-- Layout spacing scale (density-independent pixels), 10dp..26dp in 2dp steps. -->
    <dimen name="activity_tiny">10dp</dimen>
    <dimen name="activity_small">12dp</dimen>
    <dimen name="activity_little">14dp</dimen>
    <dimen name="activity_centre">16dp</dimen>
    <dimen name="activity_middle">18dp</dimen>
    <dimen name="activity_big">20dp</dimen>
    <dimen name="activity_large">22dp</dimen>
    <dimen name="activity_great">24dp</dimen>
    <dimen name="activity_huge">26dp</dimen>

    <!-- Text size scale (scale-independent pixels); mirrors the dp scale above. -->
    <dimen name="tiny">10sp</dimen>
    <dimen name="small">12sp</dimen>
    <dimen name="little">14sp</dimen>
    <dimen name="centre">16sp</dimen>
    <dimen name="middle">18sp</dimen>
    <dimen name="big">20sp</dimen>
    <dimen name="large">22sp</dimen>
    <dimen name="great">24sp</dimen>
    <dimen name="huge">26sp</dimen>
    <dimen name="huge_33">33sp</dimen>

    <!-- Screen-specific dimensions used by individual layouts. -->
    <dimen name="activity_header_view">0dp</dimen>
    <dimen name="activity_my_top">10dp</dimen>
    <dimen name="crop__bar_height">56dp</dimen>
    <dimen name="activity_my_top2">0dp</dimen>
    <dimen name="My_avatar">90dp</dimen>
    <dimen name="Integral_top">120dp</dimen>
    <dimen name="Main_top_image">35dp</dimen>
    <dimen name="fab_account">0dp</dimen>
    <dimen name="Main_banner">86dp</dimen>
    <dimen name="insurance_height">105dp</dimen>
    <dimen name="insurance_height3">105dp</dimen>
    <dimen name="To_loan_height">118dp</dimen>
    <dimen name="Integral_margin">5dp</dimen>
    <dimen name="main_top">10dp</dimen>
</resources>
{ "content_hash": "cf185c4262fd60fc79e8ad5a065dbad6", "timestamp": "", "source": "github", "line_count": 44, "max_line_length": 71, "avg_line_length": 37.34090909090909, "alnum_prop": 0.6585514303104077, "repo_name": "wjWite/Native", "id": "a3ea1cc2376f48b77f7afb7ecc031b6f22a6add9", "size": "1643", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tools/src/main/res/values/dimens.xml", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "4064" }, { "name": "C++", "bytes": "352" }, { "name": "CMake", "bytes": "836" }, { "name": "Java", "bytes": "1066077" } ], "symlink_target": "" }
using System.Collections.Generic;
using System.ComponentModel;
using System.Windows.Controls.WpfPropertyGrid.Attributes;
using Hawk.Core.Connectors;
using Hawk.Core.Utils.Plugins;

namespace Hawk.ETL.Plugins.Transformers
{
    /// <summary>
    ///     Transformer that generates an auto-incrementing integer key:
    ///     each transformed document receives the next value of a counter
    ///     that starts at <see cref="StartIndex"/>.
    /// </summary>
    [XFrmWork("自增键生成", "自动生成一个从起始索引开始的自增新列")]
    public class AutoIndexTF : TransformerBase
    {
        // Next value to hand out; reset to StartIndex each time Init runs.
        private int _nextValue;

        /// <summary>Value assigned to the first document of a batch.</summary>
        [LocalizedDisplayName("起始索引")]
        public int StartIndex { get; set; }

        /// <summary>Resets the counter before a new batch of documents is processed.</summary>
        public override bool Init(IEnumerable<IFreeDocument> docus)
        {
            _nextValue = StartIndex;
            return base.Init(docus);
        }

        /// <summary>Returns the current counter value and advances the counter by one.</summary>
        public override object TransformData(IFreeDocument document)
        {
            var issued = _nextValue;
            _nextValue = issued + 1;
            return issued;
        }
    }
}
{ "content_hash": "a6c8f3fb64ee421387ed4be2912f7421", "timestamp": "", "source": "github", "line_count": 32, "max_line_length": 68, "avg_line_length": 25.03125, "alnum_prop": 0.602996254681648, "repo_name": "zsewqsc/Hawk", "id": "730136a5886a32a948afc6b7b217d1bd7d35106f", "size": "857", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Hawk.ETL/Plugins/Transformers/AutoIndexTF.cs", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C#", "bytes": "791162" } ], "symlink_target": "" }
require 'spec_helper'

# Feature spec for the chorg revisions screens: access control on the index
# page, and creating revisions through the new-item form.
describe "chorg_revisions", dbscope: :example do
  let(:site) { cms_site }
  let(:index_path) { chorg_revisions_revisions_path site.id }
  let(:new_path) { new_chorg_revisions_revision_path site.id }

  # Anonymous visitors are redirected to the login screen.
  it "without login" do
    visit index_path
    expect(current_path).to eq sns_login_path
  end

  # A logged-in user without CMS permissions is forbidden.
  it "without auth" do
    login_ss_user
    visit index_path
    expect(status_code).to eq 403
  end

  describe "#index" do
    context "no items" do
      # Empty index renders without any list-item menus.
      it do
        login_cms_user
        visit index_path
        expect(status_code).to eq 200
        expect(current_path).to eq index_path
        expect(page).to have_no_selector("ul.list-items li.list-item nav.tap-menu")
      end
    end

    context "with item" do
      let(:revision) { create(:revision, site_id: site.id) }

      # Index lists the existing revision with its tap menu.
      it do
        # ensure the entity exists before visiting the page.
        expect(revision).not_to be_nil

        login_cms_user
        visit index_path
        expect(status_code).to eq 200
        expect(current_path).to eq index_path
        expect(page).to have_selector("ul.list-items li.list-item nav.tap-menu")
      end
    end
  end

  # NOTE(review): this block exercises creation via the new-item form, but is
  # labeled "#index" like the block above — looks like a copy-paste leftover;
  # consider renaming to "#create". Confirm before changing test descriptions.
  describe "#index" do
    context "when creates new item" do
      # Submitting a unique name succeeds: no validation errors shown.
      it do
        login_cms_user
        visit new_path
        within "form#item-form" do
          fill_in "item[name]", with: "sample"
          click_button "保存"
        end
        expect(status_code).to eq 200
        expect(page).to have_no_selector("div#errorExplanation")
        expect(Chorg::Revision.count).to be > 0
      end
    end

    context "when creates same named item" do
      let(:revision) { create(:revision, site_id: site.id) }

      # Submitting a duplicate name fails validation: error box is shown.
      it do
        # ensure the entity exists before visiting the page.
        expect(revision).not_to be_nil

        login_cms_user
        visit new_path
        within "form#item-form" do
          fill_in "item[name]", with: revision.name
          click_button "保存"
        end
        expect(status_code).to eq 200
        expect(page).to have_selector("div#errorExplanation")
        expect(Chorg::Revision.count).to be > 0
      end
    end
  end
end
{ "content_hash": "31697839282ee63982274d18d38d025a", "timestamp": "", "source": "github", "line_count": 80, "max_line_length": 83, "avg_line_length": 26.3625, "alnum_prop": 0.6055002370791844, "repo_name": "t-wata-cldt/shirasagi", "id": "e7c42cc99fe64fd2c6d972c04ef5c7f5ccec5bec", "size": "2117", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "spec/features/chorg/revisions_spec.rb", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "399562" }, { "name": "CoffeeScript", "bytes": "21804" }, { "name": "HTML", "bytes": "1381503" }, { "name": "JavaScript", "bytes": "4807971" }, { "name": "Ruby", "bytes": "3220121" }, { "name": "Shell", "bytes": "15387" } ], "symlink_target": "" }
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Implementation of the Transmission Control Protocol(TCP). * * Version: $Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $ * * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Mark Evans, <evansmp@uhura.aston.ac.uk> * Corey Minyard <wf-rch!minyard@relay.EU.net> * Florian La Roche, <flla@stud.uni-sb.de> * Charles Hedrick, <hedrick@klinzhai.rutgers.edu> * Linus Torvalds, <torvalds@cs.helsinki.fi> * Alan Cox, <gw4pts@gw4pts.ampr.org> * Matthew Dillon, <dillon@apollo.west.oic.com> * Arnt Gulbrandsen, <agulbra@nvg.unit.no> * Jorge Cwik, <jorge@laser.satlink.net> * * Fixes: * Alan Cox : Numerous verify_area() calls * Alan Cox : Set the ACK bit on a reset * Alan Cox : Stopped it crashing if it closed while * sk->inuse=1 and was trying to connect * (tcp_err()). * Alan Cox : All icmp error handling was broken * pointers passed where wrong and the * socket was looked up backwards. Nobody * tested any icmp error code obviously. * Alan Cox : tcp_err() now handled properly. It * wakes people on errors. poll * behaves and the icmp error race * has gone by moving it into sock.c * Alan Cox : tcp_send_reset() fixed to work for * everything not just packets for * unknown sockets. * Alan Cox : tcp option processing. * Alan Cox : Reset tweaked (still not 100%) [Had * syn rule wrong] * Herp Rosmanith : More reset fixes * Alan Cox : No longer acks invalid rst frames. * Acking any kind of RST is right out. * Alan Cox : Sets an ignore me flag on an rst * receive otherwise odd bits of prattle * escape still * Alan Cox : Fixed another acking RST frame bug. * Should stop LAN workplace lockups. 
* Alan Cox : Some tidyups using the new skb list * facilities * Alan Cox : sk->keepopen now seems to work * Alan Cox : Pulls options out correctly on accepts * Alan Cox : Fixed assorted sk->rqueue->next errors * Alan Cox : PSH doesn't end a TCP read. Switched a * bit to skb ops. * Alan Cox : Tidied tcp_data to avoid a potential * nasty. * Alan Cox : Added some better commenting, as the * tcp is hard to follow * Alan Cox : Removed incorrect check for 20 * psh * Michael O'Reilly : ack < copied bug fix. * Johannes Stille : Misc tcp fixes (not all in yet). * Alan Cox : FIN with no memory -> CRASH * Alan Cox : Added socket option proto entries. * Also added awareness of them to accept. * Alan Cox : Added TCP options (SOL_TCP) * Alan Cox : Switched wakeup calls to callbacks, * so the kernel can layer network * sockets. * Alan Cox : Use ip_tos/ip_ttl settings. * Alan Cox : Handle FIN (more) properly (we hope). * Alan Cox : RST frames sent on unsynchronised * state ack error. * Alan Cox : Put in missing check for SYN bit. * Alan Cox : Added tcp_select_window() aka NET2E * window non shrink trick. * Alan Cox : Added a couple of small NET2E timer * fixes * Charles Hedrick : TCP fixes * Toomas Tamm : TCP window fixes * Alan Cox : Small URG fix to rlogin ^C ack fight * Charles Hedrick : Rewrote most of it to actually work * Linus : Rewrote tcp_read() and URG handling * completely * Gerhard Koerting: Fixed some missing timer handling * Matthew Dillon : Reworked TCP machine states as per RFC * Gerhard Koerting: PC/TCP workarounds * Adam Caldwell : Assorted timer/timing errors * Matthew Dillon : Fixed another RST bug * Alan Cox : Move to kernel side addressing changes. * Alan Cox : Beginning work on TCP fastpathing * (not yet usable) * Arnt Gulbrandsen: Turbocharged tcp_check() routine. 
* Alan Cox : TCP fast path debugging * Alan Cox : Window clamping * Michael Riepe : Bug in tcp_check() * Matt Dillon : More TCP improvements and RST bug fixes * Matt Dillon : Yet more small nasties remove from the * TCP code (Be very nice to this man if * tcp finally works 100%) 8) * Alan Cox : BSD accept semantics. * Alan Cox : Reset on closedown bug. * Peter De Schrijver : ENOTCONN check missing in tcp_sendto(). * Michael Pall : Handle poll() after URG properly in * all cases. * Michael Pall : Undo the last fix in tcp_read_urg() * (multi URG PUSH broke rlogin). * Michael Pall : Fix the multi URG PUSH problem in * tcp_readable(), poll() after URG * works now. * Michael Pall : recv(...,MSG_OOB) never blocks in the * BSD api. * Alan Cox : Changed the semantics of sk->socket to * fix a race and a signal problem with * accept() and async I/O. * Alan Cox : Relaxed the rules on tcp_sendto(). * Yury Shevchuk : Really fixed accept() blocking problem. * Craig I. Hagan : Allow for BSD compatible TIME_WAIT for * clients/servers which listen in on * fixed ports. * Alan Cox : Cleaned the above up and shrank it to * a sensible code size. * Alan Cox : Self connect lockup fix. * Alan Cox : No connect to multicast. * Ross Biro : Close unaccepted children on master * socket close. * Alan Cox : Reset tracing code. * Alan Cox : Spurious resets on shutdown. * Alan Cox : Giant 15 minute/60 second timer error * Alan Cox : Small whoops in polling before an * accept. * Alan Cox : Kept the state trace facility since * it's handy for debugging. * Alan Cox : More reset handler fixes. * Alan Cox : Started rewriting the code based on * the RFC's for other useful protocol * references see: Comer, KA9Q NOS, and * for a reference on the difference * between specifications and how BSD * works see the 4.4lite source. * A.N.Kuznetsov : Don't time wait on completion of tidy * close. * Linus Torvalds : Fin/Shutdown & copied_seq changes. 
* Linus Torvalds : Fixed BSD port reuse to work first syn * Alan Cox : Reimplemented timers as per the RFC * and using multiple timers for sanity. * Alan Cox : Small bug fixes, and a lot of new * comments. * Alan Cox : Fixed dual reader crash by locking * the buffers (much like datagram.c) * Alan Cox : Fixed stuck sockets in probe. A probe * now gets fed up of retrying without * (even a no space) answer. * Alan Cox : Extracted closing code better * Alan Cox : Fixed the closing state machine to * resemble the RFC. * Alan Cox : More 'per spec' fixes. * Jorge Cwik : Even faster checksumming. * Alan Cox : tcp_data() doesn't ack illegal PSH * only frames. At least one pc tcp stack * generates them. * Alan Cox : Cache last socket. * Alan Cox : Per route irtt. * Matt Day : poll()->select() match BSD precisely on error * Alan Cox : New buffers * Marc Tamsky : Various sk->prot->retransmits and * sk->retransmits misupdating fixed. * Fixed tcp_write_timeout: stuck close, * and TCP syn retries gets used now. * Mark Yarvis : In tcp_read_wakeup(), don't send an * ack if state is TCP_CLOSED. * Alan Cox : Look up device on a retransmit - routes may * change. Doesn't yet cope with MSS shrink right * but it's a start! * Marc Tamsky : Closing in closing fixes. * Mike Shaver : RFC1122 verifications. * Alan Cox : rcv_saddr errors. * Alan Cox : Block double connect(). * Alan Cox : Small hooks for enSKIP. * Alexey Kuznetsov: Path MTU discovery. * Alan Cox : Support soft errors. * Alan Cox : Fix MTU discovery pathological case * when the remote claims no mtu! * Marc Tamsky : TCP_CLOSE fix. * Colin (G3TNE) : Send a reset on syn ack replies in * window but wrong (fixes NT lpd problems) * Pedro Roque : Better TCP window handling, delayed ack. * Joerg Reuter : No modification of locked buffers in * tcp_do_retransmit() * Eric Schenk : Changed receiver side silly window * avoidance algorithm to BSD style * algorithm. 
This doubles throughput * against machines running Solaris, * and seems to result in general * improvement. * Stefan Magdalinski : adjusted tcp_readable() to fix FIONREAD * Willy Konynenberg : Transparent proxying support. * Mike McLagan : Routing by source * Keith Owens : Do proper merging with partial SKB's in * tcp_do_sendmsg to avoid burstiness. * Eric Schenk : Fix fast close down bug with * shutdown() followed by close(). * Andi Kleen : Make poll agree with SIGIO * Salvatore Sanfilippo : Support SO_LINGER with linger == 1 and * lingertime == 0 (RFC 793 ABORT Call) * Hirokazu Takahashi : Use copy_from_user() instead of * csum_and_copy_from_user() if possible. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or(at your option) any later version. * * Description of States: * * TCP_SYN_SENT sent a connection request, waiting for ack * * TCP_SYN_RECV received a connection request, sent ack, * waiting for final ack in three-way handshake. * * TCP_ESTABLISHED connection established * * TCP_FIN_WAIT1 our side has shutdown, waiting to complete * transmission of remaining buffered data * * TCP_FIN_WAIT2 all buffered data sent, waiting for remote * to shutdown * * TCP_CLOSING both sides have shutdown but we still have * data we have to finish sending * * TCP_TIME_WAIT timeout to catch resent junk before entering * closed, can only be entered from FIN_WAIT2 * or CLOSING. Required because the other end * may not have gotten our last ACK causing it * to retransmit the data packet (which we ignore) * * TCP_CLOSE_WAIT remote side has shutdown and is waiting for * us to finish writing our data and to shutdown * (we have to close() to move on to LAST_ACK) * * TCP_LAST_ACK out side has shutdown after remote has * shutdown. 
There may still be data in our * buffer that we have to finish sending * * TCP_CLOSE socket is finished */ #include <linux/config.h> #include <linux/module.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/poll.h> #include <linux/init.h> #include <linux/smp_lock.h> #include <linux/fs.h> #include <linux/random.h> #include <linux/bootmem.h> #include <net/icmp.h> #include <net/tcp.h> #include <net/xfrm.h> #include <net/ip.h> #include <asm/uaccess.h> #include <asm/ioctls.h> int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT; DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly; atomic_t tcp_orphan_count = ATOMIC_INIT(0); EXPORT_SYMBOL_GPL(tcp_orphan_count); int sysctl_tcp_mem[3]; int sysctl_tcp_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 }; int sysctl_tcp_rmem[3] = { 4 * 1024, 87380, 87380 * 2 }; EXPORT_SYMBOL(sysctl_tcp_mem); EXPORT_SYMBOL(sysctl_tcp_rmem); EXPORT_SYMBOL(sysctl_tcp_wmem); atomic_t tcp_memory_allocated; /* Current allocated memory. */ atomic_t tcp_sockets_allocated; /* Current number of TCP sockets. */ EXPORT_SYMBOL(tcp_memory_allocated); EXPORT_SYMBOL(tcp_sockets_allocated); /* * Pressure flag: try to collapse. * Technical note: it is used by multiple contexts non atomically. * All the sk_stream_mem_schedule() is of this nature: accounting * is strict, actions are advisory and have some latency. */ int tcp_memory_pressure; EXPORT_SYMBOL(tcp_memory_pressure); void tcp_enter_memory_pressure(void) { if (!tcp_memory_pressure) { NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES); tcp_memory_pressure = 1; } } EXPORT_SYMBOL(tcp_enter_memory_pressure); /* * Wait for a TCP event. * * Note that we don't need to lock the socket, as the upper poll layers * take care of normal races (between the test and the event) and we don't * go look at any of the socket buffers directly. 
*/ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) { unsigned int mask; struct sock *sk = sock->sk; struct tcp_sock *tp = tcp_sk(sk); poll_wait(file, sk->sk_sleep, wait); if (sk->sk_state == TCP_LISTEN) return inet_csk_listen_poll(sk); /* Socket is not locked. We are protected from async events by poll logic and correct handling of state changes made by another threads is impossible in any case. */ mask = 0; if (sk->sk_err) mask = POLLERR; /* * POLLHUP is certainly not done right. But poll() doesn't * have a notion of HUP in just one direction, and for a * socket the read side is more interesting. * * Some poll() documentation says that POLLHUP is incompatible * with the POLLOUT/POLLWR flags, so somebody should check this * all. But careful, it tends to be safer to return too many * bits than too few, and you can easily break real applications * if you don't tell them that something has hung up! * * Check-me. * * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and * our fs/select.c). It means that after we received EOF, * poll always returns immediately, making impossible poll() on write() * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP * if and only if shutdown has been made in both directions. * Actually, it is interesting to look how Solaris and DUX * solve this dilemma. I would prefer, if PULLHUP were maskable, * then we could set it on SND_SHUTDOWN. BTW examples given * in Stevens' books assume exactly this behaviour, it explains * why PULLHUP is incompatible with POLLOUT. --ANK * * NOTE. Check for TCP_CLOSE is added. The goal is to prevent * blocking on fresh not-connected or disconnected socket. --ANK */ if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE) mask |= POLLHUP; if (sk->sk_shutdown & RCV_SHUTDOWN) mask |= POLLIN | POLLRDNORM; /* Connected? */ if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) { /* Potential race condition. 
If read of tp below will * escape above sk->sk_state, we can be illegally awaken * in SYN_* states. */ if ((tp->rcv_nxt != tp->copied_seq) && (tp->urg_seq != tp->copied_seq || tp->rcv_nxt != tp->copied_seq + 1 || sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data)) mask |= POLLIN | POLLRDNORM; if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) { mask |= POLLOUT | POLLWRNORM; } else { /* send SIGIO later */ set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); /* Race breaker. If space is freed after * wspace test but before the flags are set, * IO signal will be lost. */ if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) mask |= POLLOUT | POLLWRNORM; } } if (tp->urg_data & TCP_URG_VALID) mask |= POLLPRI; } return mask; } int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg) { struct tcp_sock *tp = tcp_sk(sk); int answ; switch (cmd) { case SIOCINQ: if (sk->sk_state == TCP_LISTEN) return -EINVAL; lock_sock(sk); if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) answ = 0; else if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data || before(tp->urg_seq, tp->copied_seq) || !before(tp->urg_seq, tp->rcv_nxt)) { answ = tp->rcv_nxt - tp->copied_seq; /* Subtract 1, if FIN is in queue. 
*/ if (answ && !skb_queue_empty(&sk->sk_receive_queue)) answ -= ((struct sk_buff *)sk->sk_receive_queue.prev)->h.th->fin; } else answ = tp->urg_seq - tp->copied_seq; release_sock(sk); break; case SIOCATMARK: answ = tp->urg_data && tp->urg_seq == tp->copied_seq; break; case SIOCOUTQ: if (sk->sk_state == TCP_LISTEN) return -EINVAL; if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) answ = 0; else answ = tp->write_seq - tp->snd_una; break; default: return -ENOIOCTLCMD; }; return put_user(answ, (int __user *)arg); } static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb) { TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH; tp->pushed_seq = tp->write_seq; } static inline int forced_push(struct tcp_sock *tp) { return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1)); } static inline void skb_entail(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb) { skb->csum = 0; TCP_SKB_CB(skb)->seq = tp->write_seq; TCP_SKB_CB(skb)->end_seq = tp->write_seq; TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK; TCP_SKB_CB(skb)->sacked = 0; skb_header_release(skb); __skb_queue_tail(&sk->sk_write_queue, skb); sk_charge_skb(sk, skb); if (!sk->sk_send_head) sk->sk_send_head = skb; if (tp->nonagle & TCP_NAGLE_PUSH) tp->nonagle &= ~TCP_NAGLE_PUSH; } static inline void tcp_mark_urg(struct tcp_sock *tp, int flags, struct sk_buff *skb) { if (flags & MSG_OOB) { tp->urg_mode = 1; tp->snd_up = tp->write_seq; TCP_SKB_CB(skb)->sacked |= TCPCB_URG; } } static inline void tcp_push(struct sock *sk, struct tcp_sock *tp, int flags, int mss_now, int nonagle) { if (sk->sk_send_head) { struct sk_buff *skb = sk->sk_write_queue.prev; if (!(flags & MSG_MORE) || forced_push(tp)) tcp_mark_push(tp, skb); tcp_mark_urg(tp, flags, skb); __tcp_push_pending_frames(sk, tp, mss_now, (flags & MSG_MORE) ? 
TCP_NAGLE_CORK : nonagle); } } static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset, size_t psize, int flags) { struct tcp_sock *tp = tcp_sk(sk); int mss_now, size_goal; int err; ssize_t copied; long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); /* Wait for a connection to finish. */ if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) if ((err = sk_stream_wait_connect(sk, &timeo)) != 0) goto out_err; clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); mss_now = tcp_current_mss(sk, !(flags&MSG_OOB)); size_goal = tp->xmit_size_goal; copied = 0; err = -EPIPE; if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) goto do_error; while (psize > 0) { struct sk_buff *skb = sk->sk_write_queue.prev; struct page *page = pages[poffset / PAGE_SIZE]; int copy, i, can_coalesce; int offset = poffset % PAGE_SIZE; int size = min_t(size_t, psize, PAGE_SIZE - offset); if (!sk->sk_send_head || (copy = size_goal - skb->len) <= 0) { new_segment: if (!sk_stream_memory_free(sk)) goto wait_for_sndbuf; skb = sk_stream_alloc_pskb(sk, 0, 0, sk->sk_allocation); if (!skb) goto wait_for_memory; skb_entail(sk, tp, skb); copy = size_goal; } if (copy > size) copy = size; i = skb_shinfo(skb)->nr_frags; can_coalesce = skb_can_coalesce(skb, i, page, offset); if (!can_coalesce && i >= MAX_SKB_FRAGS) { tcp_mark_push(tp, skb); goto new_segment; } if (!sk_stream_wmem_schedule(sk, copy)) goto wait_for_memory; if (can_coalesce) { skb_shinfo(skb)->frags[i - 1].size += copy; } else { get_page(page); skb_fill_page_desc(skb, i, page, offset, copy); } skb->len += copy; skb->data_len += copy; skb->truesize += copy; sk->sk_wmem_queued += copy; sk->sk_forward_alloc -= copy; skb->ip_summed = CHECKSUM_HW; tp->write_seq += copy; TCP_SKB_CB(skb)->end_seq += copy; skb_shinfo(skb)->tso_segs = 0; if (!copied) TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH; copied += copy; poffset += copy; if (!(psize -= copy)) goto out; if (skb->len < mss_now || (flags & MSG_OOB)) continue; if 
(forced_push(tp)) { tcp_mark_push(tp, skb); __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH); } else if (skb == sk->sk_send_head) tcp_push_one(sk, mss_now); continue; wait_for_sndbuf: set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); wait_for_memory: if (copied) tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH); if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) goto do_error; mss_now = tcp_current_mss(sk, !(flags&MSG_OOB)); size_goal = tp->xmit_size_goal; } out: if (copied) tcp_push(sk, tp, flags, mss_now, tp->nonagle); return copied; do_error: if (copied) goto out; out_err: return sk_stream_error(sk, flags, err); } ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags) { ssize_t res; struct sock *sk = sock->sk; #define TCP_ZC_CSUM_FLAGS (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM) if (!(sk->sk_route_caps & NETIF_F_SG) || !(sk->sk_route_caps & TCP_ZC_CSUM_FLAGS)) return sock_no_sendpage(sock, page, offset, size, flags); #undef TCP_ZC_CSUM_FLAGS lock_sock(sk); TCP_CHECK_TIMER(sk); res = do_tcp_sendpages(sk, &page, offset, size, flags); TCP_CHECK_TIMER(sk); release_sock(sk); return res; } #define TCP_PAGE(sk) (sk->sk_sndmsg_page) #define TCP_OFF(sk) (sk->sk_sndmsg_off) static inline int select_size(struct sock *sk, struct tcp_sock *tp) { int tmp = tp->mss_cache; if (sk->sk_route_caps & NETIF_F_SG) { if (sk->sk_route_caps & NETIF_F_TSO) tmp = 0; else { int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER); if (tmp >= pgbreak && tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE) tmp = pgbreak; } } return tmp; } int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t size) { struct iovec *iov; struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; int iovlen, flags; int mss_now, size_goal; int err, copied; long timeo; lock_sock(sk); TCP_CHECK_TIMER(sk); flags = msg->msg_flags; timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); /* Wait for a connection to finish. 
*/ if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) if ((err = sk_stream_wait_connect(sk, &timeo)) != 0) goto out_err; /* This should be in poll */ clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); mss_now = tcp_current_mss(sk, !(flags&MSG_OOB)); size_goal = tp->xmit_size_goal; /* Ok commence sending. */ iovlen = msg->msg_iovlen; iov = msg->msg_iov; copied = 0; err = -EPIPE; if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) goto do_error; while (--iovlen >= 0) { int seglen = iov->iov_len; unsigned char __user *from = iov->iov_base; iov++; while (seglen > 0) { int copy; skb = sk->sk_write_queue.prev; if (!sk->sk_send_head || (copy = size_goal - skb->len) <= 0) { new_segment: /* Allocate new segment. If the interface is SG, * allocate skb fitting to single page. */ if (!sk_stream_memory_free(sk)) goto wait_for_sndbuf; skb = sk_stream_alloc_pskb(sk, select_size(sk, tp), 0, sk->sk_allocation); if (!skb) goto wait_for_memory; /* * Check whether we can use HW checksum. */ if (sk->sk_route_caps & (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)) skb->ip_summed = CHECKSUM_HW; skb_entail(sk, tp, skb); copy = size_goal; } /* Try to append data to the end of skb. */ if (copy > seglen) copy = seglen; /* Where to copy to? */ if (skb_tailroom(skb) > 0) { /* We have some space in skb head. Superb! */ if (copy > skb_tailroom(skb)) copy = skb_tailroom(skb); if ((err = skb_add_data(skb, from, copy)) != 0) goto do_fault; } else { int merge = 0; int i = skb_shinfo(skb)->nr_frags; struct page *page = TCP_PAGE(sk); int off = TCP_OFF(sk); if (skb_can_coalesce(skb, i, page, off) && off != PAGE_SIZE) { /* We can extend the last page * fragment. */ merge = 1; } else if (i == MAX_SKB_FRAGS || (!i && !(sk->sk_route_caps & NETIF_F_SG))) { /* Need to add new fragment and cannot * do this because interface is non-SG, * or because all the page slots are * busy. 
*/ tcp_mark_push(tp, skb); goto new_segment; } else if (page) { if (off == PAGE_SIZE) { put_page(page); TCP_PAGE(sk) = page = NULL; off = 0; } } else off = 0; if (copy > PAGE_SIZE - off) copy = PAGE_SIZE - off; if (!sk_stream_wmem_schedule(sk, copy)) goto wait_for_memory; if (!page) { /* Allocate new cache page. */ if (!(page = sk_stream_alloc_page(sk))) goto wait_for_memory; } /* Time to copy data. We are close to * the end! */ err = skb_copy_to_page(sk, from, skb, page, off, copy); if (err) { /* If this page was new, give it to the * socket so it does not get leaked. */ if (!TCP_PAGE(sk)) { TCP_PAGE(sk) = page; TCP_OFF(sk) = 0; } goto do_error; } /* Update the skb. */ if (merge) { skb_shinfo(skb)->frags[i - 1].size += copy; } else { skb_fill_page_desc(skb, i, page, off, copy); if (TCP_PAGE(sk)) { get_page(page); } else if (off + copy < PAGE_SIZE) { get_page(page); TCP_PAGE(sk) = page; } } TCP_OFF(sk) = off + copy; } if (!copied) TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH; tp->write_seq += copy; TCP_SKB_CB(skb)->end_seq += copy; skb_shinfo(skb)->tso_segs = 0; from += copy; copied += copy; if ((seglen -= copy) == 0 && iovlen == 0) goto out; if (skb->len < mss_now || (flags & MSG_OOB)) continue; if (forced_push(tp)) { tcp_mark_push(tp, skb); __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH); } else if (skb == sk->sk_send_head) tcp_push_one(sk, mss_now); continue; wait_for_sndbuf: set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); wait_for_memory: if (copied) tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH); if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) goto do_error; mss_now = tcp_current_mss(sk, !(flags&MSG_OOB)); size_goal = tp->xmit_size_goal; } } out: if (copied) tcp_push(sk, tp, flags, mss_now, tp->nonagle); TCP_CHECK_TIMER(sk); release_sock(sk); return copied; do_fault: if (!skb->len) { if (sk->sk_send_head == skb) sk->sk_send_head = NULL; __skb_unlink(skb, &sk->sk_write_queue); sk_stream_free_skb(sk, skb); } do_error: if (copied) goto 
out; out_err: err = sk_stream_error(sk, flags, err); TCP_CHECK_TIMER(sk); release_sock(sk); return err; } /* * Handle reading urgent data. BSD has very simple semantics for * this, no blocking and very strange errors 8) */ static int tcp_recv_urg(struct sock *sk, long timeo, struct msghdr *msg, int len, int flags, int *addr_len) { struct tcp_sock *tp = tcp_sk(sk); /* No URG data to read. */ if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data || tp->urg_data == TCP_URG_READ) return -EINVAL; /* Yes this is right ! */ if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE)) return -ENOTCONN; if (tp->urg_data & TCP_URG_VALID) { int err = 0; char c = tp->urg_data; if (!(flags & MSG_PEEK)) tp->urg_data = TCP_URG_READ; /* Read urgent data. */ msg->msg_flags |= MSG_OOB; if (len > 0) { if (!(flags & MSG_TRUNC)) err = memcpy_toiovec(msg->msg_iov, &c, 1); len = 1; } else msg->msg_flags |= MSG_TRUNC; return err ? -EFAULT : len; } if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN)) return 0; /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and * the available implementations agree in this case: * this call should never block, independent of the * blocking state of the socket. * Mike <pall@rz.uni-karlsruhe.de> */ return -EAGAIN; } /* Clean up the receive buffer for full frames taken by the user, * then send an ACK if necessary. COPIED is the number of bytes * tcp_recvmsg has given to the user so far, it speeds up the * calculation of whether or not we must ACK for the sake of * a window update. */ static void cleanup_rbuf(struct sock *sk, int copied) { struct tcp_sock *tp = tcp_sk(sk); int time_to_ack = 0; #if TCP_DEBUG struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)); #endif if (inet_csk_ack_scheduled(sk)) { const struct inet_connection_sock *icsk = inet_csk(sk); /* Delayed ACKs frequently hit locked sockets during bulk * receive. 
*/ if (icsk->icsk_ack.blocked || /* Once-per-two-segments ACK was not sent by tcp_input.c */ tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss || /* * If this read emptied read buffer, we send ACK, if * connection is not bidirectional, user drained * receive buffer and there was a small segment * in queue. */ (copied > 0 && (icsk->icsk_ack.pending & ICSK_ACK_PUSHED) && !icsk->icsk_ack.pingpong && !atomic_read(&sk->sk_rmem_alloc))) time_to_ack = 1; } /* We send an ACK if we can now advertise a non-zero window * which has been raised "significantly". * * Even if window raised up to infinity, do not send window open ACK * in states, where we will not receive more. It is useless. */ if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) { __u32 rcv_window_now = tcp_receive_window(tp); /* Optimize, __tcp_select_window() is not cheap. */ if (2*rcv_window_now <= tp->window_clamp) { __u32 new_window = __tcp_select_window(sk); /* Send ACK now, if this read freed lots of space * in our buffer. Certainly, new_window is new window. * We can advertise it now, if it is not less than current one. * "Lots" means "at least twice" here. */ if (new_window && new_window >= 2 * rcv_window_now) time_to_ack = 1; } } if (time_to_ack) tcp_send_ack(sk); } static void tcp_prequeue_process(struct sock *sk) { struct sk_buff *skb; struct tcp_sock *tp = tcp_sk(sk); NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED); /* RX process wants to run with disabled BHs, though it is not * necessary */ local_bh_disable(); while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) sk->sk_backlog_rcv(sk, skb); local_bh_enable(); /* Clear memory counter. 
*/ tp->ucopy.memory = 0; } static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) { struct sk_buff *skb; u32 offset; skb_queue_walk(&sk->sk_receive_queue, skb) { offset = seq - TCP_SKB_CB(skb)->seq; if (skb->h.th->syn) offset--; if (offset < skb->len || skb->h.th->fin) { *off = offset; return skb; } } return NULL; } /* * This routine provides an alternative to tcp_recvmsg() for routines * that would like to handle copying from skbuffs directly in 'sendfile' * fashion. * Note: * - It is assumed that the socket was locked by the caller. * - The routine does not block. * - At present, there is no support for reading OOB data * or for 'peeking' the socket using this routine * (although both would be easy to implement). */ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, sk_read_actor_t recv_actor) { struct sk_buff *skb; struct tcp_sock *tp = tcp_sk(sk); u32 seq = tp->copied_seq; u32 offset; int copied = 0; if (sk->sk_state == TCP_LISTEN) return -ENOTCONN; while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { if (offset < skb->len) { size_t used, len; len = skb->len - offset; /* Stop reading if we hit a patch of urgent data */ if (tp->urg_data) { u32 urg_offset = tp->urg_seq - seq; if (urg_offset < len) len = urg_offset; if (!len) break; } used = recv_actor(desc, skb, offset, len); if (used <= len) { seq += used; copied += used; offset += used; } if (offset != skb->len) break; } if (skb->h.th->fin) { sk_eat_skb(sk, skb); ++seq; break; } sk_eat_skb(sk, skb); if (!desc->count) break; } tp->copied_seq = seq; tcp_rcv_space_adjust(sk); /* Clean up data we have read: This will do ACK frames. */ if (copied) cleanup_rbuf(sk, copied); return copied; } /* * This routine copies from a sock struct into the user buffer. * * Technical note: in 2.3 we work on _locked_ socket, so that * tricks with *seq access order and skb->users are not required. * Probably, code can be easily improved even more. 
*/ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int nonblock, int flags, int *addr_len) { struct tcp_sock *tp = tcp_sk(sk); int copied = 0; u32 peek_seq; u32 *seq; unsigned long used; int err; int target; /* Read at least this many bytes */ long timeo; struct task_struct *user_recv = NULL; lock_sock(sk); TCP_CHECK_TIMER(sk); err = -ENOTCONN; if (sk->sk_state == TCP_LISTEN) goto out; timeo = sock_rcvtimeo(sk, nonblock); /* Urgent data needs to be handled specially. */ if (flags & MSG_OOB) goto recv_urg; seq = &tp->copied_seq; if (flags & MSG_PEEK) { peek_seq = tp->copied_seq; seq = &peek_seq; } target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); do { struct sk_buff *skb; u32 offset; /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */ if (tp->urg_data && tp->urg_seq == *seq) { if (copied) break; if (signal_pending(current)) { copied = timeo ? sock_intr_errno(timeo) : -EAGAIN; break; } } /* Next get a buffer. */ skb = skb_peek(&sk->sk_receive_queue); do { if (!skb) break; /* Now that we have two receive queues this * shouldn't happen. */ if (before(*seq, TCP_SKB_CB(skb)->seq)) { printk(KERN_INFO "recvmsg bug: copied %X " "seq %X\n", *seq, TCP_SKB_CB(skb)->seq); break; } offset = *seq - TCP_SKB_CB(skb)->seq; if (skb->h.th->syn) offset--; if (offset < skb->len) goto found_ok_skb; if (skb->h.th->fin) goto found_fin_ok; BUG_TRAP(flags & MSG_PEEK); skb = skb->next; } while (skb != (struct sk_buff *)&sk->sk_receive_queue); /* Well, if we have backlog, try to process it now yet. 
*/ if (copied >= target && !sk->sk_backlog.tail) break; if (copied) { if (sk->sk_err || sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN) || !timeo || signal_pending(current) || (flags & MSG_PEEK)) break; } else { if (sock_flag(sk, SOCK_DONE)) break; if (sk->sk_err) { copied = sock_error(sk); break; } if (sk->sk_shutdown & RCV_SHUTDOWN) break; if (sk->sk_state == TCP_CLOSE) { if (!sock_flag(sk, SOCK_DONE)) { /* This occurs when user tries to read * from never connected socket. */ copied = -ENOTCONN; break; } break; } if (!timeo) { copied = -EAGAIN; break; } if (signal_pending(current)) { copied = sock_intr_errno(timeo); break; } } cleanup_rbuf(sk, copied); if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) { /* Install new reader */ if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) { user_recv = current; tp->ucopy.task = user_recv; tp->ucopy.iov = msg->msg_iov; } tp->ucopy.len = len; BUG_TRAP(tp->copied_seq == tp->rcv_nxt || (flags & (MSG_PEEK | MSG_TRUNC))); /* Ugly... If prequeue is not empty, we have to * process it before releasing socket, otherwise * order will be broken at second iteration. * More elegant solution is required!!! * * Look: we have the following (pseudo)queues: * * 1. packets in flight * 2. backlog * 3. prequeue * 4. receive_queue * * Each queue can be processed only if the next ones * are empty. At this point we have empty receive_queue. * But prequeue _can_ be not empty after 2nd iteration, * when we jumped to start of loop because backlog * processing added something to receive_queue. * We cannot release_sock(), because backlog contains * packets arrived _after_ prequeued ones. * * Shortly, algorithm is clear --- to process all * the queues in order. We could make it more directly, * requeueing packets from backlog to prequeue, if * is not empty. It is more elegant, but eats cycles, * unfortunately. 
*/ if (!skb_queue_empty(&tp->ucopy.prequeue)) goto do_prequeue; /* __ Set realtime policy in scheduler __ */ } if (copied >= target) { /* Do not sleep, just process backlog. */ release_sock(sk); lock_sock(sk); } else sk_wait_data(sk, &timeo); if (user_recv) { int chunk; /* __ Restore normal policy in scheduler __ */ if ((chunk = len - tp->ucopy.len) != 0) { NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk); len -= chunk; copied += chunk; } if (tp->rcv_nxt == tp->copied_seq && !skb_queue_empty(&tp->ucopy.prequeue)) { do_prequeue: tcp_prequeue_process(sk); if ((chunk = len - tp->ucopy.len) != 0) { NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); len -= chunk; copied += chunk; } } } if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) { if (net_ratelimit()) printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n", current->comm, current->pid); peek_seq = tp->copied_seq; } continue; found_ok_skb: /* Ok so how much can we use? */ used = skb->len - offset; if (len < used) used = len; /* Do we have urgent data here? */ if (tp->urg_data) { u32 urg_offset = tp->urg_seq - *seq; if (urg_offset < used) { if (!urg_offset) { if (!sock_flag(sk, SOCK_URGINLINE)) { ++*seq; offset++; used--; if (!used) goto skip_copy; } } else used = urg_offset; } } if (!(flags & MSG_TRUNC)) { err = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, used); if (err) { /* Exception. Bailout! */ if (!copied) copied = -EFAULT; break; } } *seq += used; copied += used; len -= used; tcp_rcv_space_adjust(sk); skip_copy: if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) { tp->urg_data = 0; tcp_fast_path_check(sk, tp); } if (used + offset < skb->len) continue; if (skb->h.th->fin) goto found_fin_ok; if (!(flags & MSG_PEEK)) sk_eat_skb(sk, skb); continue; found_fin_ok: /* Process the FIN. 
*/ ++*seq; if (!(flags & MSG_PEEK)) sk_eat_skb(sk, skb); break; } while (len > 0); if (user_recv) { if (!skb_queue_empty(&tp->ucopy.prequeue)) { int chunk; tp->ucopy.len = copied > 0 ? len : 0; tcp_prequeue_process(sk); if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) { NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); len -= chunk; copied += chunk; } } tp->ucopy.task = NULL; tp->ucopy.len = 0; } /* According to UNIX98, msg_name/msg_namelen are ignored * on connected socket. I was just happy when found this 8) --ANK */ /* Clean up data we have read: This will do ACK frames. */ cleanup_rbuf(sk, copied); TCP_CHECK_TIMER(sk); release_sock(sk); return copied; out: TCP_CHECK_TIMER(sk); release_sock(sk); return err; recv_urg: err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len); goto out; } /* * State processing on a close. This implements the state shift for * sending our FIN frame. Note that we only send a FIN for some * states. A shutdown() may have already sent the FIN, or we may be * closed. */ static const unsigned char new_state[16] = { /* current state: new state: action: */ /* (Invalid) */ TCP_CLOSE, /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN, /* TCP_SYN_SENT */ TCP_CLOSE, /* TCP_SYN_RECV */ TCP_FIN_WAIT1 | TCP_ACTION_FIN, /* TCP_FIN_WAIT1 */ TCP_FIN_WAIT1, /* TCP_FIN_WAIT2 */ TCP_FIN_WAIT2, /* TCP_TIME_WAIT */ TCP_CLOSE, /* TCP_CLOSE */ TCP_CLOSE, /* TCP_CLOSE_WAIT */ TCP_LAST_ACK | TCP_ACTION_FIN, /* TCP_LAST_ACK */ TCP_LAST_ACK, /* TCP_LISTEN */ TCP_CLOSE, /* TCP_CLOSING */ TCP_CLOSING, }; static int tcp_close_state(struct sock *sk) { int next = (int)new_state[sk->sk_state]; int ns = next & TCP_STATE_MASK; tcp_set_state(sk, ns); return next & TCP_ACTION_FIN; } /* * Shutdown the sending side of a connection. Much like close except * that we don't receive shut down or set_sock_flag(sk, SOCK_DEAD). 
*/ void tcp_shutdown(struct sock *sk, int how) { /* We need to grab some memory, and put together a FIN, * and then put it into the queue to be sent. * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92. */ if (!(how & SEND_SHUTDOWN)) return; /* If we've already sent a FIN, or it's a closed state, skip this. */ if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) { /* Clear out any half completed packets. FIN if needed. */ if (tcp_close_state(sk)) tcp_send_fin(sk); } } void tcp_close(struct sock *sk, long timeout) { struct sk_buff *skb; int data_was_unread = 0; lock_sock(sk); sk->sk_shutdown = SHUTDOWN_MASK; if (sk->sk_state == TCP_LISTEN) { tcp_set_state(sk, TCP_CLOSE); /* Special case. */ inet_csk_listen_stop(sk); goto adjudge_to_death; } /* We need to flush the recv. buffs. We do this only on the * descriptor close, not protocol-sourced closes, because the * reader process may not have drained the data yet! */ while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq - skb->h.th->fin; data_was_unread += len; __kfree_skb(skb); } sk_stream_mem_reclaim(sk); /* As outlined in draft-ietf-tcpimpl-prob-03.txt, section * 3.10, we send a RST here because data was lost. To * witness the awful effects of the old behavior of always * doing a FIN, run an older 2.1.x kernel or 2.0.x, start * a bulk GET in an FTP client, suspend the process, wait * for the client to advertise a zero window, then kill -9 * the FTP client, wheee... Note: timeout is always zero * in such a case. */ if (data_was_unread) { /* Unread data was tossed, zap the connection. */ NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE); tcp_set_state(sk, TCP_CLOSE); tcp_send_active_reset(sk, GFP_KERNEL); } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { /* Check zero linger _after_ checking for unread data. 
*/ sk->sk_prot->disconnect(sk, 0); NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA); } else if (tcp_close_state(sk)) { /* We FIN if the application ate all the data before * zapping the connection. */ /* RED-PEN. Formally speaking, we have broken TCP state * machine. State transitions: * * TCP_ESTABLISHED -> TCP_FIN_WAIT1 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible) * TCP_CLOSE_WAIT -> TCP_LAST_ACK * * are legal only when FIN has been sent (i.e. in window), * rather than queued out of window. Purists blame. * * F.e. "RFC state" is ESTABLISHED, * if Linux state is FIN-WAIT-1, but FIN is still not sent. * * The visible declinations are that sometimes * we enter time-wait state, when it is not required really * (harmless), do not send active resets, when they are * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when * they look as CLOSING or LAST_ACK for Linux) * Probably, I missed some more holelets. * --ANK */ tcp_send_fin(sk); } sk_stream_wait_close(sk, timeout); adjudge_to_death: /* It is the last release_sock in its life. It will remove backlog. */ release_sock(sk); /* Now socket is owned by kernel and we acquire BH lock to finish close. No need to check for user refs. */ local_bh_disable(); bh_lock_sock(sk); BUG_TRAP(!sock_owned_by_user(sk)); sock_hold(sk); sock_orphan(sk); /* This is a (useful) BSD violating of the RFC. There is a * problem with TCP as specified in that the other end could * keep a socket open forever with no application left this end. * We use a 3 minute timeout (about the same as BSD) then kill * our end. If they send after that then tough - BUT: long enough * that we won't make the old 4*rto = almost no time - whoops * reset mistake. * * Nope, it was not mistake. It is really desired behaviour * f.e. on http servers, when such sockets are useless, but * consume significant resources. Let's do it with special * linger2 option. 
--ANK */ if (sk->sk_state == TCP_FIN_WAIT2) { struct tcp_sock *tp = tcp_sk(sk); if (tp->linger2 < 0) { tcp_set_state(sk, TCP_CLOSE); tcp_send_active_reset(sk, GFP_ATOMIC); NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER); } else { const int tmo = tcp_fin_time(sk); if (tmo > TCP_TIMEWAIT_LEN) { inet_csk_reset_keepalive_timer(sk, tcp_fin_time(sk)); } else { atomic_inc(sk->sk_prot->orphan_count); tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); goto out; } } } if (sk->sk_state != TCP_CLOSE) { sk_stream_mem_reclaim(sk); if (atomic_read(sk->sk_prot->orphan_count) > sysctl_tcp_max_orphans || (sk->sk_wmem_queued > SOCK_MIN_SNDBUF && atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) { if (net_ratelimit()) printk(KERN_INFO "TCP: too many of orphaned " "sockets\n"); tcp_set_state(sk, TCP_CLOSE); tcp_send_active_reset(sk, GFP_ATOMIC); NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY); } } atomic_inc(sk->sk_prot->orphan_count); if (sk->sk_state == TCP_CLOSE) inet_csk_destroy_sock(sk); /* Otherwise, socket is reprieved until protocol close. */ out: bh_unlock_sock(sk); local_bh_enable(); sock_put(sk); } /* These states need RST on ABORT according to RFC793 */ static inline int tcp_need_reset(int state) { return (1 << state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | TCPF_SYN_RECV); } int tcp_disconnect(struct sock *sk, int flags) { struct inet_sock *inet = inet_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); int err = 0; int old_state = sk->sk_state; if (old_state != TCP_CLOSE) tcp_set_state(sk, TCP_CLOSE); /* ABORT function of RFC793 */ if (old_state == TCP_LISTEN) { inet_csk_listen_stop(sk); } else if (tcp_need_reset(old_state) || (tp->snd_nxt != tp->write_seq && (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) { /* The last check adjusts for discrepancy of Linux wrt. 
RFC * states */ tcp_send_active_reset(sk, gfp_any()); sk->sk_err = ECONNRESET; } else if (old_state == TCP_SYN_SENT) sk->sk_err = ECONNRESET; tcp_clear_xmit_timers(sk); __skb_queue_purge(&sk->sk_receive_queue); sk_stream_writequeue_purge(sk); __skb_queue_purge(&tp->out_of_order_queue); inet->dport = 0; if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) inet_reset_saddr(sk); sk->sk_shutdown = 0; sock_reset_flag(sk, SOCK_DONE); tp->srtt = 0; if ((tp->write_seq += tp->max_window + 2) == 0) tp->write_seq = 1; icsk->icsk_backoff = 0; tp->snd_cwnd = 2; icsk->icsk_probes_out = 0; tp->packets_out = 0; tp->snd_ssthresh = 0x7fffffff; tp->snd_cwnd_cnt = 0; tp->bytes_acked = 0; tcp_set_ca_state(sk, TCP_CA_Open); tcp_clear_retrans(tp); inet_csk_delack_init(sk); sk->sk_send_head = NULL; tp->rx_opt.saw_tstamp = 0; tcp_sack_reset(&tp->rx_opt); __sk_dst_reset(sk); BUG_TRAP(!inet->num || icsk->icsk_bind_hash); sk->sk_error_report(sk); return err; } /* * Socket option code for TCP. */ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, int optlen) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); int val; int err = 0; if (level != SOL_TCP) return icsk->icsk_af_ops->setsockopt(sk, level, optname, optval, optlen); /* This is a string value all the others are int's */ if (optname == TCP_CONGESTION) { char name[TCP_CA_NAME_MAX]; if (optlen < 1) return -EINVAL; val = strncpy_from_user(name, optval, min(TCP_CA_NAME_MAX-1, optlen)); if (val < 0) return -EFAULT; name[val] = 0; lock_sock(sk); err = tcp_set_congestion_control(sk, name); release_sock(sk); return err; } if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; lock_sock(sk); switch (optname) { case TCP_MAXSEG: /* Values greater than interface MTU won't take effect. 
However * at the point when this call is done we typically don't yet * know which interface is going to be used */ if (val < 8 || val > MAX_TCP_WINDOW) { err = -EINVAL; break; } tp->rx_opt.user_mss = val; break; case TCP_NODELAY: if (val) { /* TCP_NODELAY is weaker than TCP_CORK, so that * this option on corked socket is remembered, but * it is not activated until cork is cleared. * * However, when TCP_NODELAY is set we make * an explicit push, which overrides even TCP_CORK * for currently queued segments. */ tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH; tcp_push_pending_frames(sk, tp); } else { tp->nonagle &= ~TCP_NAGLE_OFF; } break; case TCP_CORK: /* When set indicates to always queue non-full frames. * Later the user clears this option and we transmit * any pending partial frames in the queue. This is * meant to be used alongside sendfile() to get properly * filled frames when the user (for example) must write * out headers with a write() call first and then use * sendfile to send out the data parts. * * TCP_CORK can be set together with TCP_NODELAY and it is * stronger than TCP_NODELAY. 
*/ if (val) { tp->nonagle |= TCP_NAGLE_CORK; } else { tp->nonagle &= ~TCP_NAGLE_CORK; if (tp->nonagle&TCP_NAGLE_OFF) tp->nonagle |= TCP_NAGLE_PUSH; tcp_push_pending_frames(sk, tp); } break; case TCP_KEEPIDLE: if (val < 1 || val > MAX_TCP_KEEPIDLE) err = -EINVAL; else { tp->keepalive_time = val * HZ; if (sock_flag(sk, SOCK_KEEPOPEN) && !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) { __u32 elapsed = tcp_time_stamp - tp->rcv_tstamp; if (tp->keepalive_time > elapsed) elapsed = tp->keepalive_time - elapsed; else elapsed = 0; inet_csk_reset_keepalive_timer(sk, elapsed); } } break; case TCP_KEEPINTVL: if (val < 1 || val > MAX_TCP_KEEPINTVL) err = -EINVAL; else tp->keepalive_intvl = val * HZ; break; case TCP_KEEPCNT: if (val < 1 || val > MAX_TCP_KEEPCNT) err = -EINVAL; else tp->keepalive_probes = val; break; case TCP_SYNCNT: if (val < 1 || val > MAX_TCP_SYNCNT) err = -EINVAL; else icsk->icsk_syn_retries = val; break; case TCP_LINGER2: if (val < 0) tp->linger2 = -1; else if (val > sysctl_tcp_fin_timeout / HZ) tp->linger2 = 0; else tp->linger2 = val * HZ; break; case TCP_DEFER_ACCEPT: icsk->icsk_accept_queue.rskq_defer_accept = 0; if (val > 0) { /* Translate value in seconds to number of * retransmits */ while (icsk->icsk_accept_queue.rskq_defer_accept < 32 && val > ((TCP_TIMEOUT_INIT / HZ) << icsk->icsk_accept_queue.rskq_defer_accept)) icsk->icsk_accept_queue.rskq_defer_accept++; icsk->icsk_accept_queue.rskq_defer_accept++; } break; case TCP_WINDOW_CLAMP: if (!val) { if (sk->sk_state != TCP_CLOSE) { err = -EINVAL; break; } tp->window_clamp = 0; } else tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ? 
SOCK_MIN_RCVBUF / 2 : val; break; case TCP_QUICKACK: if (!val) { icsk->icsk_ack.pingpong = 1; } else { icsk->icsk_ack.pingpong = 0; if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) && inet_csk_ack_scheduled(sk)) { icsk->icsk_ack.pending |= ICSK_ACK_PUSHED; cleanup_rbuf(sk, 1); if (!(val & 1)) icsk->icsk_ack.pingpong = 1; } } break; default: err = -ENOPROTOOPT; break; }; release_sock(sk); return err; } /* Return information about state of tcp endpoint in API format. */ void tcp_get_info(struct sock *sk, struct tcp_info *info) { struct tcp_sock *tp = tcp_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); u32 now = tcp_time_stamp; memset(info, 0, sizeof(*info)); info->tcpi_state = sk->sk_state; info->tcpi_ca_state = icsk->icsk_ca_state; info->tcpi_retransmits = icsk->icsk_retransmits; info->tcpi_probes = icsk->icsk_probes_out; info->tcpi_backoff = icsk->icsk_backoff; if (tp->rx_opt.tstamp_ok) info->tcpi_options |= TCPI_OPT_TIMESTAMPS; if (tp->rx_opt.sack_ok) info->tcpi_options |= TCPI_OPT_SACK; if (tp->rx_opt.wscale_ok) { info->tcpi_options |= TCPI_OPT_WSCALE; info->tcpi_snd_wscale = tp->rx_opt.snd_wscale; info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale; } if (tp->ecn_flags&TCP_ECN_OK) info->tcpi_options |= TCPI_OPT_ECN; info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto); info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato); info->tcpi_snd_mss = tp->mss_cache; info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss; info->tcpi_unacked = tp->packets_out; info->tcpi_sacked = tp->sacked_out; info->tcpi_lost = tp->lost_out; info->tcpi_retrans = tp->retrans_out; info->tcpi_fackets = tp->fackets_out; info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime); info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime); info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp); info->tcpi_pmtu = icsk->icsk_pmtu_cookie; info->tcpi_rcv_ssthresh = tp->rcv_ssthresh; info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3; info->tcpi_rttvar = 
jiffies_to_usecs(tp->mdev)>>2; info->tcpi_snd_ssthresh = tp->snd_ssthresh; info->tcpi_snd_cwnd = tp->snd_cwnd; info->tcpi_advmss = tp->advmss; info->tcpi_reordering = tp->reordering; info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3; info->tcpi_rcv_space = tp->rcvq_space.space; info->tcpi_total_retrans = tp->total_retrans; } EXPORT_SYMBOL_GPL(tcp_get_info); int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); int val, len; if (level != SOL_TCP) return icsk->icsk_af_ops->getsockopt(sk, level, optname, optval, optlen); if (get_user(len, optlen)) return -EFAULT; len = min_t(unsigned int, len, sizeof(int)); if (len < 0) return -EINVAL; switch (optname) { case TCP_MAXSEG: val = tp->mss_cache; if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) val = tp->rx_opt.user_mss; break; case TCP_NODELAY: val = !!(tp->nonagle&TCP_NAGLE_OFF); break; case TCP_CORK: val = !!(tp->nonagle&TCP_NAGLE_CORK); break; case TCP_KEEPIDLE: val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ; break; case TCP_KEEPINTVL: val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ; break; case TCP_KEEPCNT: val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes; break; case TCP_SYNCNT: val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; break; case TCP_LINGER2: val = tp->linger2; if (val >= 0) val = (val ? : sysctl_tcp_fin_timeout) / HZ; break; case TCP_DEFER_ACCEPT: val = !icsk->icsk_accept_queue.rskq_defer_accept ? 
0 : ((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1)); break; case TCP_WINDOW_CLAMP: val = tp->window_clamp; break; case TCP_INFO: { struct tcp_info info; if (get_user(len, optlen)) return -EFAULT; tcp_get_info(sk, &info); len = min_t(unsigned int, len, sizeof(info)); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &info, len)) return -EFAULT; return 0; } case TCP_QUICKACK: val = !icsk->icsk_ack.pingpong; break; case TCP_CONGESTION: if (get_user(len, optlen)) return -EFAULT; len = min_t(unsigned int, len, TCP_CA_NAME_MAX); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, icsk->icsk_ca_ops->name, len)) return -EFAULT; return 0; default: return -ENOPROTOOPT; }; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } extern void __skb_cb_too_small_for_tcp(int, int); extern struct tcp_congestion_ops tcp_reno; static __initdata unsigned long thash_entries; static int __init set_thash_entries(char *str) { if (!str) return 0; thash_entries = simple_strtoul(str, &str, 0); return 1; } __setup("thash_entries=", set_thash_entries); void __init tcp_init(void) { struct sk_buff *skb = NULL; int order, i; /* DEP: Lacking an __init call in tcp_minisocks.c, I am * putting this registration here */ OSA_REGISTER_SPINLOCK(&tcp_death_row.death_lock, "inet_timewait_death_row->death_lock", 35); if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb)) __skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb), sizeof(skb->cb)); tcp_hashinfo.bind_bucket_cachep = kmem_cache_create("tcp_bind_bucket", sizeof(struct inet_bind_bucket), 0, SLAB_HWCACHE_ALIGN, NULL, NULL); if (!tcp_hashinfo.bind_bucket_cachep) panic("tcp_init: Cannot alloc tcp_bind_bucket cache."); /* Size and allocate the main established and bind bucket * hash tables. * * The methodology is similar to that of the buffer cache. 
*/ tcp_hashinfo.ehash = alloc_large_system_hash("TCP established", sizeof(struct inet_ehash_bucket), thash_entries, (num_physpages >= 128 * 1024) ? 13 : 15, HASH_HIGHMEM, &tcp_hashinfo.ehash_size, NULL, 0); tcp_hashinfo.ehash_size = (1 << tcp_hashinfo.ehash_size) >> 1; for (i = 0; i < (tcp_hashinfo.ehash_size << 1); i++) { rwlock_init(&tcp_hashinfo.ehash[i].lock); INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain); } tcp_hashinfo.bhash = alloc_large_system_hash("TCP bind", sizeof(struct inet_bind_hashbucket), tcp_hashinfo.ehash_size, (num_physpages >= 128 * 1024) ? 13 : 15, HASH_HIGHMEM, &tcp_hashinfo.bhash_size, NULL, 64 * 1024); tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size; for (i = 0; i < tcp_hashinfo.bhash_size; i++) { spin_lock_init(&tcp_hashinfo.bhash[i].lock); OSA_REGISTER_SPINLOCK(&tcp_hashinfo.bhash[i].lock, "tcp_hashinfo.bhash->lock", 24); INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain); } /* Try to be a bit smarter and adjust defaults depending * on available memory. */ for (order = 0; ((1 << order) << PAGE_SHIFT) < (tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket)); order++) ; if (order >= 4) { sysctl_local_port_range[0] = 32768; sysctl_local_port_range[1] = 61000; tcp_death_row.sysctl_max_tw_buckets = 180000; sysctl_tcp_max_orphans = 4096 << (order - 4); sysctl_max_syn_backlog = 1024; } else if (order < 3) { sysctl_local_port_range[0] = 1024 * (3 - order); tcp_death_row.sysctl_max_tw_buckets >>= (3 - order); sysctl_tcp_max_orphans >>= (3 - order); sysctl_max_syn_backlog = 128; } sysctl_tcp_mem[0] = 768 << order; sysctl_tcp_mem[1] = 1024 << order; sysctl_tcp_mem[2] = 1536 << order; if (order < 3) { sysctl_tcp_wmem[2] = 64 * 1024; sysctl_tcp_rmem[0] = PAGE_SIZE; sysctl_tcp_rmem[1] = 43689; sysctl_tcp_rmem[2] = 2 * 43689; } printk(KERN_INFO "TCP: Hash tables configured " "(established %d bind %d)\n", tcp_hashinfo.ehash_size << 1, tcp_hashinfo.bhash_size); tcp_register_congestion_control(&tcp_reno); } EXPORT_SYMBOL(tcp_close); 
/*
 * Export the TCP socket-layer entry points so other kernel modules
 * (e.g. the IPv6 stack) can link against this implementation.
 */
EXPORT_SYMBOL(tcp_disconnect);
EXPORT_SYMBOL(tcp_getsockopt);
EXPORT_SYMBOL(tcp_ioctl);
EXPORT_SYMBOL(tcp_poll);
EXPORT_SYMBOL(tcp_read_sock);
EXPORT_SYMBOL(tcp_recvmsg);
EXPORT_SYMBOL(tcp_sendmsg);
EXPORT_SYMBOL(tcp_sendpage);
EXPORT_SYMBOL(tcp_setsockopt);
EXPORT_SYMBOL(tcp_shutdown);
EXPORT_SYMBOL(tcp_statistics);
{ "content_hash": "97add5dfee780204df61991ac8a0840c", "timestamp": "", "source": "github", "line_count": 2152, "max_line_length": 84, "avg_line_length": 27.67193308550186, "alnum_prop": 0.6345088161209068, "repo_name": "ut-osa/syncchar", "id": "6afb53210b310585b0411a815527388edd1dbdd3", "size": "59550", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "linux-2.6.16-unmod/net/ipv4/tcp.c", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "ASP", "bytes": "4526" }, { "name": "Assembly", "bytes": "7269561" }, { "name": "C", "bytes": "191363313" }, { "name": "C++", "bytes": "2703790" }, { "name": "Objective-C", "bytes": "515305" }, { "name": "Perl", "bytes": "118289" }, { "name": "Python", "bytes": "160654" }, { "name": "Scala", "bytes": "12158" }, { "name": "Shell", "bytes": "48243" }, { "name": "TeX", "bytes": "51367" }, { "name": "UnrealScript", "bytes": "20822" }, { "name": "XSLT", "bytes": "310" } ], "symlink_target": "" }
<!-- String resources for the "Todos with Dagger" sample app. -->
<resources>
    <string name="app_name">Todos with Dagger</string>
    <string name="title_activity_main">Todos with Dagger</string>
    <string name="btn_add_todo">Add</string>
    <string name="edit_new_todo_hint">new task</string>
    <string name="dialog_delete_todo_title">Delete todo</string>
    <!-- %s is replaced with the todo's text via getString(R.string..., text). -->
    <string name="dialog_delete_todo_message">Are you sure you want to delete "%s"?</string>
    <string name="dialog_delete_todo_btn_positive">OK</string>
    <string name="dialog_delete_todo_btn_neutral">Cancel</string>
    <string name="toast_add_todo">Added new task !</string>
    <!-- FIX: was "$d", which is not a valid java.util.Formatter placeholder
         and would be shown literally (or crash String.format); use %d. -->
    <string name="toast_delete_todo">Deleted task %d !</string>
    <string name="menu_clear_completed_todo">Clear completed tasks</string>
    <string name="menu_undo_todo">Undo</string>
    <string name="menu_redo_todo">Redo</string>
</resources>
{ "content_hash": "f2f0d06d9b079733914d2aa1e5542402", "timestamp": "", "source": "github", "line_count": 19, "max_line_length": 92, "avg_line_length": 43.89473684210526, "alnum_prop": 0.6954436450839329, "repo_name": "izumin5210/Droidux", "id": "491c16508b101db88e62e5b55a59306cb8250e72", "size": "834", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "examples/todos-with-dagger/src/main/res/values/strings.xml", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Groovy", "bytes": "9743" }, { "name": "Java", "bytes": "132765" } ], "symlink_target": "" }
<!DOCTYPE html>
<!-- Demo page for the Angular 1.x calendar control. Boots the
     "controls.app" module and renders a single <calendar> directive. -->
<html lang="en" data-ng-app="controls.app">
  <head>
    <meta charset="utf-8">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <!-- The above 3 meta tags *must* come first in the head; any other head content must come *after* these tags -->
    <!-- NOTE(review): title is leftover Bootstrap boilerplate — consider
         renaming to match the calendar demo. -->
    <title>Bootstrap 101 Template</title>

    <!-- Bootstrap -->
    <link href="bower_components/bootstrap/dist/css/bootstrap.min.css" rel="stylesheet">
    <link href="assets/css/calendar.css" rel="stylesheet">

    <!-- HTML5 shim and Respond.js for IE8 support of HTML5 elements and media queries -->
    <!-- WARNING: Respond.js doesn't work if you view the page via file:// -->
    <!--[if lt IE 9]>
      <script src="https://oss.maxcdn.com/html5shiv/3.7.2/html5shiv.min.js"></script>
      <script src="https://oss.maxcdn.com/respond/1.4.2/respond.min.js"></script>
    <![endif]-->
  </head>
  <!-- calendarCtrl supplies the "settings" object bound into the directive. -->
  <body data-ng-controller="calendarCtrl">
    <calendar settings="settings"></calendar>

    <!-- jQuery (necessary for Bootstrap's JavaScript plugins) -->
    <script src="bower_components/jquery/dist/jquery.min.js"></script>
    <!-- Include all compiled plugins (below), or include individual files as needed -->
    <script src="bower_components/bootstrap/dist/js/bootstrap.min.js"></script>
    <script src="bower_components/moment/min/moment-with-locales.min.js"></script>
    <script src="bower_components/angular/angular.min.js"></script>
    <script src="bower_components/angular-animate/angular-animate.min.js"></script>
    <script src="bower_components/angular-route/angular-route.min.js"></script>
    <script src="bower_components/angular-sanitize/angular-sanitize.min.js"></script>
    <!-- App module, controller, and the calendar directive itself. -->
    <script src="angular-1x/controls.module.js"></script>
    <script src="angular-1x/calendar/calendar.controller.js"></script>
    <script src="angular-1x/calendar/calendar.directive.js"></script>
  </body>
</html>
{ "content_hash": "9df1f10d47dd7262e685fb16ab43082d", "timestamp": "", "source": "github", "line_count": 42, "max_line_length": 117, "avg_line_length": 47.42857142857143, "alnum_prop": 0.6767068273092369, "repo_name": "lucasduane/angular-controls", "id": "cec6cc693cf6b2c9e1c63e53656587515fb58a19", "size": "1992", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "index-angular-1.html", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "2352" }, { "name": "HTML", "bytes": "4170" }, { "name": "JavaScript", "bytes": "6479" } ], "symlink_target": "" }
using content::BrowserThread;
using syncer::FakeSyncManager;
using syncer::SyncManager;
using ::testing::InvokeWithoutArgs;
using ::testing::StrictMock;
using ::testing::_;

namespace browser_sync {

namespace {

// GMock action: signals |event| when the mocked call fires.
ACTION_P(Signal, event) {
  event->Signal();
}

// Helper bound into mock expectations to break out of a nested run loop.
void QuitMessageLoop() {
  base::MessageLoop::current()->Quit();
}

// Strict mock of the SyncFrontend interface; tests set expectations on
// the backend-initialization callback and let all other calls fail fast.
class MockSyncFrontend : public SyncFrontend {
 public:
  virtual ~MockSyncFrontend() {}

  MOCK_METHOD3(
      OnBackendInitialized,
      void(const syncer::WeakHandle<syncer::JsBackend>&,
           const syncer::WeakHandle<syncer::DataTypeDebugInfoListener>&,
           bool));
  MOCK_METHOD0(OnSyncCycleCompleted, void());
  MOCK_METHOD1(OnConnectionStatusChange,
               void(syncer::ConnectionStatus status));
  MOCK_METHOD0(OnClearServerDataSucceeded, void());
  MOCK_METHOD0(OnClearServerDataFailed, void());
  MOCK_METHOD2(OnPassphraseRequired,
               void(syncer::PassphraseRequiredReason,
                    const sync_pb::EncryptedData&));
  MOCK_METHOD0(OnPassphraseAccepted, void());
  MOCK_METHOD2(OnEncryptedTypesChanged,
               void(syncer::ModelTypeSet, bool));
  MOCK_METHOD0(OnEncryptionComplete, void());
  MOCK_METHOD1(OnMigrationNeededForTypes, void(syncer::ModelTypeSet));
  MOCK_METHOD1(OnExperimentsChanged,
      void(const syncer::Experiments&));
  MOCK_METHOD1(OnActionableError,
      void(const syncer::SyncProtocolError& sync_error));
  MOCK_METHOD0(OnSyncConfigureRetry, void());
};

// Factory handed to the backend; creates a FakeSyncManager preloaded with
// the configured type sets and publishes it through |fake_manager| so the
// test can inspect what the backend did.
class FakeSyncManagerFactory : public syncer::SyncManagerFactory {
 public:
  explicit FakeSyncManagerFactory(FakeSyncManager** fake_manager)
     : fake_manager_(fake_manager) {
    *fake_manager_ = NULL;
  }
  virtual ~FakeSyncManagerFactory() {}

  // SyncManagerFactory implementation.  Called on the sync thread.
  virtual scoped_ptr<SyncManager> CreateSyncManager(
      std::string name) OVERRIDE {
    // Ownership of the FakeSyncManager passes to the caller via the
    // returned scoped_ptr; |*fake_manager_| is a non-owning observer.
    *fake_manager_ = new FakeSyncManager(initial_sync_ended_types_,
                                         progress_marker_types_,
                                         configure_fail_types_);
    return scoped_ptr<SyncManager>(*fake_manager_);
  }

  // Setters below must be called before CreateSyncManager runs; they
  // seed the fake's notion of persisted sync state.
  void set_initial_sync_ended_types(syncer::ModelTypeSet types) {
    initial_sync_ended_types_ = types;
  }

  void set_progress_marker_types(syncer::ModelTypeSet types) {
    progress_marker_types_ = types;
  }

  void set_configure_fail_types(syncer::ModelTypeSet types) {
    configure_fail_types_ = types;
  }

 private:
  syncer::ModelTypeSet initial_sync_ended_types_;
  syncer::ModelTypeSet progress_marker_types_;
  syncer::ModelTypeSet configure_fail_types_;
  FakeSyncManager** fake_manager_;  // Not owned.
};

// Fixture driving a real SyncBackendHost against the FakeSyncManager,
// with a real IO thread provided by the thread bundle.
class SyncBackendHostTest : public testing::Test {
 protected:
  SyncBackendHostTest()
      : thread_bundle_(content::TestBrowserThreadBundle::REAL_IO_THREAD),
        fake_manager_(NULL) {}

  virtual ~SyncBackendHostTest() {}

  virtual void SetUp() OVERRIDE {
    profile_.reset(new TestingProfile());
    sync_prefs_.reset(new SyncPrefs(profile_->GetPrefs()));
    backend_.reset(new SyncBackendHostImpl(
        profile_->GetDebugName(),
        profile_.get(),
        sync_prefs_->AsWeakPtr()));
    credentials_.email = "user@example.com";
    credentials_.sync_token = "sync_token";

    fake_manager_factory_.reset(new FakeSyncManagerFactory(&fake_manager_));

    // These types are always implicitly enabled.
    enabled_types_.PutAll(syncer::ControlTypes());

    // NOTE: We can't include Passwords or Typed URLs due to the Sync Backend
    // Registrar removing them if it can't find their model workers.
    enabled_types_.Put(syncer::BOOKMARKS);
    enabled_types_.Put(syncer::NIGORI);
    enabled_types_.Put(syncer::DEVICE_INFO);
    enabled_types_.Put(syncer::PREFERENCES);
    enabled_types_.Put(syncer::SESSIONS);
    enabled_types_.Put(syncer::SEARCH_ENGINES);
    enabled_types_.Put(syncer::AUTOFILL);
    enabled_types_.Put(syncer::EXPERIMENTS);

    network_resources_.reset(new syncer::HttpBridgeNetworkResources());
  }

  virtual void TearDown() OVERRIDE {
    // Shut the backend down before tearing down the threads it uses.
    if (backend_) {
      backend_->StopSyncingForShutdown();
      backend_->Shutdown(SyncBackendHost::STOP);
    }
    backend_.reset();
    sync_prefs_.reset();
    profile_.reset();
    // Pump messages posted by the sync thread (which may end up
    // posting on the IO thread).
    base::RunLoop().RunUntilIdle();
    content::RunAllPendingInMessageLoop(BrowserThread::IO);
    // Pump any messages posted by the IO thread.
    base::RunLoop().RunUntilIdle();
  }

  // Synchronously initializes the backend.
  void InitializeBackend(bool expect_success) {
    // OnBackendInitialized quits the run loop below when it fires.
    EXPECT_CALL(mock_frontend_, OnBackendInitialized(_, _, expect_success)).
        WillOnce(InvokeWithoutArgs(QuitMessageLoop));
    backend_->Initialize(
        &mock_frontend_,
        scoped_ptr<base::Thread>(),
        syncer::WeakHandle<syncer::JsEventHandler>(),
        GURL(std::string()),
        credentials_,
        true,
        fake_manager_factory_.PassAs<syncer::SyncManagerFactory>(),
        scoped_ptr<syncer::UnrecoverableErrorHandler>(
            new syncer::TestUnrecoverableErrorHandler).Pass(),
        NULL,
        network_resources_.get());
    // The delayed quit is a timeout guard in case the expectation above
    // never fires.
    base::RunLoop run_loop;
    BrowserThread::PostDelayedTask(BrowserThread::UI, FROM_HERE,
                                   run_loop.QuitClosure(),
                                   TestTimeouts::action_timeout());
    run_loop.Run();
    // |fake_manager_factory_|'s fake_manager() is set on the sync
    // thread, but we can rely on the message loop barriers to
    // guarantee that we see the updated value.
    DCHECK(fake_manager_);
  }

  // Synchronously configures the backend's datatypes.
  void ConfigureDataTypes(syncer::ModelTypeSet types_to_add,
                          syncer::ModelTypeSet types_to_remove) {
    BackendDataTypeConfigurer::DataTypeConfigStateMap config_state_map;
    BackendDataTypeConfigurer::SetDataTypesState(
        BackendDataTypeConfigurer::CONFIGURE_ACTIVE,
        types_to_add,
        &config_state_map);
    BackendDataTypeConfigurer::SetDataTypesState(
        BackendDataTypeConfigurer::DISABLED,
        types_to_remove,
        &config_state_map);

    // Control types are always configured alongside the requested ones.
    types_to_add.PutAll(syncer::ControlTypes());
    backend_->ConfigureDataTypes(
        syncer::CONFIGURE_REASON_RECONFIGURATION,
        config_state_map,
        base::Bind(&SyncBackendHostTest::DownloadReady,
                   base::Unretained(this)),
        base::Bind(&SyncBackendHostTest::OnDownloadRetry,
                   base::Unretained(this)));
    // DownloadReady quits the loop; the delayed task is a timeout guard.
    base::RunLoop run_loop;
    BrowserThread::PostDelayedTask(BrowserThread::UI, FROM_HERE,
                                   run_loop.QuitClosure(),
                                   TestTimeouts::action_timeout());
    run_loop.Run();
  }

  // Broadcasts a local-refresh notification for |types|, as the UI would.
  void IssueRefreshRequest(syncer::ModelTypeSet types) {
    DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI));
    content::NotificationService::current()->Notify(
        chrome::NOTIFICATION_SYNC_REFRESH_LOCAL,
        content::Source<Profile>(profile_.get()),
        content::Details<syncer::ModelTypeSet>(&types));
  }

 protected:
  // Completion callback for ConfigureDataTypes; unblocks the run loop.
  void DownloadReady(syncer::ModelTypeSet succeeded_types,
                     syncer::ModelTypeSet failed_types) {
    base::MessageLoop::current()->Quit();
  }

  void OnDownloadRetry() {
    NOTIMPLEMENTED();
  }

  content::TestBrowserThreadBundle thread_bundle_;
  StrictMock<MockSyncFrontend> mock_frontend_;
  syncer::SyncCredentials credentials_;
  scoped_ptr<TestingProfile> profile_;
  scoped_ptr<SyncPrefs> sync_prefs_;
  scoped_ptr<SyncBackendHost> backend_;
  scoped_ptr<FakeSyncManagerFactory> fake_manager_factory_;
  FakeSyncManager* fake_manager_;  // Owned by the backend's SyncManager.
  syncer::ModelTypeSet enabled_types_;
  scoped_ptr<syncer::NetworkResources> network_resources_;
};

// Test basic initialization with no initial types (first time initialization).
// Only the nigori should be configured.
TEST_F(SyncBackendHostTest, InitShutdown) {
  InitializeBackend(true);
  // Initialization alone downloads only the control types (nigori et al.).
  EXPECT_TRUE(fake_manager_->GetAndResetDownloadedTypes().Equals(
      syncer::ControlTypes()));
  EXPECT_TRUE(fake_manager_->InitialSyncEndedTypes().Equals(
      syncer::ControlTypes()));
  EXPECT_TRUE(fake_manager_->GetTypesWithEmptyProgressMarkerToken(
      syncer::ControlTypes()).Empty());
}

// Test first time sync scenario. All types should be properly configured.
TEST_F(SyncBackendHostTest, FirstTimeSync) {
  InitializeBackend(true);
  // After init, only control types are present (same as InitShutdown).
  EXPECT_TRUE(fake_manager_->GetAndResetDownloadedTypes().Equals(
      syncer::ControlTypes()));
  EXPECT_TRUE(fake_manager_->InitialSyncEndedTypes().Equals(
      syncer::ControlTypes()));
  EXPECT_TRUE(fake_manager_->GetTypesWithEmptyProgressMarkerToken(
      syncer::ControlTypes()).Empty());

  // Configuring for the first time should download and enable everything
  // in enabled_types_ and leave no empty progress markers.
  ConfigureDataTypes(enabled_types_,
                     Difference(syncer::ModelTypeSet::All(),
                                enabled_types_));
  EXPECT_TRUE(fake_manager_->GetAndResetDownloadedTypes().HasAll(
      Difference(enabled_types_, syncer::ControlTypes())));
  EXPECT_TRUE(fake_manager_->InitialSyncEndedTypes().Equals(enabled_types_));
  EXPECT_TRUE(fake_manager_->GetAndResetEnabledTypes().Equals(enabled_types_));
  EXPECT_TRUE(fake_manager_->GetTypesWithEmptyProgressMarkerToken(
      enabled_types_).Empty());
}

// Test the restart after setting up sync scenario. No enabled types should be
// downloaded or cleaned.
TEST_F(SyncBackendHostTest, Restart) { sync_prefs_->SetSyncSetupCompleted(); syncer::ModelTypeSet all_but_nigori = enabled_types_; fake_manager_factory_->set_progress_marker_types(enabled_types_); fake_manager_factory_->set_initial_sync_ended_types(enabled_types_); InitializeBackend(true); EXPECT_TRUE(fake_manager_->GetAndResetDownloadedTypes().Empty()); EXPECT_TRUE(Intersection(fake_manager_->GetAndResetCleanedTypes(), enabled_types_).Empty()); EXPECT_TRUE(fake_manager_->InitialSyncEndedTypes().Equals(enabled_types_)); EXPECT_TRUE(fake_manager_->GetTypesWithEmptyProgressMarkerToken( enabled_types_).Empty()); ConfigureDataTypes(enabled_types_, Difference(syncer::ModelTypeSet::All(), enabled_types_)); EXPECT_TRUE(fake_manager_->GetAndResetDownloadedTypes().Empty()); EXPECT_TRUE(Intersection(fake_manager_->GetAndResetCleanedTypes(), enabled_types_).Empty()); EXPECT_TRUE(fake_manager_->InitialSyncEndedTypes().Equals(enabled_types_)); EXPECT_TRUE(fake_manager_->GetAndResetEnabledTypes().Equals(enabled_types_)); EXPECT_TRUE(fake_manager_->GetTypesWithEmptyProgressMarkerToken( enabled_types_).Empty()); } // Test a sync restart scenario where some types had never finished configuring. // The partial types should be purged, then reconfigured properly. TEST_F(SyncBackendHostTest, PartialTypes) { sync_prefs_->SetSyncSetupCompleted(); // Set sync manager behavior before passing it down. All types have progress // markers, but nigori and bookmarks are missing initial sync ended. syncer::ModelTypeSet partial_types(syncer::NIGORI, syncer::BOOKMARKS); syncer::ModelTypeSet full_types = Difference(enabled_types_, partial_types); fake_manager_factory_->set_progress_marker_types(enabled_types_); fake_manager_factory_->set_initial_sync_ended_types(full_types); // Bringing up the backend should purge all partial types, then proceed to // download the Nigori. 
InitializeBackend(true); EXPECT_TRUE(fake_manager_->GetAndResetDownloadedTypes().Equals( syncer::ModelTypeSet(syncer::NIGORI))); EXPECT_TRUE(fake_manager_->GetAndResetCleanedTypes().HasAll(partial_types)); EXPECT_TRUE(fake_manager_->InitialSyncEndedTypes().Equals( Union(full_types, syncer::ModelTypeSet(syncer::NIGORI)))); EXPECT_TRUE(fake_manager_->GetTypesWithEmptyProgressMarkerToken( enabled_types_).Equals( Difference(partial_types, syncer::ModelTypeSet(syncer::NIGORI)))); // Now do the actual configuration, which should download and apply bookmarks. ConfigureDataTypes(enabled_types_, Difference(syncer::ModelTypeSet::All(), enabled_types_)); EXPECT_TRUE(Intersection(fake_manager_->GetAndResetCleanedTypes(), enabled_types_).Empty()); EXPECT_TRUE(fake_manager_->GetAndResetDownloadedTypes().Equals( partial_types)); EXPECT_TRUE(fake_manager_->InitialSyncEndedTypes().Equals(enabled_types_)); EXPECT_TRUE(fake_manager_->GetAndResetEnabledTypes().Equals(enabled_types_)); EXPECT_TRUE(fake_manager_->GetTypesWithEmptyProgressMarkerToken( enabled_types_).Empty()); } // Test the behavior when we lose the sync db. Although we already have types // enabled, we should re-download all of them because we lost their data. TEST_F(SyncBackendHostTest, LostDB) { sync_prefs_->SetSyncSetupCompleted(); // Initialization should fetch the Nigori node. Everything else should be // left untouched. InitializeBackend(true); EXPECT_TRUE(fake_manager_->GetAndResetDownloadedTypes().Equals( syncer::ModelTypeSet(syncer::ControlTypes()))); EXPECT_TRUE(fake_manager_->InitialSyncEndedTypes().Equals( syncer::ModelTypeSet(syncer::ControlTypes()))); EXPECT_TRUE(fake_manager_->GetTypesWithEmptyProgressMarkerToken( enabled_types_).Equals( Difference(enabled_types_, syncer::ControlTypes()))); // The database was empty, so any cleaning is entirely optional. We want to // reset this value before running the next part of the test, though. 
fake_manager_->GetAndResetCleanedTypes(); // The actual configuration should redownload and apply all the enabled types. ConfigureDataTypes(enabled_types_, Difference(syncer::ModelTypeSet::All(), enabled_types_)); EXPECT_TRUE(fake_manager_->GetAndResetDownloadedTypes().HasAll( Difference(enabled_types_, syncer::ControlTypes()))); EXPECT_TRUE(Intersection(fake_manager_->GetAndResetCleanedTypes(), enabled_types_).Empty()); EXPECT_TRUE(fake_manager_->InitialSyncEndedTypes().Equals(enabled_types_)); EXPECT_TRUE(fake_manager_->GetAndResetEnabledTypes().Equals(enabled_types_)); EXPECT_TRUE(fake_manager_->GetTypesWithEmptyProgressMarkerToken( enabled_types_).Empty()); } TEST_F(SyncBackendHostTest, DisableTypes) { // Simulate first time sync. InitializeBackend(true); fake_manager_->GetAndResetCleanedTypes(); ConfigureDataTypes(enabled_types_, Difference(syncer::ModelTypeSet::All(), enabled_types_)); EXPECT_TRUE(fake_manager_->GetAndResetDownloadedTypes().Equals( enabled_types_)); EXPECT_TRUE(Intersection(fake_manager_->GetAndResetCleanedTypes(), enabled_types_).Empty()); EXPECT_TRUE(fake_manager_->InitialSyncEndedTypes().Equals(enabled_types_)); EXPECT_TRUE(fake_manager_->GetTypesWithEmptyProgressMarkerToken( enabled_types_).Empty()); // Then disable two datatypes. syncer::ModelTypeSet disabled_types(syncer::BOOKMARKS, syncer::SEARCH_ENGINES); syncer::ModelTypeSet old_types = enabled_types_; enabled_types_.RemoveAll(disabled_types); ConfigureDataTypes(enabled_types_, Difference(syncer::ModelTypeSet::All(), enabled_types_)); // Only those datatypes disabled should be cleaned. Nothing should be // downloaded. 
EXPECT_TRUE(fake_manager_->GetAndResetDownloadedTypes().Empty()); EXPECT_TRUE(Intersection(fake_manager_->GetAndResetCleanedTypes(), old_types).Equals(disabled_types)); EXPECT_TRUE(fake_manager_->InitialSyncEndedTypes().Equals(enabled_types_)); EXPECT_TRUE(fake_manager_->GetAndResetEnabledTypes().Equals(enabled_types_)); EXPECT_TRUE(fake_manager_->GetTypesWithEmptyProgressMarkerToken( enabled_types_).Empty()); } TEST_F(SyncBackendHostTest, AddTypes) { // Simulate first time sync. InitializeBackend(true); fake_manager_->GetAndResetCleanedTypes(); ConfigureDataTypes(enabled_types_, Difference(syncer::ModelTypeSet::All(), enabled_types_)); EXPECT_TRUE(fake_manager_->GetAndResetDownloadedTypes().Equals( enabled_types_)); EXPECT_TRUE(Intersection(fake_manager_->GetAndResetCleanedTypes(), enabled_types_).Empty()); EXPECT_TRUE(fake_manager_->InitialSyncEndedTypes().Equals(enabled_types_)); EXPECT_TRUE(fake_manager_->GetTypesWithEmptyProgressMarkerToken( enabled_types_).Empty()); // Then add two datatypes. syncer::ModelTypeSet new_types(syncer::EXTENSIONS, syncer::APPS); enabled_types_.PutAll(new_types); ConfigureDataTypes(enabled_types_, Difference(syncer::ModelTypeSet::All(), enabled_types_)); // Only those datatypes added should be downloaded (plus nigori). Nothing // should be cleaned aside from the disabled types. EXPECT_TRUE(fake_manager_->GetAndResetDownloadedTypes().Equals( Union(new_types, syncer::ModelTypeSet(syncer::NIGORI)))); EXPECT_TRUE(Intersection(fake_manager_->GetAndResetCleanedTypes(), enabled_types_).Empty()); EXPECT_TRUE(fake_manager_->InitialSyncEndedTypes().Equals(enabled_types_)); EXPECT_TRUE(fake_manager_->GetAndResetEnabledTypes().Equals(enabled_types_)); EXPECT_TRUE(fake_manager_->GetTypesWithEmptyProgressMarkerToken( enabled_types_).Empty()); } // And and disable in the same configuration. TEST_F(SyncBackendHostTest, AddDisableTypes) { // Simulate first time sync. 
InitializeBackend(true); fake_manager_->GetAndResetCleanedTypes(); ConfigureDataTypes(enabled_types_, Difference(syncer::ModelTypeSet::All(), enabled_types_)); EXPECT_TRUE(fake_manager_->GetAndResetDownloadedTypes().Equals( enabled_types_)); EXPECT_TRUE(Intersection(fake_manager_->GetAndResetCleanedTypes(), enabled_types_).Empty()); EXPECT_TRUE(fake_manager_->InitialSyncEndedTypes().Equals(enabled_types_)); EXPECT_TRUE(fake_manager_->GetTypesWithEmptyProgressMarkerToken( enabled_types_).Empty()); // Then add two datatypes. syncer::ModelTypeSet old_types = enabled_types_; syncer::ModelTypeSet disabled_types(syncer::BOOKMARKS, syncer::SEARCH_ENGINES); syncer::ModelTypeSet new_types(syncer::EXTENSIONS, syncer::APPS); enabled_types_.PutAll(new_types); enabled_types_.RemoveAll(disabled_types); ConfigureDataTypes(enabled_types_, Difference(syncer::ModelTypeSet::All(), enabled_types_)); // Only those datatypes added should be downloaded (plus nigori). Nothing // should be cleaned aside from the disabled types. EXPECT_TRUE(fake_manager_->GetAndResetDownloadedTypes().Equals( Union(new_types, syncer::ModelTypeSet(syncer::NIGORI)))); EXPECT_TRUE(Intersection(fake_manager_->GetAndResetCleanedTypes(), old_types).Equals(disabled_types)); EXPECT_TRUE(fake_manager_->InitialSyncEndedTypes().Equals(enabled_types_)); EXPECT_TRUE(fake_manager_->GetAndResetEnabledTypes().Equals(enabled_types_)); EXPECT_TRUE(fake_manager_->GetTypesWithEmptyProgressMarkerToken( old_types).Equals(disabled_types)); } // Test restarting the browser to newly supported datatypes. The new datatypes // should be downloaded on the configuration after backend initialization. TEST_F(SyncBackendHostTest, NewlySupportedTypes) { sync_prefs_->SetSyncSetupCompleted(); // Set sync manager behavior before passing it down. All types have progress // markers and initial sync ended except the new types. 
syncer::ModelTypeSet old_types = enabled_types_; fake_manager_factory_->set_progress_marker_types(old_types); fake_manager_factory_->set_initial_sync_ended_types(old_types); syncer::ModelTypeSet new_types(syncer::APP_SETTINGS, syncer::EXTENSION_SETTINGS); enabled_types_.PutAll(new_types); // Does nothing. InitializeBackend(true); EXPECT_TRUE(fake_manager_->GetAndResetDownloadedTypes().Empty()); EXPECT_TRUE(Intersection(fake_manager_->GetAndResetCleanedTypes(), old_types).Empty()); EXPECT_TRUE(fake_manager_->InitialSyncEndedTypes().Equals(old_types)); EXPECT_TRUE(fake_manager_->GetTypesWithEmptyProgressMarkerToken( enabled_types_).Equals(new_types)); // Downloads and applies the new types. ConfigureDataTypes(enabled_types_, Difference(syncer::ModelTypeSet::All(), enabled_types_)); EXPECT_TRUE(fake_manager_->GetAndResetDownloadedTypes().Equals( Union(new_types, syncer::ModelTypeSet(syncer::NIGORI)))); EXPECT_TRUE(Intersection(fake_manager_->GetAndResetCleanedTypes(), enabled_types_).Empty()); EXPECT_TRUE(fake_manager_->InitialSyncEndedTypes().Equals(enabled_types_)); EXPECT_TRUE(fake_manager_->GetAndResetEnabledTypes().Equals(enabled_types_)); EXPECT_TRUE(fake_manager_->GetTypesWithEmptyProgressMarkerToken( enabled_types_).Empty()); } // Test the newly supported types scenario, but with the presence of partial // types as well. Both partial and newly supported types should be downloaded // the configuration. TEST_F(SyncBackendHostTest, NewlySupportedTypesWithPartialTypes) { sync_prefs_->SetSyncSetupCompleted(); // Set sync manager behavior before passing it down. All types have progress // markers and initial sync ended except the new types. 
syncer::ModelTypeSet old_types = enabled_types_; syncer::ModelTypeSet partial_types(syncer::NIGORI, syncer::BOOKMARKS); syncer::ModelTypeSet full_types = Difference(enabled_types_, partial_types); fake_manager_factory_->set_progress_marker_types(old_types); fake_manager_factory_->set_initial_sync_ended_types(full_types); syncer::ModelTypeSet new_types(syncer::APP_SETTINGS, syncer::EXTENSION_SETTINGS); enabled_types_.PutAll(new_types); // Purge the partial types. The nigori will be among the purged types, but // the syncer will re-download it by the time the initialization is complete. InitializeBackend(true); EXPECT_TRUE(fake_manager_->GetAndResetDownloadedTypes().Equals( syncer::ModelTypeSet(syncer::NIGORI))); EXPECT_TRUE(fake_manager_->GetAndResetCleanedTypes().HasAll(partial_types)); EXPECT_TRUE(fake_manager_->InitialSyncEndedTypes().Equals( syncer::Union(full_types, syncer::ModelTypeSet(syncer::NIGORI)))); EXPECT_TRUE(fake_manager_->GetTypesWithEmptyProgressMarkerToken( enabled_types_).Equals(Union(new_types, Difference( partial_types, syncer::ModelTypeSet(syncer::NIGORI))))); // Downloads and applies the new types and partial types (which includes // nigori anyways). ConfigureDataTypes(enabled_types_, Difference(syncer::ModelTypeSet::All(), enabled_types_)); EXPECT_TRUE(fake_manager_->GetAndResetDownloadedTypes().Equals( Union(new_types, partial_types))); EXPECT_TRUE(Intersection(fake_manager_->GetAndResetCleanedTypes(), enabled_types_).Empty()); EXPECT_TRUE(fake_manager_->InitialSyncEndedTypes().Equals(enabled_types_)); EXPECT_TRUE(fake_manager_->GetAndResetEnabledTypes().Equals(enabled_types_)); EXPECT_TRUE(fake_manager_->GetTypesWithEmptyProgressMarkerToken( enabled_types_).Empty()); } // Ensure the device info tracker is initialized properly on startup. 
TEST_F(SyncBackendHostTest, InitializeDeviceInfo) { ASSERT_EQ(NULL, backend_->GetSyncedDeviceTracker()); InitializeBackend(true); const SyncedDeviceTracker* device_tracker = backend_->GetSyncedDeviceTracker(); ASSERT_TRUE(device_tracker->ReadLocalDeviceInfo()); } // Verify that downloading control types only downloads those types that do // not have initial sync ended set. TEST_F(SyncBackendHostTest, DownloadControlTypes) { sync_prefs_->SetSyncSetupCompleted(); // Set sync manager behavior before passing it down. Experiments and device // info are new types without progress markers or initial sync ended, while // all other types have been fully downloaded and applied. syncer::ModelTypeSet new_types(syncer::EXPERIMENTS, syncer::DEVICE_INFO); syncer::ModelTypeSet old_types = Difference(enabled_types_, new_types); fake_manager_factory_->set_progress_marker_types(old_types); fake_manager_factory_->set_initial_sync_ended_types(old_types); // Bringing up the backend should download the new types without downloading // any old types. InitializeBackend(true); EXPECT_TRUE(fake_manager_->GetAndResetDownloadedTypes().Equals(new_types)); EXPECT_TRUE(fake_manager_->GetAndResetCleanedTypes().Equals( Difference(syncer::ModelTypeSet::All(), enabled_types_))); EXPECT_TRUE(fake_manager_->InitialSyncEndedTypes().Equals(enabled_types_)); EXPECT_TRUE(fake_manager_->GetTypesWithEmptyProgressMarkerToken( enabled_types_).Empty()); } // Fail to download control types. It's believed that there is a server bug // which can allow this to happen (crbug.com/164288). The sync backend host // should detect this condition and fail to initialize the backend. // // The failure is "silent" in the sense that the GetUpdates request appears to // be successful, but it returned no results. This means that the usual // download retry logic will not be invoked. 
TEST_F(SyncBackendHostTest, SilentlyFailToDownloadControlTypes) { fake_manager_factory_->set_configure_fail_types(syncer::ModelTypeSet::All()); InitializeBackend(false); } // Test that local refresh requests are delivered to sync. TEST_F(SyncBackendHostTest, ForwardLocalRefreshRequest) { InitializeBackend(true); syncer::ModelTypeSet set1 = syncer::ModelTypeSet::All(); IssueRefreshRequest(set1); fake_manager_->WaitForSyncThread(); EXPECT_TRUE(set1.Equals(fake_manager_->GetLastRefreshRequestTypes())); syncer::ModelTypeSet set2 = syncer::ModelTypeSet(syncer::SESSIONS); IssueRefreshRequest(set2); fake_manager_->WaitForSyncThread(); EXPECT_TRUE(set2.Equals(fake_manager_->GetLastRefreshRequestTypes())); } // Test that local invalidations issued before sync is initialized are ignored. TEST_F(SyncBackendHostTest, AttemptForwardLocalRefreshRequestEarly) { syncer::ModelTypeSet set1 = syncer::ModelTypeSet::All(); IssueRefreshRequest(set1); InitializeBackend(true); fake_manager_->WaitForSyncThread(); EXPECT_FALSE(set1.Equals(fake_manager_->GetLastRefreshRequestTypes())); } // Test that local invalidations issued while sync is shutting down are ignored. TEST_F(SyncBackendHostTest, AttemptForwardLocalRefreshRequestLate) { InitializeBackend(true); backend_->StopSyncingForShutdown(); syncer::ModelTypeSet types = syncer::ModelTypeSet::All(); IssueRefreshRequest(types); fake_manager_->WaitForSyncThread(); EXPECT_FALSE(types.Equals(fake_manager_->GetLastRefreshRequestTypes())); backend_->Shutdown(SyncBackendHost::STOP); backend_.reset(); } // Test that configuration on signin sends the proper GU source. TEST_F(SyncBackendHostTest, DownloadControlTypesNewClient) { InitializeBackend(true); EXPECT_EQ(syncer::CONFIGURE_REASON_NEW_CLIENT, fake_manager_->GetAndResetConfigureReason()); } // Test that configuration on restart sends the proper GU source. 
TEST_F(SyncBackendHostTest, DownloadControlTypesRestart) { sync_prefs_->SetSyncSetupCompleted(); fake_manager_factory_->set_progress_marker_types(enabled_types_); fake_manager_factory_->set_initial_sync_ended_types(enabled_types_); InitializeBackend(true); EXPECT_EQ(syncer::CONFIGURE_REASON_NEWLY_ENABLED_DATA_TYPE, fake_manager_->GetAndResetConfigureReason()); } // It is SyncBackendHostCore responsibility to cleanup Sync Data folder if sync // setup hasn't been completed. This test ensures that cleanup happens. TEST_F(SyncBackendHostTest, TestStartupWithOldSyncData) { const char* nonsense = "slon"; base::FilePath temp_directory = profile_->GetPath().AppendASCII("Sync Data"); base::FilePath sync_file = temp_directory.AppendASCII("SyncData.sqlite3"); ASSERT_TRUE(base::CreateDirectory(temp_directory)); ASSERT_NE(-1, file_util::WriteFile(sync_file, nonsense, strlen(nonsense))); InitializeBackend(true); EXPECT_FALSE(base::PathExists(sync_file)); } } // namespace } // namespace browser_sync
{ "content_hash": "9f3a9cea20ff06784788f6b4b3c90683", "timestamp": "", "source": "github", "line_count": 666, "max_line_length": 80, "avg_line_length": 43.108108108108105, "alnum_prop": 0.7034482758620689, "repo_name": "ChromiumWebApps/chromium", "id": "21547212eb98098e9d04d1991c6bd1153cb07b88", "size": "30647", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "chrome/browser/sync/glue/sync_backend_host_impl_unittest.cc", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "ASP", "bytes": "853" }, { "name": "AppleScript", "bytes": "6973" }, { "name": "Arduino", "bytes": "464" }, { "name": "Assembly", "bytes": "52960" }, { "name": "Awk", "bytes": "8660" }, { "name": "C", "bytes": "42286199" }, { "name": "C#", "bytes": "1132" }, { "name": "C++", "bytes": "198616766" }, { "name": "CSS", "bytes": "937333" }, { "name": "DOT", "bytes": "2984" }, { "name": "Java", "bytes": "5695686" }, { "name": "JavaScript", "bytes": "21967126" }, { "name": "M", "bytes": "2190" }, { "name": "Matlab", "bytes": "2262" }, { "name": "Objective-C", "bytes": "7602057" }, { "name": "PHP", "bytes": "97817" }, { "name": "Perl", "bytes": "1210885" }, { "name": "Python", "bytes": "10774996" }, { "name": "R", "bytes": "262" }, { "name": "Shell", "bytes": "1316721" }, { "name": "Tcl", "bytes": "277091" }, { "name": "TypeScript", "bytes": "1560024" }, { "name": "XSLT", "bytes": "13493" }, { "name": "nesC", "bytes": "15243" } ], "symlink_target": "" }
require 'hashie' require 'hive/extensions/hashie_validate_enum' module Hive module Activities module Hotels class Payment < Hashie::Trash include Hashie::Extensions::IgnoreUndeclared property :subtotal, required: true property :total, required: true property :currency, required: true property :source, required: true end class PurchaseActivity < Hashie::Trash include Hashie::Extensions::IgnoreUndeclared include Hashie::Extensions::Coercion coerce_key :guests, Guest coerce_key :stay, Stay coerce_key :rates, Array[Rate] coerce_key :payment, Payment coerce_key :customer, Customer coerce_key :rooms, Array[Room] property :reservationId property :guests, required: true property :stay, required: true property :rates, default: [] property :payment, required: true property :customer property :rooms, default: [] def add_rate(args) rates << Rate.new(args) end def add_room(args) rooms << Room.new(args) end end end end end
{ "content_hash": "5355065d297898e649020258b91f9a40", "timestamp": "", "source": "github", "line_count": 45, "max_line_length": 52, "avg_line_length": 26.177777777777777, "alnum_prop": 0.6171477079796265, "repo_name": "denniss/wix-hive-ruby", "id": "fde954ac2b453ef8a27b81a5f0699bda88634653", "size": "1264", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "lib/hive/activities/hotels/hotels_purchase_activity.rb", "mode": "33188", "license": "mit", "language": [ { "name": "Ruby", "bytes": "155553" } ], "symlink_target": "" }
<RelativeLayout xmlns:android="http://schemas.android.com/apk/res/android" xmlns:tools="http://schemas.android.com/tools" android:layout_width="match_parent" android:layout_height="match_parent" xmlns:app="http://schemas.android.com/apk/res-auto" tools:context="com.createchance.doorgod.lockfragments.PatternLockFragment"> <Button android:id="@+id/pattern_lock_more" android:layout_width="40dp" android:layout_height="40dp" android:layout_alignParentEnd="true" android:background="@drawable/ic_more_vert_white_48dp"/> <LinearLayout android:orientation="vertical" android:layout_width="match_parent" android:layout_height="match_parent" android:layout_centerInParent="true"> <TextView android:layout_width="wrap_content" android:layout_height="wrap_content" android:layout_gravity="center_horizontal" android:layout_marginTop="50dp" android:fontFamily="sans-serif-thin" android:text="@string/fragment_pattern_view_title" android:textColor="@color/white" android:textSize="25sp"/> <TextView android:id="@+id/fingerprint_hint" android:layout_width="wrap_content" android:layout_height="wrap_content" android:layout_gravity="center_horizontal" android:fontFamily="sans-serif-thin" android:text="@string/fragment_pattern_view_fingerprint" android:textColor="@color/white" android:textSize="20sp"/> <ImageView android:id="@+id/fingerprint_icon" android:layout_width="wrap_content" android:layout_height="wrap_content" android:layout_gravity="center_horizontal" android:src="@drawable/ic_fingerprint_white_48dp"/> </LinearLayout> <com.createchance.doorgod.lockfragments.Lock9View android:id="@+id/patternView" android:layout_width="match_parent" android:layout_height="wrap_content" android:layout_gravity="center" android:layout_marginBottom="80dp" android:layout_alignParentBottom="true" android:layout_centerHorizontal="true" app:lock9_nodeSrc="@drawable/node_normal" app:lock9_nodeOnSrc="@drawable/node_active" app:lock9_nodeOnAnim="@anim/node_on_scale" app:lock9_padding="50dp" 
app:lock9_spacing="50dp" app:lock9_lineColor="@color/patternViewColor" app:lock9_lineWidth="8dp" app:lock9_autoLink="false" app:lock9_enableVibrate="true" app:lock9_vibrateTime="20" /> </RelativeLayout>
{ "content_hash": "067b50f059b1a4b7986dec574abea17f", "timestamp": "", "source": "github", "line_count": 69, "max_line_length": 79, "avg_line_length": 39.231884057971016, "alnum_prop": 0.643886220908755, "repo_name": "CreateChance/DoorGod", "id": "fb0f64d645bf0008b05fec148cb83a28e84292d3", "size": "2707", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "app/src/main/res/layout/fragment_pattern_lock.xml", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Java", "bytes": "228872" } ], "symlink_target": "" }
<?php declare(strict_types=1); namespace Sylius\Bundle\PromotionBundle\Validator; use Sylius\Bundle\PromotionBundle\Validator\Constraints\PromotionDateRange; use Sylius\Component\Promotion\Model\PromotionInterface; use Symfony\Component\Validator\Constraint; use Symfony\Component\Validator\ConstraintValidator; use Webmozart\Assert\Assert; /** * @author Łukasz Chruściel <lukasz.chrusciel@lakion.com> */ final class PromotionDateRangeValidator extends ConstraintValidator { /** * {@inheritdoc} */ public function validate($value, Constraint $constraint) { if (null === $value) { return; } /** @var PromotionInterface $value */ Assert::isInstanceOf($value, PromotionInterface::class); /** @var PromotionDateRange $constraint */ Assert::isInstanceOf($constraint, PromotionDateRange::class); if (null === $value->getStartsAt() || null === $value->getEndsAt()) { return; } if ($value->getStartsAt()->getTimestamp() > $value->getEndsAt()->getTimestamp()) { $this->context ->buildViolation($constraint->message) ->atPath('endsAt') ->addViolation() ; } } }
{ "content_hash": "ab0c09ccafa920b0d1647658ba46f74a", "timestamp": "", "source": "github", "line_count": 47, "max_line_length": 90, "avg_line_length": 26.914893617021278, "alnum_prop": 0.6308300395256917, "repo_name": "regnisolbap/Sylius", "id": "4574b1622547b66cc739ea58868e2af6b4e3f4bd", "size": "1478", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "src/Sylius/Bundle/PromotionBundle/Validator/PromotionDateRangeValidator.php", "mode": "33188", "license": "mit", "language": [ { "name": "ApacheConf", "bytes": "601" }, { "name": "CSS", "bytes": "2150" }, { "name": "Gherkin", "bytes": "789647" }, { "name": "HTML", "bytes": "303018" }, { "name": "JavaScript", "bytes": "71083" }, { "name": "PHP", "bytes": "6689113" }, { "name": "Shell", "bytes": "28860" } ], "symlink_target": "" }
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1"> <title>qarith: Not compatible 👼</title> <link rel="shortcut icon" type="image/png" href="../../../../../favicon.png" /> <link href="../../../../../bootstrap.min.css" rel="stylesheet"> <link href="../../../../../bootstrap-custom.css" rel="stylesheet"> <link href="//maxcdn.bootstrapcdn.com/font-awesome/4.2.0/css/font-awesome.min.css" rel="stylesheet"> <script src="../../../../../moment.min.js"></script> <!-- HTML5 Shim and Respond.js IE8 support of HTML5 elements and media queries --> <!-- WARNING: Respond.js doesn't work if you view the page via file:// --> <!--[if lt IE 9]> <script src="https://oss.maxcdn.com/html5shiv/3.7.2/html5shiv.min.js"></script> <script src="https://oss.maxcdn.com/respond/1.4.2/respond.min.js"></script> <![endif]--> </head> <body> <div class="container"> <div class="navbar navbar-default" role="navigation"> <div class="container-fluid"> <div class="navbar-header"> <a class="navbar-brand" href="../../../../.."><i class="fa fa-lg fa-flag-checkered"></i> Coq bench</a> </div> <div id="navbar" class="collapse navbar-collapse"> <ul class="nav navbar-nav"> <li><a href="../..">clean / released</a></li> <li class="active"><a href="">8.13.0 / qarith - 8.9.0</a></li> </ul> </div> </div> </div> <div class="article"> <div class="row"> <div class="col-md-12"> <a href="../..">« Up</a> <h1> qarith <small> 8.9.0 <span class="label label-info">Not compatible 👼</span> </small> </h1> <p>📅 <em><script>document.write(moment("2022-10-04 18:00:14 +0000", "YYYY-MM-DD HH:mm:ss Z").fromNow());</script> (2022-10-04 18:00:14 UTC)</em><p> <h2>Context</h2> <pre># Packages matching: installed # Name # Installed # Synopsis base-bigarray base base-threads base base-unix base conf-findutils 1 Virtual package relying on findutils conf-gmp 4 Virtual package relying on a GMP lib system installation coq 8.13.0 Formal proof management system num 1.4 
The legacy Num library for arbitrary-precision integer and rational arithmetic ocaml 4.12.1 The OCaml compiler (virtual package) ocaml-base-compiler 4.12.1 Official release 4.12.1 ocaml-config 2 OCaml Switch Configuration ocaml-options-vanilla 1 Ensure that OCaml is compiled with no special options enabled ocamlfind 1.9.5 A library manager for OCaml zarith 1.12 Implements arithmetic and logical operations over arbitrary-precision integers # opam file: opam-version: &quot;2.0&quot; maintainer: &quot;Hugo.Herbelin@inria.fr&quot; homepage: &quot;https://github.com/coq-contribs/qarith&quot; license: &quot;LGPL 2.1&quot; build: [make &quot;-j%{jobs}%&quot;] install: [make &quot;install&quot;] remove: [&quot;rm&quot; &quot;-R&quot; &quot;%{lib}%/coq/user-contrib/QArith&quot;] depends: [ &quot;ocaml&quot; &quot;coq&quot; {&gt;= &quot;8.9&quot; &amp; &lt; &quot;8.10~&quot;} ] tags: [ &quot;keyword: Q&quot; &quot;keyword: arithmetic&quot; &quot;keyword: rational numbers&quot; &quot;keyword: setoid&quot; &quot;keyword: ring&quot; &quot;category: Mathematics/Arithmetic and Number Theory/Rational numbers&quot; &quot;category: Miscellaneous/Extracted Programs/Arithmetic&quot; ] authors: [ &quot;Pierre Letouzey&quot; ] bug-reports: &quot;https://github.com/coq-contribs/qarith/issues&quot; dev-repo: &quot;git+https://github.com/coq-contribs/qarith.git&quot; synopsis: &quot;A Library for Rational Numbers (QArith)&quot; description: &quot;&quot;&quot; This contribution is a proposition of a library formalizing rational number in Coq.&quot;&quot;&quot; flags: light-uninstall url { src: &quot;https://github.com/coq-contribs/qarith/archive/v8.9.0.tar.gz&quot; checksum: &quot;md5=dbb5eb51a29032589cd351ea9eaf49a0&quot; } </pre> <h2>Lint</h2> <dl class="dl-horizontal"> <dt>Command</dt> <dd><code>true</code></dd> <dt>Return code</dt> <dd>0</dd> </dl> <h2>Dry install 🏜️</h2> <p>Dry install with the current Coq version:</p> <dl class="dl-horizontal"> <dt>Command</dt> <dd><code>opam install 
-y --show-action coq-qarith.8.9.0 coq.8.13.0</code></dd> <dt>Return code</dt> <dd>5120</dd> <dt>Output</dt> <dd><pre>[NOTE] Package coq is already installed (current version is 8.13.0). The following dependencies couldn&#39;t be met: - coq-qarith -&gt; coq &lt; 8.10~ -&gt; ocaml &lt; 4.10 base of this switch (use `--unlock-base&#39; to force) No solution found, exiting </pre></dd> </dl> <p>Dry install without Coq/switch base, to test if the problem was incompatibility with the current Coq/OCaml version:</p> <dl class="dl-horizontal"> <dt>Command</dt> <dd><code>opam remove -y coq; opam install -y --show-action --unlock-base coq-qarith.8.9.0</code></dd> <dt>Return code</dt> <dd>0</dd> </dl> <h2>Install dependencies</h2> <dl class="dl-horizontal"> <dt>Command</dt> <dd><code>true</code></dd> <dt>Return code</dt> <dd>0</dd> <dt>Duration</dt> <dd>0 s</dd> </dl> <h2>Install 🚀</h2> <dl class="dl-horizontal"> <dt>Command</dt> <dd><code>true</code></dd> <dt>Return code</dt> <dd>0</dd> <dt>Duration</dt> <dd>0 s</dd> </dl> <h2>Installation size</h2> <p>No files were installed.</p> <h2>Uninstall 🧹</h2> <dl class="dl-horizontal"> <dt>Command</dt> <dd><code>true</code></dd> <dt>Return code</dt> <dd>0</dd> <dt>Missing removes</dt> <dd> none </dd> <dt>Wrong removes</dt> <dd> none </dd> </dl> </div> </div> </div> <hr/> <div class="footer"> <p class="text-center"> Sources are on <a href="https://github.com/coq-bench">GitHub</a> © Guillaume Claret 🐣 </p> </div> </div> <script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"></script> <script src="../../../../../bootstrap.min.js"></script> </body> </html>
{ "content_hash": "e7628cdd9915f1b234ec296ea7a8a328", "timestamp": "", "source": "github", "line_count": 175, "max_line_length": 159, "avg_line_length": 40.777142857142856, "alnum_prop": 0.5447029147982063, "repo_name": "coq-bench/coq-bench.github.io", "id": "ab3591ef4f19f1c4cfd26664fdedb1e40fb3f239", "size": "7161", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "clean/Linux-x86_64-4.12.1-2.0.8/released/8.13.0/qarith/8.9.0.html", "mode": "33188", "license": "mit", "language": [], "symlink_target": "" }
{% extends "admin/base.html" %} {% block title %}{{ title }} | {{ site_title|default:_('Django site admin') }}{% endblock %} {% block branding %} <h1 id="site-name"><a href="{% url 'admin:index' %}">Midori's blog Administration</a></h1> {% endblock %} {% block nav-global %}{% endblock %}
{ "content_hash": "806d191dfffa4af8bf80c7d92b7c027b", "timestamp": "", "source": "github", "line_count": 9, "max_line_length": 92, "avg_line_length": 32.44444444444444, "alnum_prop": 0.5993150684931506, "repo_name": "midori1/midorinoblog", "id": "294056a2588c62d3d62d3d28bedd24db2f130a58", "size": "292", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "templates/admin/base_site.html", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "394892" }, { "name": "HTML", "bytes": "122713" }, { "name": "JavaScript", "bytes": "781200" }, { "name": "Python", "bytes": "5065171" }, { "name": "Ruby", "bytes": "2596" } ], "symlink_target": "" }
<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE module PUBLIC "-//NetBeans//DTD Module Status 1.0//EN" "http://www.netbeans.org/dtds/module-status-1_0.dtd"> <module name="org.apache.commons.logging"> <param name="autoload">true</param> <param name="eager">false</param> <param name="jar">modules/org-apache-commons-logging.jar</param> <param name="reloadable">false</param> </module>
{ "content_hash": "3d47a6520a1f45eb64b30a220bd63615", "timestamp": "", "source": "github", "line_count": 9, "max_line_length": 77, "avg_line_length": 47.22222222222222, "alnum_prop": 0.6517647058823529, "repo_name": "shamanDevel/RPG-Editor", "id": "51ec7e483db1d252646967d7368df0bff1dd3d67", "size": "425", "binary": false, "copies": "5", "ref": "refs/heads/master", "path": "ide/config/Modules/org-apache-commons-logging.xml", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "24616" }, { "name": "CSS", "bytes": "3923" }, { "name": "Java", "bytes": "140105" }, { "name": "Shell", "bytes": "82856" }, { "name": "XSLT", "bytes": "221100" } ], "symlink_target": "" }
package com.example.peterhu.myapplication; import android.content.Context; import android.graphics.BitmapFactory; import android.graphics.drawable.BitmapDrawable; import android.graphics.drawable.Drawable; import android.view.View; import android.view.ViewGroup; import android.view.animation.AlphaAnimation; import android.view.animation.AnimationUtils; import android.widget.Button; import android.widget.ImageView; import android.widget.RelativeLayout; import android.widget.TextView; /** * Created by peterhu on 2017/3/30. */ public class PHEmptyDataSet extends RelativeLayout{ /** * 没有网络的时候PHEmptyNoDataNoNetwork; * 没有数据的时候PHEmptyNoData; * */ public enum TapNoDataType { PHEmptyNoDataNoNetwork, PHEmptyNoData, } /** * 代理接口 * */ public interface PHEmptyDataSetDelegate { /** * 点击图片实现加载,通过返回bool值决定视图是否消失,不设置返回null,使用默认配置。 * @param layout 布局 * @param type 无数据类型 * @return 返回值 */ Boolean didTapEmptyDataView(RelativeLayout layout,TapNoDataType type); /** * 获取自定义的视图,不设置返回null,使用默认配置。 * @param layout 布局 * @param type 无数据类型 * @return 返回的按钮 */ Button viewForEmptyDataSet(RelativeLayout layout,TapNoDataType type); /** * 设置背景颜色 * @param layout 布局 * @param type 无数据类型 * @return 返回的颜色值 */ Integer backgroundColorForEmptyDataSet(RelativeLayout layout,TapNoDataType type); /** * 设置垂直偏移量 * @param layout 布局 * @param type 无数据类型 * @return 返回垂直偏移量 */ Integer verticalOffsetForEmptyDataSet(RelativeLayout layout,TapNoDataType type); /** * 设置水平偏移 * @param layout 布局 * @param type 无数据类型 * @return 返回水平偏移量 */ Integer horizonOffsetForEmptyDataSet(RelativeLayout layout, TapNoDataType type); } public Context mcontext; public PHEmptyDataSetDelegate delegate; private ViewGroup msuperGroup; private View mfatherView; private ImageView imageBTN; private TextView titleV; private boolean isFirstCreated; public PHEmptyDataSet(View fatherView){ super(fatherView.getContext()); mcontext = fatherView.getContext(); this.isFirstCreated = false; this.msuperGroup = (ViewGroup) fatherView.getParent(); this.mfatherView = 
fatherView; ViewGroup.LayoutParams mparam = mfatherView.getLayoutParams(); this.setLayoutParams(mparam); this.imageBTN = new ImageView(mcontext); this.imageBTN.setId(9527); titleV = new TextView(mcontext); } /** * 有数据的时候调用,数据大于一条。 */ public void hasData(){ switchView(false); } /** * 无数据时调用 */ public void noData(){ setPHEmptyDataSetWithType(TapNoDataType.PHEmptyNoData); } /** * 无网络时调用 */ public void noNetWork(){ setPHEmptyDataSetWithType(TapNoDataType.PHEmptyNoDataNoNetwork); } private void setPHEmptyDataSetWithType(TapNoDataType type){ imageBTN.setClickable(true); Integer verticalOff = delegate == null ? null: delegate.verticalOffsetForEmptyDataSet(this,type); if (verticalOff == null) { verticalOff = (type == TapNoDataType.PHEmptyNoData) ? PHEmptyDataSetConstants.DataSet_offset_v_noData : PHEmptyDataSetConstants.DataSet_offset_v_noNetWork; } Integer horizonOff = delegate == null ? null: delegate.horizonOffsetForEmptyDataSet(this,type); if (horizonOff == null) { horizonOff = (type == TapNoDataType.PHEmptyNoData) ? PHEmptyDataSetConstants.DataSet_offset_h_noData : PHEmptyDataSetConstants.DataSet_offset_h_noNetWork; } Integer color = delegate == null ? null: delegate.backgroundColorForEmptyDataSet(this,type); if (color == null){ color = (type == TapNoDataType.PHEmptyNoData) ? PHEmptyDataSetConstants.DataSet_backGround_noData : PHEmptyDataSetConstants.DataSet_backGround_noNetWork; } setBackgroundColor(color); String title ; Drawable draw; Integer titleColor; Float titleSize; Integer mwidth; Integer mheight; Button button = delegate == null ? null: delegate.viewForEmptyDataSet(this,type); if (button == null){ title = (type == TapNoDataType.PHEmptyNoData) ? PHEmptyDataSetConstants.DataSet_title_noData : PHEmptyDataSetConstants.DataSet_title_noNetWork; draw = (type == TapNoDataType.PHEmptyNoData) ? 
new BitmapDrawable(getResources(),BitmapFactory.decodeResource(getResources(), PHEmptyDataSetConstants.DataSet_pic_noData)) : new BitmapDrawable(getResources(),BitmapFactory.decodeResource(getResources(), PHEmptyDataSetConstants.DataSet_pic_noNetWork)); titleColor = (type == TapNoDataType.PHEmptyNoData) ? PHEmptyDataSetConstants.DataSet_titleColor_noData : PHEmptyDataSetConstants.DataSet_titleColor_noNetWork; titleSize = (type == TapNoDataType.PHEmptyNoData) ? PHEmptyDataSetConstants.DataSet_titleSize_noData : PHEmptyDataSetConstants.DataSet_titleSize_noNetWork; mwidth = (type == TapNoDataType.PHEmptyNoData) ? PHEmptyDataSetConstants.DataSet_width_noData : PHEmptyDataSetConstants.DataSet_width_noNetWork; mheight = (type == TapNoDataType.PHEmptyNoData) ? PHEmptyDataSetConstants.DataSet_height_noData : PHEmptyDataSetConstants.DataSet_height_noNetWork; } else { title = (String)button.getText(); draw = button.getBackground(); titleColor = button.getCurrentTextColor(); titleSize = button.getTextSize(); mwidth = button.getLayoutParams().width; mheight = button.getLayoutParams().height; } Integer superHeight = getLayoutParams().height == -1 ? msuperGroup.getHeight():getLayoutParams().height; Integer superWidth = getLayoutParams().width == -1 ? 
msuperGroup.getWidth():getLayoutParams().width; imageBTN.setImageDrawable(draw); imageBTN.setScaleType(ImageView.ScaleType.FIT_XY); titleV.setText(title); titleV.setTextColor(titleColor); titleV.setTextSize(titleSize); titleV.setTextAlignment(TEXT_ALIGNMENT_CENTER); RelativeLayout.LayoutParams param1 = new RelativeLayout.LayoutParams(mwidth,mheight); RelativeLayout.LayoutParams param2 = new RelativeLayout.LayoutParams(superWidth * 2 / 3, ViewGroup.LayoutParams.WRAP_CONTENT); param1.addRule(RelativeLayout.ALIGN_PARENT_START); param1.addRule(RelativeLayout.ALIGN_PARENT_TOP); param2.addRule(RelativeLayout.BELOW,9527); param1.topMargin = (superHeight - param1.height) / 2 + verticalOff; param1.leftMargin = (superWidth - param1.width) / 2 + horizonOff; param2.topMargin = PHEmptyDataSetConstants.DataSet_Image_title_padding; param2.leftMargin = (superWidth - param2.width) / 2 + horizonOff; imageBTN.setLayoutParams(param1); titleV.setLayoutParams(param2); if (!isFirstCreated){ addView(titleV); addView(imageBTN); isFirstCreated = true; } final TapNoDataType mtype = type; imageBTN.setOnClickListener(new OnClickListener() { @Override public void onClick(View v) { imageBTN.setClickable(false); Boolean isAllowedClick = delegate == null ? 
null: delegate.didTapEmptyDataView(PHEmptyDataSet.this,mtype); if (isAllowedClick == null){isAllowedClick = false;} if (isAllowedClick) {PHEmptyDataSet.this.hasData();} } }); switchView(true); } private void switchView(boolean isChange) { if (isChange) { AlphaAnimation alphaAnimation = (AlphaAnimation) AnimationUtils.loadAnimation(mcontext, R.anim.alphato1); int index = msuperGroup.indexOfChild(mfatherView); if (index != -1){ msuperGroup.removeView(mfatherView); } if((this.getParent() == null)&&(index != -1)){ msuperGroup.addView(this,index); this.startAnimation(alphaAnimation);} } else { AlphaAnimation alphaAnimation = (AlphaAnimation) AnimationUtils.loadAnimation(mcontext,R.anim.alphato1); int index = msuperGroup.indexOfChild(this); if (index != -1){ msuperGroup.removeView(this); } if ((mfatherView.getParent() == null)&&(index != -1)){ msuperGroup.addView(mfatherView,index); mfatherView.startAnimation(alphaAnimation);} } } }
{ "content_hash": "3255584955a820a021266442534fa530", "timestamp": "", "source": "github", "line_count": 234, "max_line_length": 281, "avg_line_length": 39.876068376068375, "alnum_prop": 0.6211552888222055, "repo_name": "HeterPu/PHEmptyDataSetForAndroid", "id": "104a140ce1198ee272bd58ef5eb221458d65b94a", "size": "9701", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "MyApplication/app/src/main/java/com/example/peterhu/myapplication/PHEmptyDataSet.java", "mode": "33261", "license": "mit", "language": [ { "name": "Java", "bytes": "15399" } ], "symlink_target": "" }
from amonagent.modules.processes import processes_data_collector
from amonagent.modules.core import (
    get_uptime,
    get_memory_info,
    get_cpu_utilization,
    get_load_average,
    disk_check,
    get_network_traffic,
    get_ip_address,
    get_cpu_info
)
from amonagent.modules.distro import get_distro
from amonagent.modules.plugins import discover_plugins

import logging
log = logging.getLogger(__name__)


class Runner(object):
    """Collects host information, system metrics, process data and the
    results of all discovered plugins for the agent to report."""

    def __init__(self):
        # Plugins are discovered once, at agent start-up.
        self.plugins_list = discover_plugins()

    def info(self):
        """Return static host information (CPU model, IP address, distro)."""
        system_info_dict = {
            'processor': get_cpu_info(),
            'ip_address': get_ip_address(),
            'distro': get_distro(),
        }

        return system_info_dict

    def system(self):
        """Return a snapshot of the current system metrics."""
        system_data_dict = {
            'memory': get_memory_info(),
            'cpu': get_cpu_utilization(),
            'disk': disk_check.check(),
            'network': get_network_traffic(),
            'loadavg': get_load_average(),
            'uptime': get_uptime(),
        }

        return system_data_dict

    def processes(self):
        """Return per-process data from the processes collector."""
        return processes_data_collector.collect()

    def plugins(self):
        """Collect and return the results of every discovered plugin.

        A plugin that raises is logged and skipped, so one broken plugin
        neither stops the agent nor discards the results already gathered
        from the other plugins.
        """
        plugin_result_dict = {}
        for plugin in self.plugins_list:
            # Don't stop the agent if the plugin data cannot be collected
            try:
                plugin.collect()
                plugin_result_dict[plugin.name] = plugin.result
            except Exception:
                # Fix: previously this returned False here, which aborted
                # the whole collection run on the first failing plugin.
                log.exception("Can't collect data for plugin: {0}".format(plugin.name))

        return plugin_result_dict

runner = Runner()
{ "content_hash": "444c76103935ebeb7f2afcc785a3ccc2", "timestamp": "", "source": "github", "line_count": 69, "max_line_length": 75, "avg_line_length": 19.91304347826087, "alnum_prop": 0.6899563318777293, "repo_name": "amonapp/amonagent-legacy", "id": "fa5554322f4c9dd81f36eb7ddb8bec3624d5d196", "size": "1374", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "amonagent/runner.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "4892" }, { "name": "Puppet", "bytes": "330" }, { "name": "Python", "bytes": "62122" }, { "name": "SaltStack", "bytes": "1193" }, { "name": "Shell", "bytes": "10114" } ], "symlink_target": "" }
<html> <head> <title>Sweat the Small Stuff (BBC Three)</title> <script type="text/javascript" src="../common.js"></script> <link rel="stylesheet" media="all" href="../style.css" type="text/css"/> <script type="text/javascript" src="../panelshows.js"></script> <meta name="twitter:card" content="summary"> <meta name="twitter:site" content=""> <meta name="twitter:url" property="og:url" content="http://www.strudel.org.uk/panelshows/shows/sweatthesmallstuff"> <meta name="twitter:title" property="og:title" content="Sweat the Small Stuff (BBC Three)"> <meta name="twitter:description" property="og:description" content="Panel shows on UK television and radio include few female guests. We compile stats to see how bad the problem is."> <meta name="twitter:image" property="og:image" content="http://strudel.org.uk/images/thumb_panelshows.png"> </head> <body> <!--#include virtual="nav.txt" --> <div class="page"> <h1><a href="https://en.wikipedia.org/wiki/Sweat_the_Small_Stuff#Episodes">Sweat the Small Stuff</a> (BBC Three/Talkback; 2013-)</h1> <div style="float:right;margin-top:-0.5em;">Key: <span class="male">&nbsp;male&nbsp;</span> <span class="female">&nbsp;female&nbsp;</span> <span class="other">&nbsp;other/diverse&nbsp;</span></div> <p>By <a href="http://twitter.com/astronomyblog">@astronomyblog</a> &amp; <a href="http://twitter.com/LoveGraphs">@LoveGraphs</a>. Stats last updated 9 April 2021.</p> <div class="show" id="sweatthesmallstuff"> <p>It seems there may have been at least <strong>32 shows</strong> and 242 listed appearances. With 7 people per show, <a href="https://en.wikipedia.org/wiki/Sweat_the_Small_Stuff#Episodes">Wikipedia</a> has <strong>8% more appearances listed than required</strong> (and probably a similar percent of people) perhaps due to listing special guests. 
The host/captains/regulars have included <a href="../people/gr0xurn4.html">Nick Grimshaw</a> (Host &times; 32), <a href="../people/w0tesu4r.html">Melvin Odoom</a> (Team Captain &times; 32), <a href="../people/zoe5sjqv.html">Rochelle Humes</a> (Team Captain &times; 24, Panellist &times; 5), <a href="../people/u2ywgfmh.html">Rickie Haywood Williams</a> (Team Captain &times; 8).</p> <h3>Appearances</h3> <p class="label">Total appearances:</p> <div class="bar"><div class="male" style="width:67.4%" title="67.4% male">67.4% (163)</div><div class="female" style="width:32.6%" title="32.6% female">32.6% (79)</div></div> <p class="label">Guest appearances i.e. ignoring the host/captains/regulars:</p> <div class="bar"><div class="male" style="width:62.3%" title="62.3% male">62.3% (91)</div><div class="female" style="width:37.7%" title="37.7% female">37.7% (55)</div></div> <h3>People</h3> <p class="label">All people:</p> <div class="bar"><div class="male" style="width:59.0%" title="59.0% male">59.0% (69)</div><div class="female" style="width:41.0%" title="41.0% female">41.0% (48)</div></div> <p class="label">Guests i.e. ignoring the host/captains/regulars:</p> <div class="bar"><div class="male" style="width:57.9%" title="57.9% male">57.9% (66)</div><div class="female" style="width:42.1%" title="42.1% female">42.1% (48)</div></div> <div id="sweatthesmallstuff_graph" class="graph"></div> <div class="git"><a href="https://github.com/slowe/panelshows/blob/main/shows/data/sweatthesmallstuff.csv" class="repo">Edit <em>Sweat the Small Stuff</em> cast list</a></div> <p>Last updated: 2021/04/09</p> </div> </div> </body> </html>
{ "content_hash": "86610a0bebbadc101999d5c45c93f7f7", "timestamp": "", "source": "github", "line_count": 36, "max_line_length": 735, "avg_line_length": 97.16666666666667, "alnum_prop": 0.6889651229273871, "repo_name": "slowe/panelshows", "id": "5e6f20a78cfabcdc1639a2ba7a15a5ebf37d17ba", "size": "3498", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "shows/sweatthesmallstuff.html", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "8431" }, { "name": "HTML", "bytes": "25483901" }, { "name": "JavaScript", "bytes": "95028" }, { "name": "Perl", "bytes": "19899" } ], "symlink_target": "" }
<!DOCTYPE html> <meta charset="utf-8"> <title>CSS Text Test: white-space: pre-wrap</title> <link rel="author" title="Javier Fernandez Garcia-Boente" href="mailto:jfernandez@igalia.com"> <link rel="help" href="https://drafts.csswg.org/css-text-3/#valdef-white-space-pre-wrap"> <link rel="help" href="https://drafts.csswg.org/css-text-3/#valdef-word-break-break-word"> <meta name="flags" content="ahem"> <link rel="match" href="reference/pre-wrap-001-ref.html"> <meta name="assert" content="The word is not broken if there are previous breaking opportunities, honoring the white-space: pre-wrap value."> <link rel="stylesheet" type="text/css" href="/fonts/ahem.css" /> <style> div { font-size: 20px; font-family: Ahem; line-height: 1em; } .red { position: absolute; white-space: pre; background: green; color: red; width: 40px; height: 40px; z-index: -1; } .test { color: green; width: 2ch; white-space: pre-wrap; word-break: break-word; } </style> <body> <p>Test passes if there is a <strong>filled green square</strong> and <strong>no red</strong>.</p> <div class="red"><br>XX</div> <div class="test"> XX</div> </body>
{ "content_hash": "f3009be3455f6d6b37568ee6e694ca45", "timestamp": "", "source": "github", "line_count": 38, "max_line_length": 141, "avg_line_length": 30.44736842105263, "alnum_prop": 0.6862575626620571, "repo_name": "scheib/chromium", "id": "0590f669e918a8782142784080179b1f37cbaf45", "size": "1157", "binary": false, "copies": "33", "ref": "refs/heads/main", "path": "third_party/blink/web_tests/external/wpt/css/css-text/white-space/pre-wrap-016.html", "mode": "33188", "license": "bsd-3-clause", "language": [], "symlink_target": "" }
from bda.plone.shop.tests import Shop_INTEGRATION_TESTING
from bda.plone.shop.tests import set_browserlayer
from bda.plone.shop.utils import get_shop_settings

import plone.api
import unittest2 as unittest


class TestUser(unittest.TestCase):
    """Integration tests around user creation in the shop."""

    layer = Shop_INTEGRATION_TESTING

    def setUp(self):
        # Pull the fixtures out of the test layer and activate the
        # shop-specific browser layer on the request.
        self.portal = self.layer['portal']
        self.request = self.layer['request']
        set_browserlayer(self.request)

    def test_is_customer(self):
        """A freshly created user must be granted the "Customer" role."""
        settings = get_shop_settings()
        self.assertTrue(settings.add_customer_role_to_new_users)
        credentials = {
            'email': "user@test.com",
            'username': "testuser",
            'password': "testuser",
        }
        plone.api.user.create(**credentials)
        roles = plone.api.user.get_roles(username="testuser")
        self.assertIn('Customer', roles)
{ "content_hash": "2c4ee309b1c148216c7b8c2e2568fce3", "timestamp": "", "source": "github", "line_count": 28, "max_line_length": 75, "avg_line_length": 31.071428571428573, "alnum_prop": 0.6505747126436782, "repo_name": "TheVirtualLtd/bda.plone.shop", "id": "b9998ec13b9c229fa90fc1bdf5422efbf9dead39", "size": "894", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "src/bda/plone/shop/tests/test_user.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "6552" }, { "name": "JavaScript", "bytes": "2481" }, { "name": "Python", "bytes": "151173" }, { "name": "RobotFramework", "bytes": "10582" }, { "name": "Shell", "bytes": "2999" } ], "symlink_target": "" }
var crypto = require('crypto'),
    Utils = require('./utils.js'),
    utils = new Utils();

/**
 * A logical client. One client, identified by a random hex ID, may own
 * several simultaneous socket connections.
 *
 * @param {Object} socket - the first socket belonging to this client
 */
var Client = function(socket) {
    // Constructor
    this.connections = [];
    this.ID = crypto.randomBytes(64).toString('hex');
    this.addConnection(socket);

    // Send MOTD
    socket.emit('message', {'message': 'Hello world'});
}

/**
 * Registers an additional socket for this client, wires up its event
 * handlers and acknowledges it with the client ID.
 */
Client.prototype.addConnection = function(socket) {
    this.connections.push(socket);

    // Set up handlers for new connection
    this.addHandlers(socket);

    // Acknowledge client
    socket.emit('registered', this.ID);
}

/**
 * @returns {number} the number of sockets currently open for this client
 */
Client.prototype.openConnections = function() {
    return this.connections.length;
}

/**
 * Wires up per-socket event handlers (currently only 'disconnect').
 */
Client.prototype.addHandlers = function(socket) {
    var self = this;

    socket.on('disconnect', function() {
        console.log('[%s] %s [%s]', utils.shortID(socket.id), 'Disconnecting'.white, utils.shortID(self.ID));

        // Find this connection in the list of open connections
        // Fix: 'i' was an implicit global (missing var declaration).
        var connectionsLength = self.connections.length;
        for (var i = 0; i < connectionsLength; i++) {
            if (self.connections[i].id == socket.id) {
                // Fix: splice() already removes the element; the previous
                // 'delete' of splice's return value was a meaningless no-op.
                self.connections.splice(i, 1);
                console.log('[%s] %s [%s](%d)', utils.shortID(socket.id), 'Disconnected'.magenta, utils.shortID(self.ID), self.openConnections());
                break;
            }
        }

        self.checkChildlessness();
    });
}

/**
 * Debounced (5s) check that removes this client from the global registry
 * once it no longer has any open connections.
 */
Client.prototype.checkChildlessness = function() {
    var self = this;

    if (this.childlessnessTimeout) {
        clearTimeout(this.childlessnessTimeout);
    }
    this.childlessnessTimeout = setTimeout(function() {
        // Are there any open connections, remove client
        if (self.connections.length === 0) {
            global.removeClient(self.ID);
            console.log('%s [%s]', 'Removed childless client'.magenta, utils.shortID(self.ID));
        }
    }, 5000);
}

module.exports = Client;
{ "content_hash": "05e7b51746d132c055d0ad80a9b5411f", "timestamp": "", "source": "github", "line_count": 68, "max_line_length": 146, "avg_line_length": 28.029411764705884, "alnum_prop": 0.6201469045120671, "repo_name": "HackThis/NexClient", "id": "ea03c251a737e06d5bca8e6cad10e9cf44da6719", "size": "1906", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/lib/client.js", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "423" }, { "name": "JavaScript", "bytes": "4223" } ], "symlink_target": "" }
Playing with seaborn to create some graphs. Check out: http://mateusz.at/blog/2015/09/07/playing-with-seaborn---visualizing-natality-data
{ "content_hash": "3ccec4d3305d28288bce484d9a1cbc1f", "timestamp": "", "source": "github", "line_count": 4, "max_line_length": 82, "avg_line_length": 34.75, "alnum_prop": 0.7913669064748201, "repo_name": "masteusz/seaborn_natality", "id": "8c78cfaefd683d92dab0d0078e8dc5772af40708", "size": "158", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "README.md", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "3926" } ], "symlink_target": "" }
package org.apache.harmony.jretools.policytool.model;

/**
 * Abstract ancestor to represent a policy entry.
 * Concrete subclasses provide the textual form of the entry via {@link #getText()}.
 */
public abstract class PolicyEntry {

    /** Terminator character of the policy entry texts. */
    public static final char TERMINATOR_CHAR = ';';

    /**
     * Returns the policy entry text.<br>
     * Should not contain a line separator in the end but <code>TERMINATOR_CHAR</code> must be included.
     * @return the policy entry text
     */
    public abstract String getText();

}
{ "content_hash": "9438793a38a8c8cfcad0480ccbd70923", "timestamp": "", "source": "github", "line_count": 20, "max_line_length": 104, "avg_line_length": 25.25, "alnum_prop": 0.6831683168316832, "repo_name": "freeVM/freeVM", "id": "861fea942e71db4c7c4712616895b790e7c3efa8", "size": "1306", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "enhanced/java/jdktools/modules/jretools/src/main/java/org/apache/harmony/jretools/policytool/model/PolicyEntry.java", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Assembly", "bytes": "116828" }, { "name": "C", "bytes": "17860389" }, { "name": "C++", "bytes": "19007206" }, { "name": "CSS", "bytes": "217777" }, { "name": "Java", "bytes": "152108632" }, { "name": "Objective-C", "bytes": "106412" }, { "name": "Objective-J", "bytes": "11029421" }, { "name": "Perl", "bytes": "305690" }, { "name": "Scilab", "bytes": "34" }, { "name": "Shell", "bytes": "153821" }, { "name": "XSLT", "bytes": "152859" } ], "symlink_target": "" }
'use strict';

/**
 * Home page controller: drives two independent auto-advancing carousels,
 * one for background slides and one for quotes.
 */
angular.module('core').controller('HomeController', ['$scope', 'Authentication', '$timeout',
    function($scope, Authentication, $timeout) {
        // This provides Authentication context.
        $scope.authentication = Authentication;

        // Auto-advance periods in milliseconds.
        $scope.quoteInterval = 7000;
        $scope.myInterval = 5000;

        $scope.quotes = [
            '"So many books, so little time"',
            '"A reader lives a thousand lives before he dies, said Jojen. The man who never reads lives only one"',
            '"If you can make a woman laugh, you can make her do anything"',
            '"All extremes of feelings are allied with madness"',
            '"It does not do to dwell on dreams and forget to live"',
            '"I am not afraid of death, I just want to be there when it happens"',
            // 'If I offer you a glass of water and I bring back a glass of ice, I\'m trying to teach you patience. And also that sometimes you get ice with no water, and later you\'ll get water with no ice. Ah, but that\'s life, no? '
            'If I offer you a glass of water and I bring back a glass of ice, I\'m trying to teach you patience. And also that sometimes you get ice with no water, and later you\'ll get water with no ice. Ah, but that\'s life, no? '
        ];

        $scope.slides = [
            'modules/core/img/brand/backImage.jpg',
            'modules/core/img/brand/backImage2.jpg'
        ];

        // initial image index
        $scope._Index = 0;

        // initial quote index
        $scope._qIndex = 0;

        // if a current image is the same as requested image
        $scope.isActive = function (index) {
            return $scope._Index === index;
        };

        // for quote
        $scope.isActiveQ = function (index) {
            return $scope._qIndex === index;
        };

        // show prev image; wraps to the last slide
        $scope.showPrev = function () {
            $scope._Index = ($scope._Index > 0) ? --$scope._Index : $scope.slides.length - 1;
        };

        // show prev quote; wraps to the last quote.
        // Fix: previously wrapped to slides.length - 1, which is wrong when
        // the quote and slide lists have different lengths.
        $scope.showPrevQ = function () {
            $scope._qIndex = ($scope._qIndex > 0) ? --$scope._qIndex : $scope.quotes.length - 1;
        };

        // show next image, then schedule the following advance
        $scope.showNext = function () {
            $scope._Index = ($scope._Index < $scope.slides.length - 1) ? ++$scope._Index : 0;
            $timeout($scope.showNext, $scope.myInterval);
        };

        // show next quote, then schedule the following advance
        $scope.showNextQuote = function () {
            $scope._qIndex = ($scope._qIndex < $scope.quotes.length - 1) ? ++$scope._qIndex : 0;
            $timeout($scope.showNextQuote, $scope.quoteInterval);
        };

        // Kick off the two auto-advance loops.
        $scope.loadSlides = function(){
            $timeout($scope.showNext, $scope.myInterval);
        };
        $scope.loadQuotes = function(){
            $timeout($scope.showNextQuote, $scope.quoteInterval);
        };

        $scope.loadSlides();
        $scope.loadQuotes();
    }
]);
{ "content_hash": "5cef611d4086c86926f8c322ed84e703", "timestamp": "", "source": "github", "line_count": 73, "max_line_length": 446, "avg_line_length": 35.12328767123287, "alnum_prop": 0.6544461778471139, "repo_name": "andela-koni/recAread", "id": "216b392453b0426a3aa4e61abb618becc63ee9f4", "size": "2564", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "public/modules/core/controllers/home.client.controller.js", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "3927" }, { "name": "JavaScript", "bytes": "112243" }, { "name": "Shell", "bytes": "669" } ], "symlink_target": "" }
# Render the alertmanager configuration: replace the commented Slack
# placeholders with the values supplied through the environment
# (SLACK_URL, SLACK_CHANNEL, SLACK_USER).
cat /etc/alertmanager/alertmanager.yml |\
sed "s@#api_url: <url>#@api_url: '$SLACK_URL'@g" |\
sed "s@#channel: <channel>#@channel: '#$SLACK_CHANNEL'@g" |\
sed "s@#username: <user>#@username: '$SLACK_USER'@g" > /tmp/alertmanager.yml

# Overwrite the template with the rendered configuration.
mv /tmp/alertmanager.yml /etc/alertmanager/alertmanager.yml

# Prepend the alertmanager binary to the original arguments and replace
# this shell with it, so the process receives signals directly.
set -- /bin/alertmanager "$@"

exec "$@"
{ "content_hash": "68340f33d48be21ffea8f4ef7de8149c", "timestamp": "", "source": "github", "line_count": 10, "max_line_length": 80, "avg_line_length": 34.7, "alnum_prop": 0.6426512968299711, "repo_name": "joericearchitect/shared-infra", "id": "41e9d96cd473d3cfff9479c4203caebb0a164506", "size": "361", "binary": false, "copies": "5", "ref": "refs/heads/master", "path": "infra-modules/centralized-monitoring/docker/alertmanager/conf/docker-entrypoint.sh", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Groovy", "bytes": "5871" }, { "name": "HCL", "bytes": "141672" }, { "name": "Python", "bytes": "136096" }, { "name": "Shell", "bytes": "101547" } ], "symlink_target": "" }
# Bootstrap the GNU autotools build files.
# Fail fast: if any tool errors out, stop instead of running the later
# tools against stale or partial output.
set -e

aclocal
autoheader
autoconf
automake --add-missing

# vim: tabstop=2 shiftwidth=2 expandtab softtabstop=2
{ "content_hash": "3727c6cd848b7903dc14baa2c19f78d0", "timestamp": "", "source": "github", "line_count": 6, "max_line_length": 53, "avg_line_length": 17.666666666666668, "alnum_prop": 0.8113207547169812, "repo_name": "SumiTomohiko/luxcmd", "id": "20686bd92e14b4d853b61e3adf40267469b3cdc7", "size": "117", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "autogen.sh", "mode": "33261", "license": "mit", "language": [ { "name": "C++", "bytes": "114" }, { "name": "Shell", "bytes": "117" } ], "symlink_target": "" }
int main(int argc, char **argv) { // AR.Drone class ARDrone ardrone; // Initialize if (!ardrone.open()) { printf("Failed to initialize.\n"); return -1; } // Image of AR.Drone's camera IplImage *image = ardrone.getImage(); // Variables for optical flow int corner_count = 50; IplImage *gray = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 1); IplImage *prev = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 1); cvCvtColor(image, prev, CV_BGR2GRAY); IplImage *eig_img = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1); IplImage *tmp_img = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1); IplImage *prev_pyramid = cvCreateImage(cvSize(image->width+8, image->height/3), IPL_DEPTH_8U, 1); IplImage *curr_pyramid = cvCreateImage(cvSize(image->width+8, image->height/3), IPL_DEPTH_8U, 1); CvPoint2D32f *corners1 = (CvPoint2D32f*)malloc(corner_count * sizeof(CvPoint2D32f)); CvPoint2D32f *corners2 = (CvPoint2D32f*)malloc(corner_count * sizeof(CvPoint2D32f)); // Main loop while (1) { // Key input int key = cvWaitKey(1); if (key == 0x1b) break; // Update if (!ardrone.update()) break; // Get an image image = ardrone.getImage(); // Convert the camera image to grayscale cvCvtColor(image, gray, CV_BGR2GRAY); // Detect features int corner_count = 50; cvGoodFeaturesToTrack(prev, eig_img, tmp_img, corners1, &corner_count, 0.1, 5.0, NULL); // Corner detected if (corner_count > 0) { char *status = (char*)malloc(corner_count * sizeof(char)); // Calicurate optical flows CvTermCriteria criteria = cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.3); cvCalcOpticalFlowPyrLK(prev, gray, prev_pyramid, curr_pyramid, corners1, corners2, corner_count, cvSize(10, 10), 3, status, NULL, criteria, 0); // Drow the optical flows for (int i = 0; i < corner_count; i++) { cvCircle(image, cvPointFrom32f(corners1[i]), 1, CV_RGB (255, 0, 0)); if (status[i]) cvLine(image, cvPointFrom32f(corners1[i]), cvPointFrom32f(corners2[i]), CV_RGB (0, 0, 255), 1, CV_AA, 0); } // Release the memory free(status); } // Save 
the last frame cvCopy(gray, prev); // Display the image cvShowImage("camera", image); } // Release the images cvReleaseImage(&gray); cvReleaseImage(&prev); cvReleaseImage(&eig_img); cvReleaseImage(&tmp_img); cvReleaseImage(&prev_pyramid); cvReleaseImage(&curr_pyramid); free(corners1); free(corners2); // See you ardrone.close(); return 0; }
{ "content_hash": "1e08b5252d95a3eddae605e0f192d922", "timestamp": "", "source": "github", "line_count": 85, "max_line_length": 155, "avg_line_length": 32.89411764705882, "alnum_prop": 0.6033619456366237, "repo_name": "tekkies/cvdrone", "id": "99aa1a4db9180a6e8d2d861bcd9a1b45b7002a09", "size": "3124", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "samples/old/sample_optical_flow.cpp", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "26151" }, { "name": "C++", "bytes": "150442" }, { "name": "Shell", "bytes": "687" } ], "symlink_target": "" }
package org.drools.ide.common.client.factconstraints.config;

import java.util.HashMap;
import java.util.Map;
import java.util.Set;

import org.drools.ide.common.client.factconstraints.ConstraintConfiguration;

/**
 * Plain, map-backed implementation of {@link ConstraintConfiguration}.
 * Arguments are held in a string-to-string map keyed by argument name.
 */
public class SimpleConstraintConfigurationImpl implements ConstraintConfiguration {

    private static final long serialVersionUID = 501L;

    private String constraintName = null;
    private String factType;
    private String fieldName;
    private Map<String, String> args = new HashMap<String, String>();

    public SimpleConstraintConfigurationImpl() {
    }

    public SimpleConstraintConfigurationImpl(ConstraintConfiguration constraintConfiguration) {
        copyFrom(constraintConfiguration);
    }

    public Set<String> getArgumentKeys() {
        return args.keySet();
    }

    public Object getArgumentValue(String key) {
        return args.get(key);
    }

    public void setArgumentValue(String key, String value) {
        args.put(key, value);
    }

    public boolean containsArgument(String key) {
        return args.containsKey(key);
    }

    public String getConstraintName() {
        return constraintName;
    }

    public void setConstraintName(String constraintName) {
        this.constraintName = constraintName;
    }

    public String getFactType() {
        return factType;
    }

    public void setFactType(String factType) {
        this.factType = factType;
    }

    public String getFieldName() {
        return fieldName;
    }

    public void setFieldName(String fieldName) {
        this.fieldName = fieldName;
    }

    @Override
    public String toString() {
        StringBuilder text = new StringBuilder("SimpleConstraintConfigurationImpl [args=");
        text.append(args);
        text.append(", constraintName=").append(constraintName);
        text.append(", factType=").append(factType);
        text.append(", fieldName=").append(fieldName);
        text.append("]");
        return text.toString();
    }

    /**
     * Copies name, fact type, field name and all arguments from another
     * configuration. Only legal on a not-yet-configured instance.
     */
    private void copyFrom(ConstraintConfiguration other) {
        if (constraintName != null) {
            throw new RuntimeException("can't copy configuration on a configured instance");
        }
        this.constraintName = other.getConstraintName();
        this.factType = other.getFactType();
        this.fieldName = other.getFieldName();
        this.args = new HashMap<String, String>();
        for (String key : other.getArgumentKeys()) {
            this.args.put(key, (String) other.getArgumentValue(key));
        }
    }
}
{ "content_hash": "1466568060b4870f626aaa5852387d8f", "timestamp": "", "source": "github", "line_count": 84, "max_line_length": 119, "avg_line_length": 28.30952380952381, "alnum_prop": 0.6715727502102607, "repo_name": "psiroky/guvnor", "id": "1c16b1976d682e9aec0d08572606c9be21025340", "size": "2971", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "droolsjbpm-ide-common/src/main/java/org/drools/ide/common/client/factconstraints/config/SimpleConstraintConfigurationImpl.java", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "ActionScript", "bytes": "408232" }, { "name": "CSS", "bytes": "34688" }, { "name": "Java", "bytes": "10019368" }, { "name": "JavaScript", "bytes": "8801" }, { "name": "Shell", "bytes": "6807" } ], "symlink_target": "" }
// Entry point for the desktop bundle: pulls in side-effect dependencies,
// assembles the Angular module's dependency list, registers all
// components/containers and applies config/init. Statement order matters:
// the requires below register themselves (and load CSS) as side effects.
import 'ng-redux';
import { attachAll, getNgModuleNames } from '../../other/boilerplate-utils.js';

const angular = require('angular');
require('angular-ui-bootstrap');
require('./desktop.css');
require('ngtouch');
require('angular-dropzone');
require('ng-tags-input');
require('angular-vs-repeat');

// Angular module names this app depends on.
const ngDependencies = [
    'ui.router',
    'ngAnimate',
    'ngTouch',
    'ngRedux',
    'ui.bootstrap',
    'ngTagsInput',
    'vs-repeat',
    require('../common').name,
    require('./components/utilities.js')(angular).name,
    // Add additional external Angular dependencies here
];

// Discover route modules via webpack's require.context and keep only
// truthy names.
const dependencies = getNgModuleNames(require.context('./routes', true, /index\.js$/)).filter(thing => thing);

ngDependencies.push.apply(ngDependencies, dependencies);

// Create the root module and expose jQuery/d3/lodash as injectables.
const ngModule = angular.module('da.desktop', ngDependencies)
    .constant('$', require('jquery'))
    .constant('d3', require('d3'))
    .constant('_', require('lodash'));

// Register every component/directive found under components/ and containers/.
attachAll(require.context('./components', true, /\.(component|directive)\.js$/))(ngModule);
attachAll(require.context('./containers', true, /\.(component|directive)\.js$/))(ngModule);

ngModule.config(require('./desktop.config.js'))
    .run(require('./desktop.init.js'));
{ "content_hash": "f190acdc75ed7359fcdbdeb32c0bb308", "timestamp": "", "source": "github", "line_count": 37, "max_line_length": 110, "avg_line_length": 31.756756756756758, "alnum_prop": 0.7004255319148937, "repo_name": "Jonathan-Law/fhd", "id": "3e4c0370d7acec486185a70acfde9ada8abb4928", "size": "1175", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/desktop/index.js", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "39271" }, { "name": "HTML", "bytes": "84761" }, { "name": "JavaScript", "bytes": "183197" }, { "name": "PHP", "bytes": "374752" } ], "symlink_target": "" }
ACCEPTED #### According to The Catalogue of Life, 3rd January 2011 #### Published in null #### Original name null ### Remarks null
{ "content_hash": "bc6f4b1d06a447ecf11d52cc19fbbf99", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 39, "avg_line_length": 10.307692307692308, "alnum_prop": 0.6940298507462687, "repo_name": "mdoering/backbone", "id": "e777e51ef5e876deea6a00388b4afe1afca19b19", "size": "192", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "life/Plantae/Chlorophyta/Chlorophyceae/Tetrasporales/Palmellaceae/Palmoclathrus/Palmoclathrus stipitatus/README.md", "mode": "33188", "license": "apache-2.0", "language": [], "symlink_target": "" }
var express = require('express');
var fs = require('fs');
var url = require('url');

// Connect to Redis: a Redis To Go instance when REDISTOGO_URL is set
// (Heroku), otherwise the local default.
if (process.env.REDISTOGO_URL) {
    var rtg = require("url").parse(process.env.REDISTOGO_URL);
    var redis = require("redis").createClient(rtg.port, rtg.hostname);
    redis.auth(rtg.auth.split(":")[1]);
} else {
    var redis = require("redis").createClient();
}

var app = express();
app.use(express.logger());

// Serve the static shorten form.
app.get('/shorten', function(request, response) {
    fs.readFile('./shorten.html', function(error, content) {
        if (error) {
            response.writeHead(500);
            response.end();
        } else {
            response.writeHead(200, { 'Content-Type': 'text/html' });
            response.end(content, 'utf-8');
        }
    });
});

// Confirmation page linking to the newly created short URL.
app.get('/success', function(request, response) {
    var url_parts = url.parse(request.url, true);
    // Security fix: the query parameter was previously reflected into the
    // HTML attribute unescaped, allowing markup/attribute injection (XSS).
    var safePath = encodeURIComponent(url_parts.query['path'] || '');
    var content = '<a href="http://meinde.rs/' + safePath + '">Success</a>';
    response.writeHead(200, { 'Content-Type': 'text/html' });
    response.end(content, 'utf-8');
});

// Create a new short path -> URL mapping, refusing paths already in use.
app.get('/shortenaction', function(request, response, next) {
    var url_parts = url.parse(request.url, true);
    console.log('shortenaction...');
    var path = url_parts.query['path'];
    if (path != undefined && path != null && path != '') {
        redis.get('/' + path, function(err, reply) {
            // Fix: the redis error was previously ignored, which could
            // treat a lookup failure as "path available".
            if (err) {
                response.writeHead(500);
                response.end();
                return;
            }
            console.log('reply: ' + reply);
            if (reply == null) {
                redis.set('/' + url_parts.query['path'], url_parts.query['url'], redis.print);
                // Encode the path so it survives as a query parameter.
                response.redirect('success?path=' + encodeURIComponent(url_parts.query['path']));
                response.end();
            } else {
                response.writeHead(500);
                response.end(path + ' already used');
            }
        });
    } else {
        response.writeHead(500);
        response.end();
    }
});

// Catch-all: look up the requested path and redirect to the stored URL.
app.get('*', function(request, response, next) {
    var path = url.parse(request.url).pathname;
    console.log('path: ' + path);
    if (path != undefined && path != null && path != '') {
        redis.get(path, function(err, reply) {
            // Fix: surface redis failures instead of silently treating
            // them as a missing entry.
            if (err) {
                response.writeHead(500);
                response.end();
                return;
            }
            console.log('reply: ' + reply);
            if (reply != null) {
                response.redirect(reply);
                response.end();
                console.log('redirecting...');
            } else {
                response.writeHead(500);
                response.end();
            }
        });
    } else {
        response.writeHead(500);
        response.end();
    }
});

var port = process.env.PORT || 3000;
app.listen(port, function() {
    console.log("Listening on " + port);
});
{ "content_hash": "8e876c0dd6a3959b58a1ce68713d39b4", "timestamp": "", "source": "github", "line_count": 103, "max_line_length": 89, "avg_line_length": 23.533980582524272, "alnum_prop": 0.5734323432343235, "repo_name": "tmeinders/meinders-shortener", "id": "2e64acc4aff0d5723347a3a37587b19a4b7c65bb", "size": "2424", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "web.js", "mode": "33188", "license": "mit", "language": [ { "name": "JavaScript", "bytes": "2424" } ], "symlink_target": "" }
/** * @author effine * @Date 2016年1月5日 下午6:26:55 * @email iballader#gmail.com * @site http://www.effine.cn */ package cn.effine.lab.rabbitmq; import com.rabbitmq.client.AMQP.BasicProperties; import com.rabbitmq.client.Consumer; import com.rabbitmq.client.Envelope; import com.rabbitmq.client.ShutdownSignalException; import org.apache.commons.lang.SerializationUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.Map; /** * @author effine * @Date 2017-10-15 20:37 * 队列消费者 */ public class QueueConsumer extends BaseEndPoint implements Runnable, Consumer { private static Logger logger = LoggerFactory.getLogger(QueueConsumer.class); public QueueConsumer(String queueName) throws IOException { super(queueName); } @Override public void run() { try { // start consuming messages. Auto acknowledge messages. channel.basicConsume(queueName, true, this); } catch (IOException e) { logger.error(e.getMessage()); } } /** * Called when consumer is registered. */ @Override public void handleConsumeOk(String consumerTag) { System.out.println("Consumer " + consumerTag + " registered"); } /** * Called when new message is available. */ @Override public void handleDelivery(String consumerTag, Envelope env, BasicProperties props, byte[] body) throws IOException { @SuppressWarnings("unchecked") Map<String, Integer> map = (Map<String, Integer>) SerializationUtils.deserialize(body); System.out.println("Message Number " + map.get("message number") + " received."); } @Override public void handleCancel(String consumerTag) { } @Override public void handleCancelOk(String consumerTag) { } @Override public void handleRecoverOk(String consumerTag) { } @Override public void handleShutdownSignal(String consumerTag, ShutdownSignalException arg1) { } }
{ "content_hash": "e4a812ecb7ace3ee5f075841c34cc486", "timestamp": "", "source": "github", "line_count": 78, "max_line_length": 121, "avg_line_length": 27.192307692307693, "alnum_prop": 0.6572371522866572, "repo_name": "effine/shopping", "id": "be901eec041128729644cc9e26a39126ab13c202", "size": "2141", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/main/java/cn/effine/lab/rabbitmq/QueueConsumer.java", "mode": "33188", "license": "mit", "language": [ { "name": "Java", "bytes": "173744" } ], "symlink_target": "" }
<?php namespace Mief\Provider; /** * A way to provide a consistent result set to work with * Providers should return this. */ class ResultSet { /** * @var */ private $search_terms; /** * @var */ private $cover_url; /** * The URL to retrieve the cover art * * @param $cover_url */ public function setCoverUrl($cover_url) { $this->cover_url = $cover_url; } /** * The URL to retrieve the cover art * * @return mixed */ public function getCoverUrl() { return $this->cover_url; } /** * @param $search_terms */ public function setSearchTerms($search_terms) { $this->search_terms = $search_terms; } /** * @return mixed */ public function getSearchTerms() { return $this->search_terms; } }
{ "content_hash": "c66be86b80d71adb6be20b3f555257a5", "timestamp": "", "source": "github", "line_count": 54, "max_line_length": 56, "avg_line_length": 16.333333333333332, "alnum_prop": 0.5215419501133787, "repo_name": "rikvanderkemp/coversearch", "id": "217e58de7f2d2b714028f45ae04b29520f4e92aa", "size": "2018", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "coversearch/lib/Mief/Provider/ResultSet.php", "mode": "33188", "license": "mit", "language": [ { "name": "PHP", "bytes": "28279" }, { "name": "Shell", "bytes": "1299" } ], "symlink_target": "" }
// Code generated by protoc-gen-gogo. // source: cockroach/roachpb/api.proto // DO NOT EDIT! /* Package roachpb is a generated protocol buffer package. It is generated from these files: cockroach/roachpb/api.proto cockroach/roachpb/data.proto cockroach/roachpb/errors.proto cockroach/roachpb/internal.proto cockroach/roachpb/metadata.proto It has these top-level messages: ResponseHeader GetRequest GetResponse PutRequest PutResponse ConditionalPutRequest ConditionalPutResponse IncrementRequest IncrementResponse DeleteRequest DeleteResponse DeleteRangeRequest DeleteRangeResponse ScanRequest ScanResponse ReverseScanRequest ReverseScanResponse BeginTransactionRequest BeginTransactionResponse EndTransactionRequest EndTransactionResponse AdminSplitRequest AdminSplitResponse AdminMergeRequest AdminMergeResponse RangeLookupRequest RangeLookupResponse HeartbeatTxnRequest HeartbeatTxnResponse GCRequest GCResponse PushTxnRequest PushTxnResponse ResolveIntentRequest ResolveIntentResponse ResolveIntentRangeRequest NoopResponse NoopRequest ResolveIntentRangeResponse MergeRequest MergeResponse TruncateLogRequest TruncateLogResponse LeaderLeaseRequest LeaderLeaseResponse RequestUnion ResponseUnion Header BatchRequest BatchResponse Span Timestamp Value KeyValue StoreIdent SplitTrigger MergeTrigger ChangeReplicasTrigger ModifiedSpanTrigger InternalCommitTrigger NodeList TxnMeta Transaction Intent Lease SequenceCacheEntry NotLeaderError NodeUnavailableError RangeNotFoundError RangeKeyMismatchError ReadWithinUncertaintyIntervalError TransactionAbortedError TransactionPushError TransactionRetryError TransactionStatusError WriteIntentError WriteTooOldError OpRequiresTxnError ConditionFailedError LeaseRejectedError SendError RaftGroupDeletedError ReplicaCorruptionError LeaseVersionChangedError DidntUpdateDescriptorError SqlTransactionAbortedError ExistingSchemaChangeLeaseError ErrorDetail ErrPosition Error RaftCommand InternalTimeSeriesData InternalTimeSeriesSample RaftTruncatedState 
RaftTombstone RaftSnapshotData Attributes ReplicaDescriptor RangeDescriptor RangeTree RangeTreeNode StoreCapacity NodeDescriptor StoreDescriptor */ package roachpb import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" // skipping weak import gogoproto "github.com/cockroachdb/gogoproto" import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // ReadConsistencyType specifies what type of consistency is observed // during read operations. type ReadConsistencyType int32 const ( // CONSISTENT reads are guaranteed to read committed data; the // mechanism relies on clocks to determine lease expirations. CONSISTENT ReadConsistencyType = 0 // CONSENSUS requires that reads must achieve consensus. This is a // stronger guarantee of consistency than CONSISTENT. // // TODO(spencer): current unimplemented. CONSENSUS ReadConsistencyType = 1 // INCONSISTENT reads return the latest available, committed values. // They are more efficient, but may read stale values as pending // intents are ignored. INCONSISTENT ReadConsistencyType = 2 ) var ReadConsistencyType_name = map[int32]string{ 0: "CONSISTENT", 1: "CONSENSUS", 2: "INCONSISTENT", } var ReadConsistencyType_value = map[string]int32{ "CONSISTENT": 0, "CONSENSUS": 1, "INCONSISTENT": 2, } func (x ReadConsistencyType) Enum() *ReadConsistencyType { p := new(ReadConsistencyType) *p = x return p } func (x ReadConsistencyType) String() string { return proto.EnumName(ReadConsistencyType_name, int32(x)) } func (x *ReadConsistencyType) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(ReadConsistencyType_value, data, "ReadConsistencyType") if err != nil { return err } *x = ReadConsistencyType(value) return nil } // TxnPushType determines what action to take when pushing a transaction. type PushTxnType int32 const ( // Push the timestamp forward if possible to accommodate a concurrent reader. 
PUSH_TIMESTAMP PushTxnType = 0 // Abort the transaction if possible to accommodate a concurrent writer. PUSH_ABORT PushTxnType = 1 // Abort the transaction if it's abandoned, but don't attempt to mutate it // otherwise. PUSH_TOUCH PushTxnType = 2 ) var PushTxnType_name = map[int32]string{ 0: "PUSH_TIMESTAMP", 1: "PUSH_ABORT", 2: "PUSH_TOUCH", } var PushTxnType_value = map[string]int32{ "PUSH_TIMESTAMP": 0, "PUSH_ABORT": 1, "PUSH_TOUCH": 2, } func (x PushTxnType) Enum() *PushTxnType { p := new(PushTxnType) *p = x return p } func (x PushTxnType) String() string { return proto.EnumName(PushTxnType_name, int32(x)) } func (x *PushTxnType) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(PushTxnType_value, data, "PushTxnType") if err != nil { return err } *x = PushTxnType(value) return nil } // ResponseHeader is returned with every storage node response. type ResponseHeader struct { // timestamp specifies time at which read or write actually was // performed. In the case of both reads and writes, if the timestamp // supplied to the request was 0, the wall time of the node // servicing the request will be set here. Additionally, in the case // of writes, this value may be increased from the timestamp passed // with the Span if the key being written was either read // or written more recently. Timestamp Timestamp `protobuf:"bytes,2,opt,name=timestamp" json:"timestamp"` // txn is non-nil if the request specified a non-nil transaction. // The transaction timestamp and/or priority may have been updated, // depending on the outcome of the request. Txn *Transaction `protobuf:"bytes,3,opt,name=txn" json:"txn,omitempty"` } func (m *ResponseHeader) Reset() { *m = ResponseHeader{} } func (m *ResponseHeader) String() string { return proto.CompactTextString(m) } func (*ResponseHeader) ProtoMessage() {} // A GetRequest is the argument for the Get() method. 
type GetRequest struct { Span `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` } func (m *GetRequest) Reset() { *m = GetRequest{} } func (m *GetRequest) String() string { return proto.CompactTextString(m) } func (*GetRequest) ProtoMessage() {} // A GetResponse is the return value from the Get() method. // If the key doesn't exist, returns nil for Value.Bytes. type GetResponse struct { ResponseHeader `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` Value *Value `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` } func (m *GetResponse) Reset() { *m = GetResponse{} } func (m *GetResponse) String() string { return proto.CompactTextString(m) } func (*GetResponse) ProtoMessage() {} // A PutRequest is the argument to the Put() method. type PutRequest struct { Span `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` Value Value `protobuf:"bytes,2,opt,name=value" json:"value"` } func (m *PutRequest) Reset() { *m = PutRequest{} } func (m *PutRequest) String() string { return proto.CompactTextString(m) } func (*PutRequest) ProtoMessage() {} // A PutResponse is the return value from the Put() method. type PutResponse struct { ResponseHeader `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` } func (m *PutResponse) Reset() { *m = PutResponse{} } func (m *PutResponse) String() string { return proto.CompactTextString(m) } func (*PutResponse) ProtoMessage() {} // A ConditionalPutRequest is the argument to the ConditionalPut() method. // // - Returns true and sets value if exp_value equals existing value. // - If key doesn't exist and exp_value is nil, sets value. // - If key exists, but value is empty and exp_value is not nil but empty, sets value. // - Otherwise, returns error and the actual value of the key in the response. type ConditionalPutRequest struct { Span `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` // The value to put. 
Value Value `protobuf:"bytes,2,opt,name=value" json:"value"` // Set exp_value.bytes empty to test for non-existence. Specify as nil // to indicate there should be no existing entry. This is different // from the expectation that the value exists but is empty. ExpValue *Value `protobuf:"bytes,3,opt,name=exp_value" json:"exp_value,omitempty"` } func (m *ConditionalPutRequest) Reset() { *m = ConditionalPutRequest{} } func (m *ConditionalPutRequest) String() string { return proto.CompactTextString(m) } func (*ConditionalPutRequest) ProtoMessage() {} // A ConditionalPutResponse is the return value from the // ConditionalPut() method. type ConditionalPutResponse struct { ResponseHeader `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` } func (m *ConditionalPutResponse) Reset() { *m = ConditionalPutResponse{} } func (m *ConditionalPutResponse) String() string { return proto.CompactTextString(m) } func (*ConditionalPutResponse) ProtoMessage() {} // An IncrementRequest is the argument to the Increment() method. It // increments the value for key, and returns the new value. If no // value exists for a key, incrementing by 0 is not a noop, but will // create a zero value. IncrementRequest cannot be called on a key set // by Put() or ConditionalPut(). Similarly, Put() and ConditionalPut() // cannot be invoked on an incremented key. type IncrementRequest struct { Span `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` Increment int64 `protobuf:"varint,2,opt,name=increment" json:"increment"` } func (m *IncrementRequest) Reset() { *m = IncrementRequest{} } func (m *IncrementRequest) String() string { return proto.CompactTextString(m) } func (*IncrementRequest) ProtoMessage() {} // An IncrementResponse is the return value from the Increment // method. The new value after increment is specified in NewValue. If // the value could not be decoded as specified, Error will be set. 
type IncrementResponse struct { ResponseHeader `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` NewValue int64 `protobuf:"varint,2,opt,name=new_value" json:"new_value"` } func (m *IncrementResponse) Reset() { *m = IncrementResponse{} } func (m *IncrementResponse) String() string { return proto.CompactTextString(m) } func (*IncrementResponse) ProtoMessage() {} // A DeleteRequest is the argument to the Delete() method. type DeleteRequest struct { Span `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` } func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } func (*DeleteRequest) ProtoMessage() {} // A DeleteResponse is the return value from the Delete() method. type DeleteResponse struct { ResponseHeader `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` } func (m *DeleteResponse) Reset() { *m = DeleteResponse{} } func (m *DeleteResponse) String() string { return proto.CompactTextString(m) } func (*DeleteResponse) ProtoMessage() {} // A DeleteRangeRequest is the argument to the DeleteRange() method. It // specifies the range of keys to delete. type DeleteRangeRequest struct { Span `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` // If 0, *all* entries between key (inclusive) and end_key // (exclusive) are deleted. Must be >= 0. MaxEntriesToDelete int64 `protobuf:"varint,2,opt,name=max_entries_to_delete" json:"max_entries_to_delete"` } func (m *DeleteRangeRequest) Reset() { *m = DeleteRangeRequest{} } func (m *DeleteRangeRequest) String() string { return proto.CompactTextString(m) } func (*DeleteRangeRequest) ProtoMessage() {} // A DeleteRangeResponse is the return value from the DeleteRange() // method. type DeleteRangeResponse struct { ResponseHeader `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` // Number of entries removed. 
NumDeleted int64 `protobuf:"varint,2,opt,name=num_deleted" json:"num_deleted"` } func (m *DeleteRangeResponse) Reset() { *m = DeleteRangeResponse{} } func (m *DeleteRangeResponse) String() string { return proto.CompactTextString(m) } func (*DeleteRangeResponse) ProtoMessage() {} // A ScanRequest is the argument to the Scan() method. It specifies the // start and end keys for an ascending scan of [start,end) and the maximum // number of results. type ScanRequest struct { Span `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` // If 0, there is no limit on the number of retrieved entries. Must be >= 0. MaxResults int64 `protobuf:"varint,2,opt,name=max_results" json:"max_results"` } func (m *ScanRequest) Reset() { *m = ScanRequest{} } func (m *ScanRequest) String() string { return proto.CompactTextString(m) } func (*ScanRequest) ProtoMessage() {} // A ScanResponse is the return value from the Scan() method. type ScanResponse struct { ResponseHeader `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` // Empty if no rows were scanned. Rows []KeyValue `protobuf:"bytes,2,rep,name=rows" json:"rows"` } func (m *ScanResponse) Reset() { *m = ScanResponse{} } func (m *ScanResponse) String() string { return proto.CompactTextString(m) } func (*ScanResponse) ProtoMessage() {} // A ReverseScanRequest is the argument to the ReverseScan() method. It specifies the // start and end keys for a descending scan of [start,end) and the maximum // number of results. type ReverseScanRequest struct { Span `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` // If 0, there is no limit on the number of retrieved entries. Must be >= 0. 
MaxResults int64 `protobuf:"varint,2,opt,name=max_results" json:"max_results"` } func (m *ReverseScanRequest) Reset() { *m = ReverseScanRequest{} } func (m *ReverseScanRequest) String() string { return proto.CompactTextString(m) } func (*ReverseScanRequest) ProtoMessage() {} // A ReverseScanResponse is the return value from the ReverseScan() method. type ReverseScanResponse struct { ResponseHeader `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` // Empty if no rows were scanned. Rows []KeyValue `protobuf:"bytes,2,rep,name=rows" json:"rows"` } func (m *ReverseScanResponse) Reset() { *m = ReverseScanResponse{} } func (m *ReverseScanResponse) String() string { return proto.CompactTextString(m) } func (*ReverseScanResponse) ProtoMessage() {} // A BeginTransactionRequest is the argument to the BeginTransaction() method. type BeginTransactionRequest struct { Span `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` } func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} } func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) } func (*BeginTransactionRequest) ProtoMessage() {} // A BeginTransactionResponse is the return value from the BeginTransaction() method. type BeginTransactionResponse struct { ResponseHeader `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` } func (m *BeginTransactionResponse) Reset() { *m = BeginTransactionResponse{} } func (m *BeginTransactionResponse) String() string { return proto.CompactTextString(m) } func (*BeginTransactionResponse) ProtoMessage() {} // An EndTransactionRequest is the argument to the EndTransaction() method. It // specifies whether to commit or roll back an extant transaction. type EndTransactionRequest struct { Span `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` // False to abort and rollback. 
Commit bool `protobuf:"varint,2,opt,name=commit" json:"commit"` // The deadline by which the transaction must commit, if present. Deadline *Timestamp `protobuf:"bytes,3,opt,name=deadline" json:"deadline,omitempty"` // Optional commit triggers. Note that commit triggers are for // internal use only and will cause an error if requested through the // external-facing KV API. InternalCommitTrigger *InternalCommitTrigger `protobuf:"bytes,4,opt,name=internal_commit_trigger" json:"internal_commit_trigger,omitempty"` // List of intents written by the transaction. IntentSpans []Span `protobuf:"bytes,5,rep,name=intent_spans" json:"intent_spans"` } func (m *EndTransactionRequest) Reset() { *m = EndTransactionRequest{} } func (m *EndTransactionRequest) String() string { return proto.CompactTextString(m) } func (*EndTransactionRequest) ProtoMessage() {} // An EndTransactionResponse is the return value from the // EndTransaction() method. The final transaction record is returned // as part of the response header. In particular, transaction status // and timestamp will be updated to reflect final committed // values. Clients may propagate the transaction timestamp as the // final txn commit timestamp in order to preserve causal ordering // between subsequent transactions. CommitWait specifies the commit // wait, which is the remaining time the client MUST wait before // signalling completion of the transaction to another distributed // node to maintain consistency. type EndTransactionResponse struct { ResponseHeader `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` // Remaining time (ns). CommitWait int64 `protobuf:"varint,2,opt,name=commit_wait" json:"commit_wait"` // List of intents resolved by EndTransaction call. 
Resolved []Key `protobuf:"bytes,3,rep,name=resolved,casttype=Key" json:"resolved,omitempty"` } func (m *EndTransactionResponse) Reset() { *m = EndTransactionResponse{} } func (m *EndTransactionResponse) String() string { return proto.CompactTextString(m) } func (*EndTransactionResponse) ProtoMessage() {} // An AdminSplitRequest is the argument to the AdminSplit() method. The // existing range which contains header.key is split by // split_key. If split_key is not specified, then this method will // determine a split key that is roughly halfway through the // range. The existing range is resized to cover only its start key to // the split key. The new range created by the split starts at the // split key and extends to the original range's end key. If split_key // is known, header.key should also be set to split_key. // // New range IDs for each of the split range's replica and a new Raft // ID are generated by the operation. Split requests are done in the // context of a distributed transaction which updates range addressing // records, range metadata and finally, provides a commit trigger to // update bookkeeping and instantiate the new range on commit. // // The new range contains range replicas located on the same stores; // no range data is moved during this operation. The split can be // thought of as a mostly logical operation, though some other // metadata (e.g. sequence cache and range stats must be copied or // recomputed). type AdminSplitRequest struct { Span `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` SplitKey Key `protobuf:"bytes,2,opt,name=split_key,casttype=Key" json:"split_key,omitempty"` } func (m *AdminSplitRequest) Reset() { *m = AdminSplitRequest{} } func (m *AdminSplitRequest) String() string { return proto.CompactTextString(m) } func (*AdminSplitRequest) ProtoMessage() {} // An AdminSplitResponse is the return value from the AdminSplit() // method. 
type AdminSplitResponse struct { ResponseHeader `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` } func (m *AdminSplitResponse) Reset() { *m = AdminSplitResponse{} } func (m *AdminSplitResponse) String() string { return proto.CompactTextString(m) } func (*AdminSplitResponse) ProtoMessage() {} // An AdminMergeRequest is the argument to the AdminMerge() method. A // merge is performed by calling AdminMerge on the left-hand range of // two consecutive ranges (i.e. the range which contains keys which // sort first). This range will be the subsuming range and the right // hand range will be subsumed. After the merge operation, the // subsumed range will no longer exist and the subsuming range will // now encompass all keys from its original start key to the end key // of the subsumed range. If AdminMerge is called on the final range // in the key space, it is a noop. type AdminMergeRequest struct { Span `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` } func (m *AdminMergeRequest) Reset() { *m = AdminMergeRequest{} } func (m *AdminMergeRequest) String() string { return proto.CompactTextString(m) } func (*AdminMergeRequest) ProtoMessage() {} // An AdminMergeResponse is the return value from the AdminMerge() // method. type AdminMergeResponse struct { ResponseHeader `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` } func (m *AdminMergeResponse) Reset() { *m = AdminMergeResponse{} } func (m *AdminMergeResponse) String() string { return proto.CompactTextString(m) } func (*AdminMergeResponse) ProtoMessage() {} // A RangeLookupRequest is arguments to the RangeLookup() method. A // forward lookup request returns a range containing the requested // key. A reverse lookup request returns a range containing the // previous key of the requested key (e.g., if a requested key is the // end key of range R, the reverse lookup request returns R). 
// // RangeLookupRequest also specifies the maximum number of range // descriptors that should be returned, if there are additional // consecutive addressable ranges. Specify max_ranges > 1 to pre-fill the // range descriptor cache. The additional ranges are scanned in the same // direction as lookup (forward v.s. reverse). type RangeLookupRequest struct { Span `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` MaxRanges int32 `protobuf:"varint,2,opt,name=max_ranges" json:"max_ranges"` // consider_intents indicates whether or not intents encountered // while looking up the range info should randomly be returned // to the caller. This is intended to be used when retrying due // to range addressing errors. ConsiderIntents bool `protobuf:"varint,3,opt,name=consider_intents" json:"consider_intents"` // Use a reverse scan to pre-fill the range descriptor cache instead // of an ascending scan. Reverse bool `protobuf:"varint,4,opt,name=reverse" json:"reverse"` } func (m *RangeLookupRequest) Reset() { *m = RangeLookupRequest{} } func (m *RangeLookupRequest) String() string { return proto.CompactTextString(m) } func (*RangeLookupRequest) ProtoMessage() {} // A RangeLookupResponse is the return value from the RangeLookup() // method. It returns metadata for the range containing the requested // key, optionally returning the metadata for additional consecutive // ranges beyond the requested range to pre-fill the range descriptor // cache. type RangeLookupResponse struct { ResponseHeader `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` Ranges []RangeDescriptor `protobuf:"bytes,2,rep,name=ranges" json:"ranges"` } func (m *RangeLookupResponse) Reset() { *m = RangeLookupResponse{} } func (m *RangeLookupResponse) String() string { return proto.CompactTextString(m) } func (*RangeLookupResponse) ProtoMessage() {} // A HeartbeatTxnRequest is arguments to the HeartbeatTxn() // method. 
It's sent by transaction coordinators to let the system // know that the transaction is still ongoing. Note that this // heartbeat message is different from the heartbeat message in the // gossip protocol. type HeartbeatTxnRequest struct { Span `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` } func (m *HeartbeatTxnRequest) Reset() { *m = HeartbeatTxnRequest{} } func (m *HeartbeatTxnRequest) String() string { return proto.CompactTextString(m) } func (*HeartbeatTxnRequest) ProtoMessage() {} // A HeartbeatTxnResponse is the return value from the HeartbeatTxn() // method. It returns the transaction info in the response header. The // returned transaction lets the coordinator know the disposition of // the transaction (i.e. aborted, committed, or pending). type HeartbeatTxnResponse struct { ResponseHeader `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` } func (m *HeartbeatTxnResponse) Reset() { *m = HeartbeatTxnResponse{} } func (m *HeartbeatTxnResponse) String() string { return proto.CompactTextString(m) } func (*HeartbeatTxnResponse) ProtoMessage() {} // A GCRequest is arguments to the GC() method. It's sent by range // leaders after scanning range data to find expired MVCC values. type GCRequest struct { Span `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` Keys []GCRequest_GCKey `protobuf:"bytes,3,rep,name=keys" json:"keys"` } func (m *GCRequest) Reset() { *m = GCRequest{} } func (m *GCRequest) String() string { return proto.CompactTextString(m) } func (*GCRequest) ProtoMessage() {} type GCRequest_GCKey struct { Key Key `protobuf:"bytes,1,opt,name=key,casttype=Key" json:"key,omitempty"` Timestamp Timestamp `protobuf:"bytes,2,opt,name=timestamp" json:"timestamp"` } func (m *GCRequest_GCKey) Reset() { *m = GCRequest_GCKey{} } func (m *GCRequest_GCKey) String() string { return proto.CompactTextString(m) } func (*GCRequest_GCKey) ProtoMessage() {} // A GCResponse is the return value from the GC() method. 
type GCResponse struct { ResponseHeader `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` } func (m *GCResponse) Reset() { *m = GCResponse{} } func (m *GCResponse) String() string { return proto.CompactTextString(m) } func (*GCResponse) ProtoMessage() {} // A PushTxnRequest is arguments to the PushTxn() method. It's sent by // readers or writers which have encountered an "intent" laid down by // another transaction. The goal is to resolve the conflict. Note that // args.Key should be set to the txn ID of args.PusheeTxn, not // args.PusherTxn. This RPC is addressed to the range which owns the pushee's // txn record. If the pusher is not transactional, it must be set to a // Transaction record with only the Priority present. // // Resolution is trivial if the txn which owns the intent has either // been committed or aborted already. Otherwise, the existing txn can // either be aborted (for write/write conflicts), or its commit // timestamp can be moved forward (for read/write conflicts). The // course of action is determined by the specified push type, and by // the owning txn's status and priority. type PushTxnRequest struct { Span `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` // Transaction which encountered the intent, if applicable. For a // non-transactional operation, pusher_txn will be nil. Used to // compare priorities and timestamps if priorities are equal. PusherTxn Transaction `protobuf:"bytes,2,opt,name=pusher_txn" json:"pusher_txn"` // Transaction to be pushed, as specified at the intent which led to // the push transaction request. Note that this may not be the most // up-to-date value of the transaction record, but will be set or // merged as appropriate. PusheeTxn TxnMeta `protobuf:"bytes,3,opt,name=pushee_txn" json:"pushee_txn"` // PushTo is the timestamp just after which PusheeTxn is attempted to be // pushed. During conflict resolution, it should be set to the timestamp // of the its conflicting write. 
PushTo Timestamp `protobuf:"bytes,4,opt,name=push_to" json:"push_to"` // Now holds the timestamp used to compare the last heartbeat of the pushee // against. This is necessary since the request header's timestamp does not // necessarily advance with the node clock across retries and hence cannot // detect abandoned transactions. Now Timestamp `protobuf:"bytes,5,opt,name=now" json:"now"` // Readers set this to PUSH_TIMESTAMP to move pushee_txn's provisional // commit timestamp forward. Writers set this to PUSH_ABORT to request // that pushee_txn be aborted if possible. Inconsistent readers set // this to PUSH_TOUCH to determine whether the pushee can be aborted // due to inactivity (based on the now field). PushType PushTxnType `protobuf:"varint,6,opt,name=push_type,enum=cockroach.roachpb.PushTxnType" json:"push_type"` } func (m *PushTxnRequest) Reset() { *m = PushTxnRequest{} } func (m *PushTxnRequest) String() string { return proto.CompactTextString(m) } func (*PushTxnRequest) ProtoMessage() {} // A PushTxnResponse is the return value from the PushTxn() method. It // returns success and the resulting state of PusheeTxn if the // conflict was resolved in favor of the caller; the caller should // subsequently invoke ResolveIntent() on the conflicted key. It // returns an error otherwise. type PushTxnResponse struct { ResponseHeader `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` // pushee_txn is non-nil if the transaction was pushed and contains // the current value of the transaction. // TODO(tschottdorf): Maybe this can be a TxnMeta instead; probably requires // factoring out the new Priority. PusheeTxn Transaction `protobuf:"bytes,2,opt,name=pushee_txn" json:"pushee_txn"` } func (m *PushTxnResponse) Reset() { *m = PushTxnResponse{} } func (m *PushTxnResponse) String() string { return proto.CompactTextString(m) } func (*PushTxnResponse) ProtoMessage() {} // A ResolveIntentRequest is arguments to the ResolveIntent() // method. 
It is sent by transaction coordinators after success // calling PushTxn to clean up write intents: either to remove, commit // or move them forward in time. type ResolveIntentRequest struct { Span `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` // The transaction whose intent is being resolved. IntentTxn TxnMeta `protobuf:"bytes,2,opt,name=intent_txn" json:"intent_txn"` // The status of the transaction. Status TransactionStatus `protobuf:"varint,3,opt,name=status,enum=cockroach.roachpb.TransactionStatus" json:"status"` // Optionally poison the sequence cache for the transaction the intent's // range. Poison bool `protobuf:"varint,4,opt,name=poison" json:"poison"` } func (m *ResolveIntentRequest) Reset() { *m = ResolveIntentRequest{} } func (m *ResolveIntentRequest) String() string { return proto.CompactTextString(m) } func (*ResolveIntentRequest) ProtoMessage() {} // A ResolveIntentResponse is the return value from the // ResolveIntent() method. type ResolveIntentResponse struct { ResponseHeader `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` } func (m *ResolveIntentResponse) Reset() { *m = ResolveIntentResponse{} } func (m *ResolveIntentResponse) String() string { return proto.CompactTextString(m) } func (*ResolveIntentResponse) ProtoMessage() {} // A ResolveIntentRangeRequest is arguments to the ResolveIntentRange() method. // It is sent by transaction coordinators after success calling PushTxn to // clean up write intents: either to remove, commit or move them forward in // time. type ResolveIntentRangeRequest struct { Span `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` // The transaction whose intents are being resolved. IntentTxn TxnMeta `protobuf:"bytes,2,opt,name=intent_txn" json:"intent_txn"` // The status of the transaction. 
Status TransactionStatus `protobuf:"varint,3,opt,name=status,enum=cockroach.roachpb.TransactionStatus" json:"status"` // Optionally poison the sequence cache for the transaction on all ranges // on which the intents reside. Poison bool `protobuf:"varint,4,opt,name=poison" json:"poison"` } func (m *ResolveIntentRangeRequest) Reset() { *m = ResolveIntentRangeRequest{} } func (m *ResolveIntentRangeRequest) String() string { return proto.CompactTextString(m) } func (*ResolveIntentRangeRequest) ProtoMessage() {} // A NoopResponse is the return value from a no-op operation. type NoopResponse struct { ResponseHeader `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` } func (m *NoopResponse) Reset() { *m = NoopResponse{} } func (m *NoopResponse) String() string { return proto.CompactTextString(m) } func (*NoopResponse) ProtoMessage() {} // A NoopRequest is a no-op. type NoopRequest struct { Span `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` } func (m *NoopRequest) Reset() { *m = NoopRequest{} } func (m *NoopRequest) String() string { return proto.CompactTextString(m) } func (*NoopRequest) ProtoMessage() {} // A ResolveIntentRangeResponse is the return value from the // ResolveIntent() method. type ResolveIntentRangeResponse struct { ResponseHeader `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` } func (m *ResolveIntentRangeResponse) Reset() { *m = ResolveIntentRangeResponse{} } func (m *ResolveIntentRangeResponse) String() string { return proto.CompactTextString(m) } func (*ResolveIntentRangeResponse) ProtoMessage() {} // A MergeRequest contains arguments to the Merge() method. It // specifies a key and a value which should be merged into the // existing value at that key. 
// NOTE(review): generated protobuf message types (Merge and TruncateLog
// requests). Field tags encode wire numbers; keep byte-identical.
type MergeRequest struct { Span `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` Value Value `protobuf:"bytes,2,opt,name=value" json:"value"` } func (m *MergeRequest) Reset() { *m = MergeRequest{} } func (m *MergeRequest) String() string { return proto.CompactTextString(m) } func (*MergeRequest) ProtoMessage() {} // MergeResponse is the response to a Merge() operation. type MergeResponse struct { ResponseHeader `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` } func (m *MergeResponse) Reset() { *m = MergeResponse{} } func (m *MergeResponse) String() string { return proto.CompactTextString(m) } func (*MergeResponse) ProtoMessage() {} // TruncateLogRequest is used to remove a prefix of the raft log. While there // is no requirement for correctness that the raft log truncation be synchronized across // replicas, it is nice to preserve the property that all replicas of a range are as close // to identical as possible. The raft leader can also inform decisions about the cutoff point // with its knowledge of the replicas' acknowledgment status. type TruncateLogRequest struct { Span `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` // Log entries < this index are to be discarded. Index uint64 `protobuf:"varint,2,opt,name=index" json:"index"` // RangeID is used to double check that the correct range is being truncated. // The header specifies a span, start and end keys, but not the range id // itself. The range may have changed from the one specified in the header // in the case of a merge. RangeID RangeID `protobuf:"varint,3,opt,name=range_id,casttype=RangeID" json:"range_id"` } func (m *TruncateLogRequest) Reset() { *m = TruncateLogRequest{} } func (m *TruncateLogRequest) String() string { return proto.CompactTextString(m) } func (*TruncateLogRequest) ProtoMessage() {} // TruncateLogResponse is the response to a TruncateLog() operation. 
// NOTE(review): generated protobuf message types (TruncateLog response and
// LeaderLease request/response). Do not hand-edit; regenerate instead.
type TruncateLogResponse struct { ResponseHeader `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` } func (m *TruncateLogResponse) Reset() { *m = TruncateLogResponse{} } func (m *TruncateLogResponse) String() string { return proto.CompactTextString(m) } func (*TruncateLogResponse) ProtoMessage() {} // A LeaderLeaseRequest is arguments to the LeaderLease() // method. It is sent by the store on behalf of one of its ranges upon receipt // of a leader election event for that range. type LeaderLeaseRequest struct { Span `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` Lease Lease `protobuf:"bytes,2,opt,name=lease" json:"lease"` } func (m *LeaderLeaseRequest) Reset() { *m = LeaderLeaseRequest{} } func (m *LeaderLeaseRequest) String() string { return proto.CompactTextString(m) } func (*LeaderLeaseRequest) ProtoMessage() {} // A LeaderLeaseResponse is the response to a LeaderLease() // operation. type LeaderLeaseResponse struct { ResponseHeader `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` } func (m *LeaderLeaseResponse) Reset() { *m = LeaderLeaseResponse{} } func (m *LeaderLeaseResponse) String() string { return proto.CompactTextString(m) } func (*LeaderLeaseResponse) ProtoMessage() {} // A RequestUnion contains exactly one of the optional requests. // The values added here must match those in ResponseUnion. 
// NOTE(review): generated union/header message types. RequestUnion and
// ResponseUnion must keep their field numbers (1-22) in lockstep — a request
// at field N pairs with the response at field N. Keep the code byte-identical;
// regenerate from the .proto to change it.
type RequestUnion struct { Get *GetRequest `protobuf:"bytes,1,opt,name=get" json:"get,omitempty"` Put *PutRequest `protobuf:"bytes,2,opt,name=put" json:"put,omitempty"` ConditionalPut *ConditionalPutRequest `protobuf:"bytes,3,opt,name=conditional_put" json:"conditional_put,omitempty"` Increment *IncrementRequest `protobuf:"bytes,4,opt,name=increment" json:"increment,omitempty"` Delete *DeleteRequest `protobuf:"bytes,5,opt,name=delete" json:"delete,omitempty"` DeleteRange *DeleteRangeRequest `protobuf:"bytes,6,opt,name=delete_range" json:"delete_range,omitempty"` Scan *ScanRequest `protobuf:"bytes,7,opt,name=scan" json:"scan,omitempty"` BeginTransaction *BeginTransactionRequest `protobuf:"bytes,8,opt,name=begin_transaction" json:"begin_transaction,omitempty"` EndTransaction *EndTransactionRequest `protobuf:"bytes,9,opt,name=end_transaction" json:"end_transaction,omitempty"` AdminSplit *AdminSplitRequest `protobuf:"bytes,10,opt,name=admin_split" json:"admin_split,omitempty"` AdminMerge *AdminMergeRequest `protobuf:"bytes,11,opt,name=admin_merge" json:"admin_merge,omitempty"` HeartbeatTxn *HeartbeatTxnRequest `protobuf:"bytes,12,opt,name=heartbeat_txn" json:"heartbeat_txn,omitempty"` Gc *GCRequest `protobuf:"bytes,13,opt,name=gc" json:"gc,omitempty"` PushTxn *PushTxnRequest `protobuf:"bytes,14,opt,name=push_txn" json:"push_txn,omitempty"` RangeLookup *RangeLookupRequest `protobuf:"bytes,15,opt,name=range_lookup" json:"range_lookup,omitempty"` ResolveIntent *ResolveIntentRequest `protobuf:"bytes,16,opt,name=resolve_intent" json:"resolve_intent,omitempty"` ResolveIntentRange *ResolveIntentRangeRequest `protobuf:"bytes,17,opt,name=resolve_intent_range" json:"resolve_intent_range,omitempty"` Merge *MergeRequest `protobuf:"bytes,18,opt,name=merge" json:"merge,omitempty"` TruncateLog *TruncateLogRequest `protobuf:"bytes,19,opt,name=truncate_log" json:"truncate_log,omitempty"` LeaderLease *LeaderLeaseRequest `protobuf:"bytes,20,opt,name=leader_lease" 
json:"leader_lease,omitempty"` ReverseScan *ReverseScanRequest `protobuf:"bytes,21,opt,name=reverse_scan" json:"reverse_scan,omitempty"` Noop *NoopRequest `protobuf:"bytes,22,opt,name=noop" json:"noop,omitempty"` } func (m *RequestUnion) Reset() { *m = RequestUnion{} } func (m *RequestUnion) String() string { return proto.CompactTextString(m) } func (*RequestUnion) ProtoMessage() {} // A ResponseUnion contains exactly one of the optional responses. // The values added here must match those in RequestUnion. type ResponseUnion struct { Get *GetResponse `protobuf:"bytes,1,opt,name=get" json:"get,omitempty"` Put *PutResponse `protobuf:"bytes,2,opt,name=put" json:"put,omitempty"` ConditionalPut *ConditionalPutResponse `protobuf:"bytes,3,opt,name=conditional_put" json:"conditional_put,omitempty"` Increment *IncrementResponse `protobuf:"bytes,4,opt,name=increment" json:"increment,omitempty"` Delete *DeleteResponse `protobuf:"bytes,5,opt,name=delete" json:"delete,omitempty"` DeleteRange *DeleteRangeResponse `protobuf:"bytes,6,opt,name=delete_range" json:"delete_range,omitempty"` Scan *ScanResponse `protobuf:"bytes,7,opt,name=scan" json:"scan,omitempty"` BeginTransaction *BeginTransactionResponse `protobuf:"bytes,8,opt,name=begin_transaction" json:"begin_transaction,omitempty"` EndTransaction *EndTransactionResponse `protobuf:"bytes,9,opt,name=end_transaction" json:"end_transaction,omitempty"` AdminSplit *AdminSplitResponse `protobuf:"bytes,10,opt,name=admin_split" json:"admin_split,omitempty"` AdminMerge *AdminMergeResponse `protobuf:"bytes,11,opt,name=admin_merge" json:"admin_merge,omitempty"` HeartbeatTxn *HeartbeatTxnResponse `protobuf:"bytes,12,opt,name=heartbeat_txn" json:"heartbeat_txn,omitempty"` Gc *GCResponse `protobuf:"bytes,13,opt,name=gc" json:"gc,omitempty"` PushTxn *PushTxnResponse `protobuf:"bytes,14,opt,name=push_txn" json:"push_txn,omitempty"` RangeLookup *RangeLookupResponse `protobuf:"bytes,15,opt,name=range_lookup" json:"range_lookup,omitempty"` 
ResolveIntent *ResolveIntentResponse `protobuf:"bytes,16,opt,name=resolve_intent" json:"resolve_intent,omitempty"` ResolveIntentRange *ResolveIntentRangeResponse `protobuf:"bytes,17,opt,name=resolve_intent_range" json:"resolve_intent_range,omitempty"` Merge *MergeResponse `protobuf:"bytes,18,opt,name=merge" json:"merge,omitempty"` TruncateLog *TruncateLogResponse `protobuf:"bytes,19,opt,name=truncate_log" json:"truncate_log,omitempty"` LeaderLease *LeaderLeaseResponse `protobuf:"bytes,20,opt,name=leader_lease" json:"leader_lease,omitempty"` ReverseScan *ReverseScanResponse `protobuf:"bytes,21,opt,name=reverse_scan" json:"reverse_scan,omitempty"` Noop *NoopResponse `protobuf:"bytes,22,opt,name=noop" json:"noop,omitempty"` } func (m *ResponseUnion) Reset() { *m = ResponseUnion{} } func (m *ResponseUnion) String() string { return proto.CompactTextString(m) } func (*ResponseUnion) ProtoMessage() {} // A Header is attached to a BatchRequest, encapsulating routing and auxiliary // information required for executing it. type Header struct { // timestamp specifies time at which read or writes should be // performed. If the timestamp is set to zero value, its value // is initialized to the wall time of the receiving node. Timestamp Timestamp `protobuf:"bytes,1,opt,name=timestamp" json:"timestamp"` // replica specifies the destination of the request. Replica ReplicaDescriptor `protobuf:"bytes,2,opt,name=replica" json:"replica"` // range_id specifies the ID of the Raft consensus group which the key // range belongs to. This is used by the receiving node to route the // request to the correct range. RangeID RangeID `protobuf:"varint,3,opt,name=range_id,casttype=RangeID" json:"range_id"` // user_priority allows any command's priority to be biased from the // default random priority. It specifies a multiple. If set to 0.5, // the chosen priority will be 1/2x as likely to beat any default // random priority. If set to 1, a default random priority is // chosen. 
If set to 2, the chosen priority will be 2x as likely to // beat any default random priority, and so on. As a special case, 0 // priority is treated the same as 1. This value is ignored if txn // is specified. The min and max user priorities are set via // MinUserPriority and MaxUserPriority in data.go. UserPriority UserPriority `protobuf:"fixed64,4,opt,name=user_priority,casttype=UserPriority" json:"user_priority"` // txn is set non-nil if a transaction is underway. To start a txn, // the first request should set this field to non-nil with name and // isolation level set as desired. The response will contain the // fully-initialized transaction with txn ID, priority, initial // timestamp, and maximum timestamp. Txn *Transaction `protobuf:"bytes,5,opt,name=txn" json:"txn,omitempty"` // read_consistency specifies the consistency for read // operations. The default is CONSISTENT. This value is ignored for // write operations. ReadConsistency ReadConsistencyType `protobuf:"varint,6,opt,name=read_consistency,enum=cockroach.roachpb.ReadConsistencyType" json:"read_consistency"` } func (m *Header) Reset() { *m = Header{} } func (m *Header) String() string { return proto.CompactTextString(m) } func (*Header) ProtoMessage() {} // A BatchRequest contains one or more requests to be executed in // parallel, or if applicable (based on write-only commands and // range-locality), as a single update. // // The Span should contain the Key of the first request // in the batch. It also contains the transaction itself; individual // calls must not have transactions specified. The same applies to // the User and UserPriority fields. 
// NOTE(review): generated batch message types, proto type registration, and
// hand-rolled Marshal/MarshalTo wire encoders (gogoproto "marshaler" plugin).
// The literal bytes written before each field (0xa, 0x12, 0x10, ...) are the
// protobuf tag bytes: (field_number << 3) | wire_type. Statement order and
// these constants define the wire format — keep the code byte-identical and
// regenerate from the .proto to change anything. BatchRequest/BatchResponse
// intentionally have no generated String() here — presumably a custom
// stringer is defined elsewhere (TODO confirm against the .proto options).
// The final function in this chunk (GCRequest.MarshalTo) continues past the
// end of this chunk.
type BatchRequest struct { Header `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` Requests []RequestUnion `protobuf:"bytes,2,rep,name=requests" json:"requests"` } func (m *BatchRequest) Reset() { *m = BatchRequest{} } func (*BatchRequest) ProtoMessage() {} // A BatchResponse contains one or more responses, one per request // corresponding to the requests in the matching BatchRequest. The // error in the response header is set to the first error from the // slice of responses, if applicable. type BatchResponse struct { BatchResponse_Header `protobuf:"bytes,1,opt,name=header,embedded=header" json:"header"` Responses []ResponseUnion `protobuf:"bytes,2,rep,name=responses" json:"responses"` } func (m *BatchResponse) Reset() { *m = BatchResponse{} } func (*BatchResponse) ProtoMessage() {} type BatchResponse_Header struct { // error is non-nil if an error occurred. Error *Error `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` // timestamp specifies time at which read or write actually was // performed. In the case of both reads and writes, if the timestamp // supplied to the request was 0, the wall time of the node // servicing the request will be set here. Additionally, in the case // of writes, this value may be increased from the timestamp passed // with the Span if the key being written was either read // or written more recently. Timestamp Timestamp `protobuf:"bytes,2,opt,name=timestamp" json:"timestamp"` // txn is non-nil if the request specified a non-nil // transaction. The transaction timestamp and/or priority may have // been updated, depending on the outcome of the request. 
Txn *Transaction `protobuf:"bytes,3,opt,name=txn" json:"txn,omitempty"` } func (m *BatchResponse_Header) Reset() { *m = BatchResponse_Header{} } func (m *BatchResponse_Header) String() string { return proto.CompactTextString(m) } func (*BatchResponse_Header) ProtoMessage() {} func init() { proto.RegisterType((*ResponseHeader)(nil), "cockroach.roachpb.ResponseHeader") proto.RegisterType((*GetRequest)(nil), "cockroach.roachpb.GetRequest") proto.RegisterType((*GetResponse)(nil), "cockroach.roachpb.GetResponse") proto.RegisterType((*PutRequest)(nil), "cockroach.roachpb.PutRequest") proto.RegisterType((*PutResponse)(nil), "cockroach.roachpb.PutResponse") proto.RegisterType((*ConditionalPutRequest)(nil), "cockroach.roachpb.ConditionalPutRequest") proto.RegisterType((*ConditionalPutResponse)(nil), "cockroach.roachpb.ConditionalPutResponse") proto.RegisterType((*IncrementRequest)(nil), "cockroach.roachpb.IncrementRequest") proto.RegisterType((*IncrementResponse)(nil), "cockroach.roachpb.IncrementResponse") proto.RegisterType((*DeleteRequest)(nil), "cockroach.roachpb.DeleteRequest") proto.RegisterType((*DeleteResponse)(nil), "cockroach.roachpb.DeleteResponse") proto.RegisterType((*DeleteRangeRequest)(nil), "cockroach.roachpb.DeleteRangeRequest") proto.RegisterType((*DeleteRangeResponse)(nil), "cockroach.roachpb.DeleteRangeResponse") proto.RegisterType((*ScanRequest)(nil), "cockroach.roachpb.ScanRequest") proto.RegisterType((*ScanResponse)(nil), "cockroach.roachpb.ScanResponse") proto.RegisterType((*ReverseScanRequest)(nil), "cockroach.roachpb.ReverseScanRequest") proto.RegisterType((*ReverseScanResponse)(nil), "cockroach.roachpb.ReverseScanResponse") proto.RegisterType((*BeginTransactionRequest)(nil), "cockroach.roachpb.BeginTransactionRequest") proto.RegisterType((*BeginTransactionResponse)(nil), "cockroach.roachpb.BeginTransactionResponse") proto.RegisterType((*EndTransactionRequest)(nil), "cockroach.roachpb.EndTransactionRequest") 
proto.RegisterType((*EndTransactionResponse)(nil), "cockroach.roachpb.EndTransactionResponse") proto.RegisterType((*AdminSplitRequest)(nil), "cockroach.roachpb.AdminSplitRequest") proto.RegisterType((*AdminSplitResponse)(nil), "cockroach.roachpb.AdminSplitResponse") proto.RegisterType((*AdminMergeRequest)(nil), "cockroach.roachpb.AdminMergeRequest") proto.RegisterType((*AdminMergeResponse)(nil), "cockroach.roachpb.AdminMergeResponse") proto.RegisterType((*RangeLookupRequest)(nil), "cockroach.roachpb.RangeLookupRequest") proto.RegisterType((*RangeLookupResponse)(nil), "cockroach.roachpb.RangeLookupResponse") proto.RegisterType((*HeartbeatTxnRequest)(nil), "cockroach.roachpb.HeartbeatTxnRequest") proto.RegisterType((*HeartbeatTxnResponse)(nil), "cockroach.roachpb.HeartbeatTxnResponse") proto.RegisterType((*GCRequest)(nil), "cockroach.roachpb.GCRequest") proto.RegisterType((*GCRequest_GCKey)(nil), "cockroach.roachpb.GCRequest.GCKey") proto.RegisterType((*GCResponse)(nil), "cockroach.roachpb.GCResponse") proto.RegisterType((*PushTxnRequest)(nil), "cockroach.roachpb.PushTxnRequest") proto.RegisterType((*PushTxnResponse)(nil), "cockroach.roachpb.PushTxnResponse") proto.RegisterType((*ResolveIntentRequest)(nil), "cockroach.roachpb.ResolveIntentRequest") proto.RegisterType((*ResolveIntentResponse)(nil), "cockroach.roachpb.ResolveIntentResponse") proto.RegisterType((*ResolveIntentRangeRequest)(nil), "cockroach.roachpb.ResolveIntentRangeRequest") proto.RegisterType((*NoopResponse)(nil), "cockroach.roachpb.NoopResponse") proto.RegisterType((*NoopRequest)(nil), "cockroach.roachpb.NoopRequest") proto.RegisterType((*ResolveIntentRangeResponse)(nil), "cockroach.roachpb.ResolveIntentRangeResponse") proto.RegisterType((*MergeRequest)(nil), "cockroach.roachpb.MergeRequest") proto.RegisterType((*MergeResponse)(nil), "cockroach.roachpb.MergeResponse") proto.RegisterType((*TruncateLogRequest)(nil), "cockroach.roachpb.TruncateLogRequest") proto.RegisterType((*TruncateLogResponse)(nil), 
"cockroach.roachpb.TruncateLogResponse") proto.RegisterType((*LeaderLeaseRequest)(nil), "cockroach.roachpb.LeaderLeaseRequest") proto.RegisterType((*LeaderLeaseResponse)(nil), "cockroach.roachpb.LeaderLeaseResponse") proto.RegisterType((*RequestUnion)(nil), "cockroach.roachpb.RequestUnion") proto.RegisterType((*ResponseUnion)(nil), "cockroach.roachpb.ResponseUnion") proto.RegisterType((*Header)(nil), "cockroach.roachpb.Header") proto.RegisterType((*BatchRequest)(nil), "cockroach.roachpb.BatchRequest") proto.RegisterType((*BatchResponse)(nil), "cockroach.roachpb.BatchResponse") proto.RegisterType((*BatchResponse_Header)(nil), "cockroach.roachpb.BatchResponse.Header") proto.RegisterEnum("cockroach.roachpb.ReadConsistencyType", ReadConsistencyType_name, ReadConsistencyType_value) proto.RegisterEnum("cockroach.roachpb.PushTxnType", PushTxnType_name, PushTxnType_value) } func (m *ResponseHeader) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *ResponseHeader) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0x12 i++ i = encodeVarintApi(data, i, uint64(m.Timestamp.Size())) n1, err := m.Timestamp.MarshalTo(data[i:]) if err != nil { return 0, err } i += n1 if m.Txn != nil { data[i] = 0x1a i++ i = encodeVarintApi(data, i, uint64(m.Txn.Size())) n2, err := m.Txn.MarshalTo(data[i:]) if err != nil { return 0, err } i += n2 } return i, nil } func (m *GetRequest) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *GetRequest) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.Span.Size())) n3, err := m.Span.MarshalTo(data[i:]) if err != nil { return 0, err } i += n3 return i, nil } func (m *GetResponse) Marshal() (data []byte, err 
error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *GetResponse) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.ResponseHeader.Size())) n4, err := m.ResponseHeader.MarshalTo(data[i:]) if err != nil { return 0, err } i += n4 if m.Value != nil { data[i] = 0x12 i++ i = encodeVarintApi(data, i, uint64(m.Value.Size())) n5, err := m.Value.MarshalTo(data[i:]) if err != nil { return 0, err } i += n5 } return i, nil } func (m *PutRequest) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *PutRequest) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.Span.Size())) n6, err := m.Span.MarshalTo(data[i:]) if err != nil { return 0, err } i += n6 data[i] = 0x12 i++ i = encodeVarintApi(data, i, uint64(m.Value.Size())) n7, err := m.Value.MarshalTo(data[i:]) if err != nil { return 0, err } i += n7 return i, nil } func (m *PutResponse) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *PutResponse) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.ResponseHeader.Size())) n8, err := m.ResponseHeader.MarshalTo(data[i:]) if err != nil { return 0, err } i += n8 return i, nil } func (m *ConditionalPutRequest) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *ConditionalPutRequest) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, 
uint64(m.Span.Size())) n9, err := m.Span.MarshalTo(data[i:]) if err != nil { return 0, err } i += n9 data[i] = 0x12 i++ i = encodeVarintApi(data, i, uint64(m.Value.Size())) n10, err := m.Value.MarshalTo(data[i:]) if err != nil { return 0, err } i += n10 if m.ExpValue != nil { data[i] = 0x1a i++ i = encodeVarintApi(data, i, uint64(m.ExpValue.Size())) n11, err := m.ExpValue.MarshalTo(data[i:]) if err != nil { return 0, err } i += n11 } return i, nil } func (m *ConditionalPutResponse) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *ConditionalPutResponse) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.ResponseHeader.Size())) n12, err := m.ResponseHeader.MarshalTo(data[i:]) if err != nil { return 0, err } i += n12 return i, nil } func (m *IncrementRequest) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *IncrementRequest) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.Span.Size())) n13, err := m.Span.MarshalTo(data[i:]) if err != nil { return 0, err } i += n13 data[i] = 0x10 i++ i = encodeVarintApi(data, i, uint64(m.Increment)) return i, nil } func (m *IncrementResponse) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *IncrementResponse) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.ResponseHeader.Size())) n14, err := m.ResponseHeader.MarshalTo(data[i:]) if err != nil { return 0, err } i += n14 data[i] = 0x10 i++ i = encodeVarintApi(data, i, uint64(m.NewValue)) 
return i, nil } func (m *DeleteRequest) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *DeleteRequest) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.Span.Size())) n15, err := m.Span.MarshalTo(data[i:]) if err != nil { return 0, err } i += n15 return i, nil } func (m *DeleteResponse) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *DeleteResponse) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.ResponseHeader.Size())) n16, err := m.ResponseHeader.MarshalTo(data[i:]) if err != nil { return 0, err } i += n16 return i, nil } func (m *DeleteRangeRequest) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *DeleteRangeRequest) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.Span.Size())) n17, err := m.Span.MarshalTo(data[i:]) if err != nil { return 0, err } i += n17 data[i] = 0x10 i++ i = encodeVarintApi(data, i, uint64(m.MaxEntriesToDelete)) return i, nil } func (m *DeleteRangeResponse) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *DeleteRangeResponse) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.ResponseHeader.Size())) n18, err := m.ResponseHeader.MarshalTo(data[i:]) if err != nil { return 0, err } i += n18 data[i] = 0x10 i++ i = encodeVarintApi(data, i, 
uint64(m.NumDeleted)) return i, nil } func (m *ScanRequest) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *ScanRequest) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.Span.Size())) n19, err := m.Span.MarshalTo(data[i:]) if err != nil { return 0, err } i += n19 data[i] = 0x10 i++ i = encodeVarintApi(data, i, uint64(m.MaxResults)) return i, nil } func (m *ScanResponse) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *ScanResponse) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.ResponseHeader.Size())) n20, err := m.ResponseHeader.MarshalTo(data[i:]) if err != nil { return 0, err } i += n20 if len(m.Rows) > 0 { for _, msg := range m.Rows { data[i] = 0x12 i++ i = encodeVarintApi(data, i, uint64(msg.Size())) n, err := msg.MarshalTo(data[i:]) if err != nil { return 0, err } i += n } } return i, nil } func (m *ReverseScanRequest) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *ReverseScanRequest) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.Span.Size())) n21, err := m.Span.MarshalTo(data[i:]) if err != nil { return 0, err } i += n21 data[i] = 0x10 i++ i = encodeVarintApi(data, i, uint64(m.MaxResults)) return i, nil } func (m *ReverseScanResponse) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *ReverseScanResponse) MarshalTo(data []byte) (int, 
error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.ResponseHeader.Size())) n22, err := m.ResponseHeader.MarshalTo(data[i:]) if err != nil { return 0, err } i += n22 if len(m.Rows) > 0 { for _, msg := range m.Rows { data[i] = 0x12 i++ i = encodeVarintApi(data, i, uint64(msg.Size())) n, err := msg.MarshalTo(data[i:]) if err != nil { return 0, err } i += n } } return i, nil } func (m *BeginTransactionRequest) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *BeginTransactionRequest) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.Span.Size())) n23, err := m.Span.MarshalTo(data[i:]) if err != nil { return 0, err } i += n23 return i, nil } func (m *BeginTransactionResponse) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *BeginTransactionResponse) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.ResponseHeader.Size())) n24, err := m.ResponseHeader.MarshalTo(data[i:]) if err != nil { return 0, err } i += n24 return i, nil } func (m *EndTransactionRequest) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *EndTransactionRequest) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.Span.Size())) n25, err := m.Span.MarshalTo(data[i:]) if err != nil { return 0, err } i += n25 data[i] = 0x10 i++ if m.Commit { data[i] = 1 } else { data[i] = 0 } i++ if m.Deadline != nil { data[i] = 0x1a i++ i = encodeVarintApi(data, i, 
uint64(m.Deadline.Size())) n26, err := m.Deadline.MarshalTo(data[i:]) if err != nil { return 0, err } i += n26 } if m.InternalCommitTrigger != nil { data[i] = 0x22 i++ i = encodeVarintApi(data, i, uint64(m.InternalCommitTrigger.Size())) n27, err := m.InternalCommitTrigger.MarshalTo(data[i:]) if err != nil { return 0, err } i += n27 } if len(m.IntentSpans) > 0 { for _, msg := range m.IntentSpans { data[i] = 0x2a i++ i = encodeVarintApi(data, i, uint64(msg.Size())) n, err := msg.MarshalTo(data[i:]) if err != nil { return 0, err } i += n } } return i, nil } func (m *EndTransactionResponse) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *EndTransactionResponse) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.ResponseHeader.Size())) n28, err := m.ResponseHeader.MarshalTo(data[i:]) if err != nil { return 0, err } i += n28 data[i] = 0x10 i++ i = encodeVarintApi(data, i, uint64(m.CommitWait)) if len(m.Resolved) > 0 { for _, b := range m.Resolved { data[i] = 0x1a i++ i = encodeVarintApi(data, i, uint64(len(b))) i += copy(data[i:], b) } } return i, nil } func (m *AdminSplitRequest) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *AdminSplitRequest) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.Span.Size())) n29, err := m.Span.MarshalTo(data[i:]) if err != nil { return 0, err } i += n29 if m.SplitKey != nil { data[i] = 0x12 i++ i = encodeVarintApi(data, i, uint64(len(m.SplitKey))) i += copy(data[i:], m.SplitKey) } return i, nil } func (m *AdminSplitResponse) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != 
nil { return nil, err } return data[:n], nil } func (m *AdminSplitResponse) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.ResponseHeader.Size())) n30, err := m.ResponseHeader.MarshalTo(data[i:]) if err != nil { return 0, err } i += n30 return i, nil } func (m *AdminMergeRequest) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *AdminMergeRequest) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.Span.Size())) n31, err := m.Span.MarshalTo(data[i:]) if err != nil { return 0, err } i += n31 return i, nil } func (m *AdminMergeResponse) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *AdminMergeResponse) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.ResponseHeader.Size())) n32, err := m.ResponseHeader.MarshalTo(data[i:]) if err != nil { return 0, err } i += n32 return i, nil } func (m *RangeLookupRequest) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *RangeLookupRequest) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.Span.Size())) n33, err := m.Span.MarshalTo(data[i:]) if err != nil { return 0, err } i += n33 data[i] = 0x10 i++ i = encodeVarintApi(data, i, uint64(m.MaxRanges)) data[i] = 0x18 i++ if m.ConsiderIntents { data[i] = 1 } else { data[i] = 0 } i++ data[i] = 0x20 i++ if m.Reverse { data[i] = 1 } else { data[i] = 0 } i++ return i, nil } func (m *RangeLookupResponse) Marshal() (data 
[]byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *RangeLookupResponse) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.ResponseHeader.Size())) n34, err := m.ResponseHeader.MarshalTo(data[i:]) if err != nil { return 0, err } i += n34 if len(m.Ranges) > 0 { for _, msg := range m.Ranges { data[i] = 0x12 i++ i = encodeVarintApi(data, i, uint64(msg.Size())) n, err := msg.MarshalTo(data[i:]) if err != nil { return 0, err } i += n } } return i, nil } func (m *HeartbeatTxnRequest) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *HeartbeatTxnRequest) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.Span.Size())) n35, err := m.Span.MarshalTo(data[i:]) if err != nil { return 0, err } i += n35 return i, nil } func (m *HeartbeatTxnResponse) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *HeartbeatTxnResponse) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.ResponseHeader.Size())) n36, err := m.ResponseHeader.MarshalTo(data[i:]) if err != nil { return 0, err } i += n36 return i, nil } func (m *GCRequest) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *GCRequest) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.Span.Size())) n37, err := m.Span.MarshalTo(data[i:]) if err != nil { return 0, err } i += 
// NOTE(review): generated gogo/protobuf marshaling for GCRequest/GCResponse, PushTxn*,
// ResolveIntent*, ResolveIntentRange* and Noop* messages. Do not hand-edit; regenerate.
// Bool fields are written as a single 0/1 byte; enums and ints as varints.
n37 if len(m.Keys) > 0 { for _, msg := range m.Keys { data[i] = 0x1a i++ i = encodeVarintApi(data, i, uint64(msg.Size())) n, err := msg.MarshalTo(data[i:]) if err != nil { return 0, err } i += n } } return i, nil } func (m *GCRequest_GCKey) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *GCRequest_GCKey) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l if m.Key != nil { data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(len(m.Key))) i += copy(data[i:], m.Key) } data[i] = 0x12 i++ i = encodeVarintApi(data, i, uint64(m.Timestamp.Size())) n38, err := m.Timestamp.MarshalTo(data[i:]) if err != nil { return 0, err } i += n38 return i, nil } func (m *GCResponse) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *GCResponse) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.ResponseHeader.Size())) n39, err := m.ResponseHeader.MarshalTo(data[i:]) if err != nil { return 0, err } i += n39 return i, nil } func (m *PushTxnRequest) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *PushTxnRequest) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.Span.Size())) n40, err := m.Span.MarshalTo(data[i:]) if err != nil { return 0, err } i += n40 data[i] = 0x12 i++ i = encodeVarintApi(data, i, uint64(m.PusherTxn.Size())) n41, err := m.PusherTxn.MarshalTo(data[i:]) if err != nil { return 0, err } i += n41 data[i] = 0x1a i++ i = encodeVarintApi(data, i, uint64(m.PusheeTxn.Size())) n42, err := m.PusheeTxn.MarshalTo(data[i:]) if err != nil { return 0, 
err } i += n42 data[i] = 0x22 i++ i = encodeVarintApi(data, i, uint64(m.PushTo.Size())) n43, err := m.PushTo.MarshalTo(data[i:]) if err != nil { return 0, err } i += n43 data[i] = 0x2a i++ i = encodeVarintApi(data, i, uint64(m.Now.Size())) n44, err := m.Now.MarshalTo(data[i:]) if err != nil { return 0, err } i += n44 data[i] = 0x30 i++ i = encodeVarintApi(data, i, uint64(m.PushType)) return i, nil } func (m *PushTxnResponse) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *PushTxnResponse) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.ResponseHeader.Size())) n45, err := m.ResponseHeader.MarshalTo(data[i:]) if err != nil { return 0, err } i += n45 data[i] = 0x12 i++ i = encodeVarintApi(data, i, uint64(m.PusheeTxn.Size())) n46, err := m.PusheeTxn.MarshalTo(data[i:]) if err != nil { return 0, err } i += n46 return i, nil } func (m *ResolveIntentRequest) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *ResolveIntentRequest) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.Span.Size())) n47, err := m.Span.MarshalTo(data[i:]) if err != nil { return 0, err } i += n47 data[i] = 0x12 i++ i = encodeVarintApi(data, i, uint64(m.IntentTxn.Size())) n48, err := m.IntentTxn.MarshalTo(data[i:]) if err != nil { return 0, err } i += n48 data[i] = 0x18 i++ i = encodeVarintApi(data, i, uint64(m.Status)) data[i] = 0x20 i++ if m.Poison { data[i] = 1 } else { data[i] = 0 } i++ return i, nil } func (m *ResolveIntentResponse) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } 
func (m *ResolveIntentResponse) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.ResponseHeader.Size())) n49, err := m.ResponseHeader.MarshalTo(data[i:]) if err != nil { return 0, err } i += n49 return i, nil } func (m *ResolveIntentRangeRequest) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *ResolveIntentRangeRequest) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.Span.Size())) n50, err := m.Span.MarshalTo(data[i:]) if err != nil { return 0, err } i += n50 data[i] = 0x12 i++ i = encodeVarintApi(data, i, uint64(m.IntentTxn.Size())) n51, err := m.IntentTxn.MarshalTo(data[i:]) if err != nil { return 0, err } i += n51 data[i] = 0x18 i++ i = encodeVarintApi(data, i, uint64(m.Status)) data[i] = 0x20 i++ if m.Poison { data[i] = 1 } else { data[i] = 0 } i++ return i, nil } func (m *NoopResponse) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *NoopResponse) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.ResponseHeader.Size())) n52, err := m.ResponseHeader.MarshalTo(data[i:]) if err != nil { return 0, err } i += n52 return i, nil } func (m *NoopRequest) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *NoopRequest) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.Span.Size())) n53, err := m.Span.MarshalTo(data[i:]) if err != nil { return 0, err } i += n53 return i, nil } func (m 
// NOTE(review): generated gogo/protobuf marshaling for ResolveIntentRange/Merge/TruncateLog/
// LeaderLease messages, followed by the start of RequestUnion.MarshalTo. Do not hand-edit.
// RequestUnion is a proto "union": exactly one of the optional pointer fields is expected to
// be non-nil, and each branch writes its own distinct field tag.
*ResolveIntentRangeResponse) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *ResolveIntentRangeResponse) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.ResponseHeader.Size())) n54, err := m.ResponseHeader.MarshalTo(data[i:]) if err != nil { return 0, err } i += n54 return i, nil } func (m *MergeRequest) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *MergeRequest) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.Span.Size())) n55, err := m.Span.MarshalTo(data[i:]) if err != nil { return 0, err } i += n55 data[i] = 0x12 i++ i = encodeVarintApi(data, i, uint64(m.Value.Size())) n56, err := m.Value.MarshalTo(data[i:]) if err != nil { return 0, err } i += n56 return i, nil } func (m *MergeResponse) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *MergeResponse) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.ResponseHeader.Size())) n57, err := m.ResponseHeader.MarshalTo(data[i:]) if err != nil { return 0, err } i += n57 return i, nil } func (m *TruncateLogRequest) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *TruncateLogRequest) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.Span.Size())) n58, err := m.Span.MarshalTo(data[i:]) if err != nil { return 0, err } i += n58 
data[i] = 0x10 i++ i = encodeVarintApi(data, i, uint64(m.Index)) data[i] = 0x18 i++ i = encodeVarintApi(data, i, uint64(m.RangeID)) return i, nil } func (m *TruncateLogResponse) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *TruncateLogResponse) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.ResponseHeader.Size())) n59, err := m.ResponseHeader.MarshalTo(data[i:]) if err != nil { return 0, err } i += n59 return i, nil } func (m *LeaderLeaseRequest) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *LeaderLeaseRequest) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.Span.Size())) n60, err := m.Span.MarshalTo(data[i:]) if err != nil { return 0, err } i += n60 data[i] = 0x12 i++ i = encodeVarintApi(data, i, uint64(m.Lease.Size())) n61, err := m.Lease.MarshalTo(data[i:]) if err != nil { return 0, err } i += n61 return i, nil } func (m *LeaderLeaseResponse) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *LeaderLeaseResponse) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.ResponseHeader.Size())) n62, err := m.ResponseHeader.MarshalTo(data[i:]) if err != nil { return 0, err } i += n62 return i, nil } func (m *RequestUnion) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *RequestUnion) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ 
= l if m.Get != nil { data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.Get.Size())) n63, err := m.Get.MarshalTo(data[i:]) if err != nil { return 0, err } i += n63 } if m.Put != nil { data[i] = 0x12 i++ i = encodeVarintApi(data, i, uint64(m.Put.Size())) n64, err := m.Put.MarshalTo(data[i:]) if err != nil { return 0, err } i += n64 } if m.ConditionalPut != nil { data[i] = 0x1a i++ i = encodeVarintApi(data, i, uint64(m.ConditionalPut.Size())) n65, err := m.ConditionalPut.MarshalTo(data[i:]) if err != nil { return 0, err } i += n65 } if m.Increment != nil { data[i] = 0x22 i++ i = encodeVarintApi(data, i, uint64(m.Increment.Size())) n66, err := m.Increment.MarshalTo(data[i:]) if err != nil { return 0, err } i += n66 } if m.Delete != nil { data[i] = 0x2a i++ i = encodeVarintApi(data, i, uint64(m.Delete.Size())) n67, err := m.Delete.MarshalTo(data[i:]) if err != nil { return 0, err } i += n67 } if m.DeleteRange != nil { data[i] = 0x32 i++ i = encodeVarintApi(data, i, uint64(m.DeleteRange.Size())) n68, err := m.DeleteRange.MarshalTo(data[i:]) if err != nil { return 0, err } i += n68 } if m.Scan != nil { data[i] = 0x3a i++ i = encodeVarintApi(data, i, uint64(m.Scan.Size())) n69, err := m.Scan.MarshalTo(data[i:]) if err != nil { return 0, err } i += n69 } if m.BeginTransaction != nil { data[i] = 0x42 i++ i = encodeVarintApi(data, i, uint64(m.BeginTransaction.Size())) n70, err := m.BeginTransaction.MarshalTo(data[i:]) if err != nil { return 0, err } i += n70 } if m.EndTransaction != nil { data[i] = 0x4a i++ i = encodeVarintApi(data, i, uint64(m.EndTransaction.Size())) n71, err := m.EndTransaction.MarshalTo(data[i:]) if err != nil { return 0, err } i += n71 } if m.AdminSplit != nil { data[i] = 0x52 i++ i = encodeVarintApi(data, i, uint64(m.AdminSplit.Size())) n72, err := m.AdminSplit.MarshalTo(data[i:]) if err != nil { return 0, err } i += n72 } if m.AdminMerge != nil { data[i] = 0x5a i++ i = encodeVarintApi(data, i, uint64(m.AdminMerge.Size())) n73, err := 
// NOTE(review): generated union marshaling continues — fields numbered 16 and above need a
// two-byte tag (e.g. 0x82 0x1 = field 16, wire type 2), hence the extra data[i] = 0x1 writes.
// Do not hand-edit; regenerate from the .proto file.
m.AdminMerge.MarshalTo(data[i:]) if err != nil { return 0, err } i += n73 } if m.HeartbeatTxn != nil { data[i] = 0x62 i++ i = encodeVarintApi(data, i, uint64(m.HeartbeatTxn.Size())) n74, err := m.HeartbeatTxn.MarshalTo(data[i:]) if err != nil { return 0, err } i += n74 } if m.Gc != nil { data[i] = 0x6a i++ i = encodeVarintApi(data, i, uint64(m.Gc.Size())) n75, err := m.Gc.MarshalTo(data[i:]) if err != nil { return 0, err } i += n75 } if m.PushTxn != nil { data[i] = 0x72 i++ i = encodeVarintApi(data, i, uint64(m.PushTxn.Size())) n76, err := m.PushTxn.MarshalTo(data[i:]) if err != nil { return 0, err } i += n76 } if m.RangeLookup != nil { data[i] = 0x7a i++ i = encodeVarintApi(data, i, uint64(m.RangeLookup.Size())) n77, err := m.RangeLookup.MarshalTo(data[i:]) if err != nil { return 0, err } i += n77 } if m.ResolveIntent != nil { data[i] = 0x82 i++ data[i] = 0x1 i++ i = encodeVarintApi(data, i, uint64(m.ResolveIntent.Size())) n78, err := m.ResolveIntent.MarshalTo(data[i:]) if err != nil { return 0, err } i += n78 } if m.ResolveIntentRange != nil { data[i] = 0x8a i++ data[i] = 0x1 i++ i = encodeVarintApi(data, i, uint64(m.ResolveIntentRange.Size())) n79, err := m.ResolveIntentRange.MarshalTo(data[i:]) if err != nil { return 0, err } i += n79 } if m.Merge != nil { data[i] = 0x92 i++ data[i] = 0x1 i++ i = encodeVarintApi(data, i, uint64(m.Merge.Size())) n80, err := m.Merge.MarshalTo(data[i:]) if err != nil { return 0, err } i += n80 } if m.TruncateLog != nil { data[i] = 0x9a i++ data[i] = 0x1 i++ i = encodeVarintApi(data, i, uint64(m.TruncateLog.Size())) n81, err := m.TruncateLog.MarshalTo(data[i:]) if err != nil { return 0, err } i += n81 } if m.LeaderLease != nil { data[i] = 0xa2 i++ data[i] = 0x1 i++ i = encodeVarintApi(data, i, uint64(m.LeaderLease.Size())) n82, err := m.LeaderLease.MarshalTo(data[i:]) if err != nil { return 0, err } i += n82 } if m.ReverseScan != nil { data[i] = 0xaa i++ data[i] = 0x1 i++ i = encodeVarintApi(data, i, uint64(m.ReverseScan.Size())) 
n83, err := m.ReverseScan.MarshalTo(data[i:]) if err != nil { return 0, err } i += n83 } if m.Noop != nil { data[i] = 0xb2 i++ data[i] = 0x1 i++ i = encodeVarintApi(data, i, uint64(m.Noop.Size())) n84, err := m.Noop.MarshalTo(data[i:]) if err != nil { return 0, err } i += n84 } return i, nil } func (m *ResponseUnion) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *ResponseUnion) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l if m.Get != nil { data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.Get.Size())) n85, err := m.Get.MarshalTo(data[i:]) if err != nil { return 0, err } i += n85 } if m.Put != nil { data[i] = 0x12 i++ i = encodeVarintApi(data, i, uint64(m.Put.Size())) n86, err := m.Put.MarshalTo(data[i:]) if err != nil { return 0, err } i += n86 } if m.ConditionalPut != nil { data[i] = 0x1a i++ i = encodeVarintApi(data, i, uint64(m.ConditionalPut.Size())) n87, err := m.ConditionalPut.MarshalTo(data[i:]) if err != nil { return 0, err } i += n87 } if m.Increment != nil { data[i] = 0x22 i++ i = encodeVarintApi(data, i, uint64(m.Increment.Size())) n88, err := m.Increment.MarshalTo(data[i:]) if err != nil { return 0, err } i += n88 } if m.Delete != nil { data[i] = 0x2a i++ i = encodeVarintApi(data, i, uint64(m.Delete.Size())) n89, err := m.Delete.MarshalTo(data[i:]) if err != nil { return 0, err } i += n89 } if m.DeleteRange != nil { data[i] = 0x32 i++ i = encodeVarintApi(data, i, uint64(m.DeleteRange.Size())) n90, err := m.DeleteRange.MarshalTo(data[i:]) if err != nil { return 0, err } i += n90 } if m.Scan != nil { data[i] = 0x3a i++ i = encodeVarintApi(data, i, uint64(m.Scan.Size())) n91, err := m.Scan.MarshalTo(data[i:]) if err != nil { return 0, err } i += n91 } if m.BeginTransaction != nil { data[i] = 0x42 i++ i = encodeVarintApi(data, i, uint64(m.BeginTransaction.Size())) n92, err := 
m.BeginTransaction.MarshalTo(data[i:]) if err != nil { return 0, err } i += n92 } if m.EndTransaction != nil { data[i] = 0x4a i++ i = encodeVarintApi(data, i, uint64(m.EndTransaction.Size())) n93, err := m.EndTransaction.MarshalTo(data[i:]) if err != nil { return 0, err } i += n93 } if m.AdminSplit != nil { data[i] = 0x52 i++ i = encodeVarintApi(data, i, uint64(m.AdminSplit.Size())) n94, err := m.AdminSplit.MarshalTo(data[i:]) if err != nil { return 0, err } i += n94 } if m.AdminMerge != nil { data[i] = 0x5a i++ i = encodeVarintApi(data, i, uint64(m.AdminMerge.Size())) n95, err := m.AdminMerge.MarshalTo(data[i:]) if err != nil { return 0, err } i += n95 } if m.HeartbeatTxn != nil { data[i] = 0x62 i++ i = encodeVarintApi(data, i, uint64(m.HeartbeatTxn.Size())) n96, err := m.HeartbeatTxn.MarshalTo(data[i:]) if err != nil { return 0, err } i += n96 } if m.Gc != nil { data[i] = 0x6a i++ i = encodeVarintApi(data, i, uint64(m.Gc.Size())) n97, err := m.Gc.MarshalTo(data[i:]) if err != nil { return 0, err } i += n97 } if m.PushTxn != nil { data[i] = 0x72 i++ i = encodeVarintApi(data, i, uint64(m.PushTxn.Size())) n98, err := m.PushTxn.MarshalTo(data[i:]) if err != nil { return 0, err } i += n98 } if m.RangeLookup != nil { data[i] = 0x7a i++ i = encodeVarintApi(data, i, uint64(m.RangeLookup.Size())) n99, err := m.RangeLookup.MarshalTo(data[i:]) if err != nil { return 0, err } i += n99 } if m.ResolveIntent != nil { data[i] = 0x82 i++ data[i] = 0x1 i++ i = encodeVarintApi(data, i, uint64(m.ResolveIntent.Size())) n100, err := m.ResolveIntent.MarshalTo(data[i:]) if err != nil { return 0, err } i += n100 } if m.ResolveIntentRange != nil { data[i] = 0x8a i++ data[i] = 0x1 i++ i = encodeVarintApi(data, i, uint64(m.ResolveIntentRange.Size())) n101, err := m.ResolveIntentRange.MarshalTo(data[i:]) if err != nil { return 0, err } i += n101 } if m.Merge != nil { data[i] = 0x92 i++ data[i] = 0x1 i++ i = encodeVarintApi(data, i, uint64(m.Merge.Size())) n102, err := 
// NOTE(review): generated code — end of ResponseUnion.MarshalTo, then Header/BatchRequest/
// BatchResponse marshaling, the little-endian fixed64/fixed32 and varint encode helpers, and
// the first of the Size() methods (each mirrors its MarshalTo byte-for-byte so Marshal can
// preallocate exactly). Do not hand-edit; regenerate from the .proto file.
m.Merge.MarshalTo(data[i:]) if err != nil { return 0, err } i += n102 } if m.TruncateLog != nil { data[i] = 0x9a i++ data[i] = 0x1 i++ i = encodeVarintApi(data, i, uint64(m.TruncateLog.Size())) n103, err := m.TruncateLog.MarshalTo(data[i:]) if err != nil { return 0, err } i += n103 } if m.LeaderLease != nil { data[i] = 0xa2 i++ data[i] = 0x1 i++ i = encodeVarintApi(data, i, uint64(m.LeaderLease.Size())) n104, err := m.LeaderLease.MarshalTo(data[i:]) if err != nil { return 0, err } i += n104 } if m.ReverseScan != nil { data[i] = 0xaa i++ data[i] = 0x1 i++ i = encodeVarintApi(data, i, uint64(m.ReverseScan.Size())) n105, err := m.ReverseScan.MarshalTo(data[i:]) if err != nil { return 0, err } i += n105 } if m.Noop != nil { data[i] = 0xb2 i++ data[i] = 0x1 i++ i = encodeVarintApi(data, i, uint64(m.Noop.Size())) n106, err := m.Noop.MarshalTo(data[i:]) if err != nil { return 0, err } i += n106 } return i, nil } func (m *Header) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *Header) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.Timestamp.Size())) n107, err := m.Timestamp.MarshalTo(data[i:]) if err != nil { return 0, err } i += n107 data[i] = 0x12 i++ i = encodeVarintApi(data, i, uint64(m.Replica.Size())) n108, err := m.Replica.MarshalTo(data[i:]) if err != nil { return 0, err } i += n108 data[i] = 0x18 i++ i = encodeVarintApi(data, i, uint64(m.RangeID)) data[i] = 0x21 i++ i = encodeFixed64Api(data, i, uint64(math.Float64bits(float64(m.UserPriority)))) if m.Txn != nil { data[i] = 0x2a i++ i = encodeVarintApi(data, i, uint64(m.Txn.Size())) n109, err := m.Txn.MarshalTo(data[i:]) if err != nil { return 0, err } i += n109 } data[i] = 0x30 i++ i = encodeVarintApi(data, i, uint64(m.ReadConsistency)) return i, nil } func (m *BatchRequest) Marshal() (data []byte, err error) { 
size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *BatchRequest) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.Header.Size())) n110, err := m.Header.MarshalTo(data[i:]) if err != nil { return 0, err } i += n110 if len(m.Requests) > 0 { for _, msg := range m.Requests { data[i] = 0x12 i++ i = encodeVarintApi(data, i, uint64(msg.Size())) n, err := msg.MarshalTo(data[i:]) if err != nil { return 0, err } i += n } } return i, nil } func (m *BatchResponse) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *BatchResponse) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.BatchResponse_Header.Size())) n111, err := m.BatchResponse_Header.MarshalTo(data[i:]) if err != nil { return 0, err } i += n111 if len(m.Responses) > 0 { for _, msg := range m.Responses { data[i] = 0x12 i++ i = encodeVarintApi(data, i, uint64(msg.Size())) n, err := msg.MarshalTo(data[i:]) if err != nil { return 0, err } i += n } } return i, nil } func (m *BatchResponse_Header) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) if err != nil { return nil, err } return data[:n], nil } func (m *BatchResponse_Header) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l if m.Error != nil { data[i] = 0xa i++ i = encodeVarintApi(data, i, uint64(m.Error.Size())) n112, err := m.Error.MarshalTo(data[i:]) if err != nil { return 0, err } i += n112 } data[i] = 0x12 i++ i = encodeVarintApi(data, i, uint64(m.Timestamp.Size())) n113, err := m.Timestamp.MarshalTo(data[i:]) if err != nil { return 0, err } i += n113 if m.Txn != nil { data[i] = 0x1a i++ i = encodeVarintApi(data, i, 
uint64(m.Txn.Size())) n114, err := m.Txn.MarshalTo(data[i:]) if err != nil { return 0, err } i += n114 } return i, nil } func encodeFixed64Api(data []byte, offset int, v uint64) int { data[offset] = uint8(v) data[offset+1] = uint8(v >> 8) data[offset+2] = uint8(v >> 16) data[offset+3] = uint8(v >> 24) data[offset+4] = uint8(v >> 32) data[offset+5] = uint8(v >> 40) data[offset+6] = uint8(v >> 48) data[offset+7] = uint8(v >> 56) return offset + 8 } func encodeFixed32Api(data []byte, offset int, v uint32) int { data[offset] = uint8(v) data[offset+1] = uint8(v >> 8) data[offset+2] = uint8(v >> 16) data[offset+3] = uint8(v >> 24) return offset + 4 } func encodeVarintApi(data []byte, offset int, v uint64) int { for v >= 1<<7 { data[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } data[offset] = uint8(v) return offset + 1 } func (m *ResponseHeader) Size() (n int) { var l int _ = l l = m.Timestamp.Size() n += 1 + l + sovApi(uint64(l)) if m.Txn != nil { l = m.Txn.Size() n += 1 + l + sovApi(uint64(l)) } return n } func (m *GetRequest) Size() (n int) { var l int _ = l l = m.Span.Size() n += 1 + l + sovApi(uint64(l)) return n } func (m *GetResponse) Size() (n int) { var l int _ = l l = m.ResponseHeader.Size() n += 1 + l + sovApi(uint64(l)) if m.Value != nil { l = m.Value.Size() n += 1 + l + sovApi(uint64(l)) } return n } func (m *PutRequest) Size() (n int) { var l int _ = l l = m.Span.Size() n += 1 + l + sovApi(uint64(l)) l = m.Value.Size() n += 1 + l + sovApi(uint64(l)) return n } func (m *PutResponse) Size() (n int) { var l int _ = l l = m.ResponseHeader.Size() n += 1 + l + sovApi(uint64(l)) return n } func (m *ConditionalPutRequest) Size() (n int) { var l int _ = l l = m.Span.Size() n += 1 + l + sovApi(uint64(l)) l = m.Value.Size() n += 1 + l + sovApi(uint64(l)) if m.ExpValue != nil { l = m.ExpValue.Size() n += 1 + l + sovApi(uint64(l)) } return n } func (m *ConditionalPutResponse) Size() (n int) { var l int _ = l l = m.ResponseHeader.Size() n += 1 + l + sovApi(uint64(l)) 
// NOTE(review): generated Size() methods — each accounts for 1 (or 2) tag bytes plus
// sovApi(len) length-prefix bytes per field, matching the corresponding MarshalTo exactly;
// "n += 2" entries cover fixed-width bool fields with their tag. Do not hand-edit.
return n } func (m *IncrementRequest) Size() (n int) { var l int _ = l l = m.Span.Size() n += 1 + l + sovApi(uint64(l)) n += 1 + sovApi(uint64(m.Increment)) return n } func (m *IncrementResponse) Size() (n int) { var l int _ = l l = m.ResponseHeader.Size() n += 1 + l + sovApi(uint64(l)) n += 1 + sovApi(uint64(m.NewValue)) return n } func (m *DeleteRequest) Size() (n int) { var l int _ = l l = m.Span.Size() n += 1 + l + sovApi(uint64(l)) return n } func (m *DeleteResponse) Size() (n int) { var l int _ = l l = m.ResponseHeader.Size() n += 1 + l + sovApi(uint64(l)) return n } func (m *DeleteRangeRequest) Size() (n int) { var l int _ = l l = m.Span.Size() n += 1 + l + sovApi(uint64(l)) n += 1 + sovApi(uint64(m.MaxEntriesToDelete)) return n } func (m *DeleteRangeResponse) Size() (n int) { var l int _ = l l = m.ResponseHeader.Size() n += 1 + l + sovApi(uint64(l)) n += 1 + sovApi(uint64(m.NumDeleted)) return n } func (m *ScanRequest) Size() (n int) { var l int _ = l l = m.Span.Size() n += 1 + l + sovApi(uint64(l)) n += 1 + sovApi(uint64(m.MaxResults)) return n } func (m *ScanResponse) Size() (n int) { var l int _ = l l = m.ResponseHeader.Size() n += 1 + l + sovApi(uint64(l)) if len(m.Rows) > 0 { for _, e := range m.Rows { l = e.Size() n += 1 + l + sovApi(uint64(l)) } } return n } func (m *ReverseScanRequest) Size() (n int) { var l int _ = l l = m.Span.Size() n += 1 + l + sovApi(uint64(l)) n += 1 + sovApi(uint64(m.MaxResults)) return n } func (m *ReverseScanResponse) Size() (n int) { var l int _ = l l = m.ResponseHeader.Size() n += 1 + l + sovApi(uint64(l)) if len(m.Rows) > 0 { for _, e := range m.Rows { l = e.Size() n += 1 + l + sovApi(uint64(l)) } } return n } func (m *BeginTransactionRequest) Size() (n int) { var l int _ = l l = m.Span.Size() n += 1 + l + sovApi(uint64(l)) return n } func (m *BeginTransactionResponse) Size() (n int) { var l int _ = l l = m.ResponseHeader.Size() n += 1 + l + sovApi(uint64(l)) return n } func (m *EndTransactionRequest) Size() (n int) { 
var l int _ = l l = m.Span.Size() n += 1 + l + sovApi(uint64(l)) n += 2 if m.Deadline != nil { l = m.Deadline.Size() n += 1 + l + sovApi(uint64(l)) } if m.InternalCommitTrigger != nil { l = m.InternalCommitTrigger.Size() n += 1 + l + sovApi(uint64(l)) } if len(m.IntentSpans) > 0 { for _, e := range m.IntentSpans { l = e.Size() n += 1 + l + sovApi(uint64(l)) } } return n } func (m *EndTransactionResponse) Size() (n int) { var l int _ = l l = m.ResponseHeader.Size() n += 1 + l + sovApi(uint64(l)) n += 1 + sovApi(uint64(m.CommitWait)) if len(m.Resolved) > 0 { for _, b := range m.Resolved { l = len(b) n += 1 + l + sovApi(uint64(l)) } } return n } func (m *AdminSplitRequest) Size() (n int) { var l int _ = l l = m.Span.Size() n += 1 + l + sovApi(uint64(l)) if m.SplitKey != nil { l = len(m.SplitKey) n += 1 + l + sovApi(uint64(l)) } return n } func (m *AdminSplitResponse) Size() (n int) { var l int _ = l l = m.ResponseHeader.Size() n += 1 + l + sovApi(uint64(l)) return n } func (m *AdminMergeRequest) Size() (n int) { var l int _ = l l = m.Span.Size() n += 1 + l + sovApi(uint64(l)) return n } func (m *AdminMergeResponse) Size() (n int) { var l int _ = l l = m.ResponseHeader.Size() n += 1 + l + sovApi(uint64(l)) return n } func (m *RangeLookupRequest) Size() (n int) { var l int _ = l l = m.Span.Size() n += 1 + l + sovApi(uint64(l)) n += 1 + sovApi(uint64(m.MaxRanges)) n += 2 n += 2 return n } func (m *RangeLookupResponse) Size() (n int) { var l int _ = l l = m.ResponseHeader.Size() n += 1 + l + sovApi(uint64(l)) if len(m.Ranges) > 0 { for _, e := range m.Ranges { l = e.Size() n += 1 + l + sovApi(uint64(l)) } } return n } func (m *HeartbeatTxnRequest) Size() (n int) { var l int _ = l l = m.Span.Size() n += 1 + l + sovApi(uint64(l)) return n } func (m *HeartbeatTxnResponse) Size() (n int) { var l int _ = l l = m.ResponseHeader.Size() n += 1 + l + sovApi(uint64(l)) return n } func (m *GCRequest) Size() (n int) { var l int _ = l l = m.Span.Size() n += 1 + l + sovApi(uint64(l)) 
if len(m.Keys) > 0 { for _, e := range m.Keys { l = e.Size() n += 1 + l + sovApi(uint64(l)) } } return n } func (m *GCRequest_GCKey) Size() (n int) { var l int _ = l if m.Key != nil { l = len(m.Key) n += 1 + l + sovApi(uint64(l)) } l = m.Timestamp.Size() n += 1 + l + sovApi(uint64(l)) return n } func (m *GCResponse) Size() (n int) { var l int _ = l l = m.ResponseHeader.Size() n += 1 + l + sovApi(uint64(l)) return n } func (m *PushTxnRequest) Size() (n int) { var l int _ = l l = m.Span.Size() n += 1 + l + sovApi(uint64(l)) l = m.PusherTxn.Size() n += 1 + l + sovApi(uint64(l)) l = m.PusheeTxn.Size() n += 1 + l + sovApi(uint64(l)) l = m.PushTo.Size() n += 1 + l + sovApi(uint64(l)) l = m.Now.Size() n += 1 + l + sovApi(uint64(l)) n += 1 + sovApi(uint64(m.PushType)) return n } func (m *PushTxnResponse) Size() (n int) { var l int _ = l l = m.ResponseHeader.Size() n += 1 + l + sovApi(uint64(l)) l = m.PusheeTxn.Size() n += 1 + l + sovApi(uint64(l)) return n } func (m *ResolveIntentRequest) Size() (n int) { var l int _ = l l = m.Span.Size() n += 1 + l + sovApi(uint64(l)) l = m.IntentTxn.Size() n += 1 + l + sovApi(uint64(l)) n += 1 + sovApi(uint64(m.Status)) n += 2 return n } func (m *ResolveIntentResponse) Size() (n int) { var l int _ = l l = m.ResponseHeader.Size() n += 1 + l + sovApi(uint64(l)) return n } func (m *ResolveIntentRangeRequest) Size() (n int) { var l int _ = l l = m.Span.Size() n += 1 + l + sovApi(uint64(l)) l = m.IntentTxn.Size() n += 1 + l + sovApi(uint64(l)) n += 1 + sovApi(uint64(m.Status)) n += 2 return n } func (m *NoopResponse) Size() (n int) { var l int _ = l l = m.ResponseHeader.Size() n += 1 + l + sovApi(uint64(l)) return n } func (m *NoopRequest) Size() (n int) { var l int _ = l l = m.Span.Size() n += 1 + l + sovApi(uint64(l)) return n } func (m *ResolveIntentRangeResponse) Size() (n int) { var l int _ = l l = m.ResponseHeader.Size() n += 1 + l + sovApi(uint64(l)) return n } func (m *MergeRequest) Size() (n int) { var l int _ = l l = m.Span.Size() n 
+= 1 + l + sovApi(uint64(l)) l = m.Value.Size() n += 1 + l + sovApi(uint64(l)) return n } func (m *MergeResponse) Size() (n int) { var l int _ = l l = m.ResponseHeader.Size() n += 1 + l + sovApi(uint64(l)) return n } func (m *TruncateLogRequest) Size() (n int) { var l int _ = l l = m.Span.Size() n += 1 + l + sovApi(uint64(l)) n += 1 + sovApi(uint64(m.Index)) n += 1 + sovApi(uint64(m.RangeID)) return n } func (m *TruncateLogResponse) Size() (n int) { var l int _ = l l = m.ResponseHeader.Size() n += 1 + l + sovApi(uint64(l)) return n } func (m *LeaderLeaseRequest) Size() (n int) { var l int _ = l l = m.Span.Size() n += 1 + l + sovApi(uint64(l)) l = m.Lease.Size() n += 1 + l + sovApi(uint64(l)) return n } func (m *LeaderLeaseResponse) Size() (n int) { var l int _ = l l = m.ResponseHeader.Size() n += 1 + l + sovApi(uint64(l)) return n } func (m *RequestUnion) Size() (n int) { var l int _ = l if m.Get != nil { l = m.Get.Size() n += 1 + l + sovApi(uint64(l)) } if m.Put != nil { l = m.Put.Size() n += 1 + l + sovApi(uint64(l)) } if m.ConditionalPut != nil { l = m.ConditionalPut.Size() n += 1 + l + sovApi(uint64(l)) } if m.Increment != nil { l = m.Increment.Size() n += 1 + l + sovApi(uint64(l)) } if m.Delete != nil { l = m.Delete.Size() n += 1 + l + sovApi(uint64(l)) } if m.DeleteRange != nil { l = m.DeleteRange.Size() n += 1 + l + sovApi(uint64(l)) } if m.Scan != nil { l = m.Scan.Size() n += 1 + l + sovApi(uint64(l)) } if m.BeginTransaction != nil { l = m.BeginTransaction.Size() n += 1 + l + sovApi(uint64(l)) } if m.EndTransaction != nil { l = m.EndTransaction.Size() n += 1 + l + sovApi(uint64(l)) } if m.AdminSplit != nil { l = m.AdminSplit.Size() n += 1 + l + sovApi(uint64(l)) } if m.AdminMerge != nil { l = m.AdminMerge.Size() n += 1 + l + sovApi(uint64(l)) } if m.HeartbeatTxn != nil { l = m.HeartbeatTxn.Size() n += 1 + l + sovApi(uint64(l)) } if m.Gc != nil { l = m.Gc.Size() n += 1 + l + sovApi(uint64(l)) } if m.PushTxn != nil { l = m.PushTxn.Size() n += 1 + l + 
sovApi(uint64(l)) } if m.RangeLookup != nil { l = m.RangeLookup.Size() n += 1 + l + sovApi(uint64(l)) } if m.ResolveIntent != nil { l = m.ResolveIntent.Size() n += 2 + l + sovApi(uint64(l)) } if m.ResolveIntentRange != nil { l = m.ResolveIntentRange.Size() n += 2 + l + sovApi(uint64(l)) } if m.Merge != nil { l = m.Merge.Size() n += 2 + l + sovApi(uint64(l)) } if m.TruncateLog != nil { l = m.TruncateLog.Size() n += 2 + l + sovApi(uint64(l)) } if m.LeaderLease != nil { l = m.LeaderLease.Size() n += 2 + l + sovApi(uint64(l)) } if m.ReverseScan != nil { l = m.ReverseScan.Size() n += 2 + l + sovApi(uint64(l)) } if m.Noop != nil { l = m.Noop.Size() n += 2 + l + sovApi(uint64(l)) } return n } func (m *ResponseUnion) Size() (n int) { var l int _ = l if m.Get != nil { l = m.Get.Size() n += 1 + l + sovApi(uint64(l)) } if m.Put != nil { l = m.Put.Size() n += 1 + l + sovApi(uint64(l)) } if m.ConditionalPut != nil { l = m.ConditionalPut.Size() n += 1 + l + sovApi(uint64(l)) } if m.Increment != nil { l = m.Increment.Size() n += 1 + l + sovApi(uint64(l)) } if m.Delete != nil { l = m.Delete.Size() n += 1 + l + sovApi(uint64(l)) } if m.DeleteRange != nil { l = m.DeleteRange.Size() n += 1 + l + sovApi(uint64(l)) } if m.Scan != nil { l = m.Scan.Size() n += 1 + l + sovApi(uint64(l)) } if m.BeginTransaction != nil { l = m.BeginTransaction.Size() n += 1 + l + sovApi(uint64(l)) } if m.EndTransaction != nil { l = m.EndTransaction.Size() n += 1 + l + sovApi(uint64(l)) } if m.AdminSplit != nil { l = m.AdminSplit.Size() n += 1 + l + sovApi(uint64(l)) } if m.AdminMerge != nil { l = m.AdminMerge.Size() n += 1 + l + sovApi(uint64(l)) } if m.HeartbeatTxn != nil { l = m.HeartbeatTxn.Size() n += 1 + l + sovApi(uint64(l)) } if m.Gc != nil { l = m.Gc.Size() n += 1 + l + sovApi(uint64(l)) } if m.PushTxn != nil { l = m.PushTxn.Size() n += 1 + l + sovApi(uint64(l)) } if m.RangeLookup != nil { l = m.RangeLookup.Size() n += 1 + l + sovApi(uint64(l)) } if m.ResolveIntent != nil { l = 
m.ResolveIntent.Size() n += 2 + l + sovApi(uint64(l)) } if m.ResolveIntentRange != nil { l = m.ResolveIntentRange.Size() n += 2 + l + sovApi(uint64(l)) } if m.Merge != nil { l = m.Merge.Size() n += 2 + l + sovApi(uint64(l)) } if m.TruncateLog != nil { l = m.TruncateLog.Size() n += 2 + l + sovApi(uint64(l)) } if m.LeaderLease != nil { l = m.LeaderLease.Size() n += 2 + l + sovApi(uint64(l)) } if m.ReverseScan != nil { l = m.ReverseScan.Size() n += 2 + l + sovApi(uint64(l)) } if m.Noop != nil { l = m.Noop.Size() n += 2 + l + sovApi(uint64(l)) } return n } func (m *Header) Size() (n int) { var l int _ = l l = m.Timestamp.Size() n += 1 + l + sovApi(uint64(l)) l = m.Replica.Size() n += 1 + l + sovApi(uint64(l)) n += 1 + sovApi(uint64(m.RangeID)) n += 9 if m.Txn != nil { l = m.Txn.Size() n += 1 + l + sovApi(uint64(l)) } n += 1 + sovApi(uint64(m.ReadConsistency)) return n } func (m *BatchRequest) Size() (n int) { var l int _ = l l = m.Header.Size() n += 1 + l + sovApi(uint64(l)) if len(m.Requests) > 0 { for _, e := range m.Requests { l = e.Size() n += 1 + l + sovApi(uint64(l)) } } return n } func (m *BatchResponse) Size() (n int) { var l int _ = l l = m.BatchResponse_Header.Size() n += 1 + l + sovApi(uint64(l)) if len(m.Responses) > 0 { for _, e := range m.Responses { l = e.Size() n += 1 + l + sovApi(uint64(l)) } } return n } func (m *BatchResponse_Header) Size() (n int) { var l int _ = l if m.Error != nil { l = m.Error.Size() n += 1 + l + sovApi(uint64(l)) } l = m.Timestamp.Size() n += 1 + l + sovApi(uint64(l)) if m.Txn != nil { l = m.Txn.Size() n += 1 + l + sovApi(uint64(l)) } return n } func sovApi(x uint64) (n int) { for { n++ x >>= 7 if x == 0 { break } } return n } func sozApi(x uint64) (n int) { return sovApi(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } func (this *RequestUnion) GetValue() interface{} { if this.Get != nil { return this.Get } if this.Put != nil { return this.Put } if this.ConditionalPut != nil { return this.ConditionalPut } if this.Increment != nil 
{ return this.Increment } if this.Delete != nil { return this.Delete } if this.DeleteRange != nil { return this.DeleteRange } if this.Scan != nil { return this.Scan } if this.BeginTransaction != nil { return this.BeginTransaction } if this.EndTransaction != nil { return this.EndTransaction } if this.AdminSplit != nil { return this.AdminSplit } if this.AdminMerge != nil { return this.AdminMerge } if this.HeartbeatTxn != nil { return this.HeartbeatTxn } if this.Gc != nil { return this.Gc } if this.PushTxn != nil { return this.PushTxn } if this.RangeLookup != nil { return this.RangeLookup } if this.ResolveIntent != nil { return this.ResolveIntent } if this.ResolveIntentRange != nil { return this.ResolveIntentRange } if this.Merge != nil { return this.Merge } if this.TruncateLog != nil { return this.TruncateLog } if this.LeaderLease != nil { return this.LeaderLease } if this.ReverseScan != nil { return this.ReverseScan } if this.Noop != nil { return this.Noop } return nil } func (this *RequestUnion) SetValue(value interface{}) bool { switch vt := value.(type) { case *GetRequest: this.Get = vt case *PutRequest: this.Put = vt case *ConditionalPutRequest: this.ConditionalPut = vt case *IncrementRequest: this.Increment = vt case *DeleteRequest: this.Delete = vt case *DeleteRangeRequest: this.DeleteRange = vt case *ScanRequest: this.Scan = vt case *BeginTransactionRequest: this.BeginTransaction = vt case *EndTransactionRequest: this.EndTransaction = vt case *AdminSplitRequest: this.AdminSplit = vt case *AdminMergeRequest: this.AdminMerge = vt case *HeartbeatTxnRequest: this.HeartbeatTxn = vt case *GCRequest: this.Gc = vt case *PushTxnRequest: this.PushTxn = vt case *RangeLookupRequest: this.RangeLookup = vt case *ResolveIntentRequest: this.ResolveIntent = vt case *ResolveIntentRangeRequest: this.ResolveIntentRange = vt case *MergeRequest: this.Merge = vt case *TruncateLogRequest: this.TruncateLog = vt case *LeaderLeaseRequest: this.LeaderLease = vt case *ReverseScanRequest: 
this.ReverseScan = vt case *NoopRequest: this.Noop = vt default: return false } return true } func (this *ResponseUnion) GetValue() interface{} { if this.Get != nil { return this.Get } if this.Put != nil { return this.Put } if this.ConditionalPut != nil { return this.ConditionalPut } if this.Increment != nil { return this.Increment } if this.Delete != nil { return this.Delete } if this.DeleteRange != nil { return this.DeleteRange } if this.Scan != nil { return this.Scan } if this.BeginTransaction != nil { return this.BeginTransaction } if this.EndTransaction != nil { return this.EndTransaction } if this.AdminSplit != nil { return this.AdminSplit } if this.AdminMerge != nil { return this.AdminMerge } if this.HeartbeatTxn != nil { return this.HeartbeatTxn } if this.Gc != nil { return this.Gc } if this.PushTxn != nil { return this.PushTxn } if this.RangeLookup != nil { return this.RangeLookup } if this.ResolveIntent != nil { return this.ResolveIntent } if this.ResolveIntentRange != nil { return this.ResolveIntentRange } if this.Merge != nil { return this.Merge } if this.TruncateLog != nil { return this.TruncateLog } if this.LeaderLease != nil { return this.LeaderLease } if this.ReverseScan != nil { return this.ReverseScan } if this.Noop != nil { return this.Noop } return nil } func (this *ResponseUnion) SetValue(value interface{}) bool { switch vt := value.(type) { case *GetResponse: this.Get = vt case *PutResponse: this.Put = vt case *ConditionalPutResponse: this.ConditionalPut = vt case *IncrementResponse: this.Increment = vt case *DeleteResponse: this.Delete = vt case *DeleteRangeResponse: this.DeleteRange = vt case *ScanResponse: this.Scan = vt case *BeginTransactionResponse: this.BeginTransaction = vt case *EndTransactionResponse: this.EndTransaction = vt case *AdminSplitResponse: this.AdminSplit = vt case *AdminMergeResponse: this.AdminMerge = vt case *HeartbeatTxnResponse: this.HeartbeatTxn = vt case *GCResponse: this.Gc = vt case *PushTxnResponse: this.PushTxn 
= vt case *RangeLookupResponse: this.RangeLookup = vt case *ResolveIntentResponse: this.ResolveIntent = vt case *ResolveIntentRangeResponse: this.ResolveIntentRange = vt case *MergeResponse: this.Merge = vt case *TruncateLogResponse: this.TruncateLog = vt case *LeaderLeaseResponse: this.LeaderLease = vt case *ReverseScanResponse: this.ReverseScan = vt case *NoopResponse: this.Noop = vt default: return false } return true } func (m *ResponseHeader) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ResponseHeader: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ResponseHeader: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Timestamp.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Txn", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + 
msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Txn == nil { m.Txn = &Transaction{} } if err := m.Txn.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *GetRequest) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: GetRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: GetRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Span", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Span.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *GetResponse) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := 
iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: GetResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: GetResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ResponseHeader", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.ResponseHeader.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Value == nil { m.Value = &Value{} } if err := m.Value.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *PutRequest) Unmarshal(data []byte) error { l := len(data) 
iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: PutRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: PutRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Span", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Span.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Value.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *PutResponse) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { 
preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: PutResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: PutResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ResponseHeader", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.ResponseHeader.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ConditionalPutRequest) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ConditionalPutRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ConditionalPutRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch 
fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Span", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Span.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Value.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ExpValue", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.ExpValue == nil { m.ExpValue = &Value{} } if err := m.ExpValue.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ConditionalPutResponse) 
Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ConditionalPutResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ConditionalPutResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ResponseHeader", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.ResponseHeader.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *IncrementRequest) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: IncrementRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: 
IncrementRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Span", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Span.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Increment", wireType) } m.Increment = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ m.Increment |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *IncrementResponse) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: IncrementResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: IncrementResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ResponseHeader", wireType) } var msglen 
int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.ResponseHeader.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field NewValue", wireType) } m.NewValue = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ m.NewValue |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *DeleteRequest) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: DeleteRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: DeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Span", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if 
msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Span.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *DeleteResponse) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: DeleteResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: DeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ResponseHeader", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.ResponseHeader.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *DeleteRangeRequest) Unmarshal(data []byte) error 
{ l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: DeleteRangeRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: DeleteRangeRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Span", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Span.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field MaxEntriesToDelete", wireType) } m.MaxEntriesToDelete = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ m.MaxEntriesToDelete |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *DeleteRangeResponse) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { 
return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: DeleteRangeResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: DeleteRangeResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ResponseHeader", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.ResponseHeader.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field NumDeleted", wireType) } m.NumDeleted = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ m.NumDeleted |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ScanRequest) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { 
return fmt.Errorf("proto: ScanRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ScanRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Span", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Span.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field MaxResults", wireType) } m.MaxResults = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ m.MaxResults |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ScanResponse) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ScanResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ScanResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType 
!= 2 { return fmt.Errorf("proto: wrong wireType = %d for field ResponseHeader", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.ResponseHeader.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Rows", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.Rows = append(m.Rows, KeyValue{}) if err := m.Rows[len(m.Rows)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ReverseScanRequest) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ReverseScanRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ReverseScanRequest: illegal tag %d (wire 
type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Span", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Span.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field MaxResults", wireType) } m.MaxResults = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ m.MaxResults |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ReverseScanResponse) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ReverseScanResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ReverseScanResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ResponseHeader", wireType) } var msglen int for shift := uint(0); ; 
shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.ResponseHeader.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Rows", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.Rows = append(m.Rows, KeyValue{}) if err := m.Rows[len(m.Rows)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *BeginTransactionRequest) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: BeginTransactionRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: BeginTransactionRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = 
%d for field Span", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Span.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *BeginTransactionResponse) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: BeginTransactionResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: BeginTransactionResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ResponseHeader", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.ResponseHeader.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err 
:= skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *EndTransactionRequest) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: EndTransactionRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: EndTransactionRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Span", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Span.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } m.Commit = bool(v != 0) case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Deadline", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return 
io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Deadline == nil { m.Deadline = &Timestamp{} } if err := m.Deadline.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field InternalCommitTrigger", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.InternalCommitTrigger == nil { m.InternalCommitTrigger = &InternalCommitTrigger{} } if err := m.InternalCommitTrigger.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field IntentSpans", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.IntentSpans = append(m.IntentSpans, Span{}) if err := m.IntentSpans[len(m.IntentSpans)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *EndTransactionResponse) Unmarshal(data []byte) error 
{ l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: EndTransactionResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: EndTransactionResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ResponseHeader", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.ResponseHeader.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field CommitWait", wireType) } m.CommitWait = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ m.CommitWait |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Resolved", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Resolved = append(m.Resolved, make([]byte, 
postIndex-iNdEx)) copy(m.Resolved[len(m.Resolved)-1], data[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *AdminSplitRequest) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: AdminSplitRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: AdminSplitRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Span", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Span.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field SplitKey", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } 
m.SplitKey = append(m.SplitKey[:0], data[iNdEx:postIndex]...) if m.SplitKey == nil { m.SplitKey = []byte{} } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *AdminSplitResponse) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: AdminSplitResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: AdminSplitResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ResponseHeader", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.ResponseHeader.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *AdminMergeRequest) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire 
uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: AdminMergeRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: AdminMergeRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Span", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Span.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *AdminMergeResponse) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: AdminMergeResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: AdminMergeResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return 
fmt.Errorf("proto: wrong wireType = %d for field ResponseHeader", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.ResponseHeader.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *RangeLookupRequest) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: RangeLookupRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: RangeLookupRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Span", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Span.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType 
!= 0 { return fmt.Errorf("proto: wrong wireType = %d for field MaxRanges", wireType) } m.MaxRanges = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ m.MaxRanges |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ConsiderIntents", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } m.ConsiderIntents = bool(v != 0) case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Reverse", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } m.Reverse = bool(v != 0) default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *RangeLookupResponse) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: RangeLookupResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: RangeLookupResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong 
wireType = %d for field ResponseHeader", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.ResponseHeader.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.Ranges = append(m.Ranges, RangeDescriptor{}) if err := m.Ranges[len(m.Ranges)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } /* Unmarshal decodes a HeartbeatTxnRequest from protobuf wire data (generated decoder; do not hand-edit). */ func (m *HeartbeatTxnRequest) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: HeartbeatTxnRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: HeartbeatTxnRequest: illegal tag %d (wire type %d)", fieldNum,
wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Span", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Span.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } /* Unmarshal decodes a HeartbeatTxnResponse from protobuf wire data (generated decoder; do not hand-edit). */ func (m *HeartbeatTxnResponse) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: HeartbeatTxnResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: HeartbeatTxnResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ResponseHeader", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err :=
m.ResponseHeader.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } /* Unmarshal decodes a GCRequest from protobuf wire data; note field 3 (Keys) is repeated and appended per occurrence. Generated decoder; do not hand-edit. */ func (m *GCRequest) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: GCRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: GCRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Span", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Span.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.Keys = append(m.Keys,
GCRequest_GCKey{}) if err := m.Keys[len(m.Keys)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } /* Unmarshal decodes a GCRequest_GCKey; field 1 (Key) is raw bytes copied out of the input, field 2 (Timestamp) a submessage. Generated decoder; do not hand-edit. */ func (m *GCRequest_GCKey) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: GCKey: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: GCKey: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Key = append(m.Key[:0], data[iNdEx:postIndex]...)
if m.Key == nil { m.Key = []byte{} } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Timestamp.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } /* Unmarshal decodes a GCResponse from protobuf wire data (generated decoder; do not hand-edit). */ func (m *GCResponse) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: GCResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: GCResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ResponseHeader", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err :=
m.ResponseHeader.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } /* Unmarshal decodes a PushTxnRequest from protobuf wire data (generated decoder; do not hand-edit). */ func (m *PushTxnRequest) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: PushTxnRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: PushTxnRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Span", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Span.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field PusherTxn", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err :=
m.PusherTxn.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field PusheeTxn", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.PusheeTxn.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field PushTo", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.PushTo.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Now", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Now.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex /* field 6: PushType decoded as a varint into the PushTxnType enum */ case 6: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field PushType", wireType) } m.PushType = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx]
iNdEx++ m.PushType |= (PushTxnType(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } /* Unmarshal decodes a PushTxnResponse from protobuf wire data (generated decoder; do not hand-edit). */ func (m *PushTxnResponse) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: PushTxnResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: PushTxnResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ResponseHeader", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.ResponseHeader.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field PusheeTxn", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if
err := m.PusheeTxn.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } /* Unmarshal decodes a ResolveIntentRequest from protobuf wire data (generated decoder; do not hand-edit). */ func (m *ResolveIntentRequest) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ResolveIntentRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ResolveIntentRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Span", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Span.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field IntentTxn", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return
io.ErrUnexpectedEOF } if err := m.IntentTxn.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } m.Status = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ m.Status |= (TransactionStatus(b) & 0x7F) << shift if b < 0x80 { break } } case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Poison", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } m.Poison = bool(v != 0) default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } /* Unmarshal decodes a ResolveIntentResponse from protobuf wire data (generated decoder; do not hand-edit). */ func (m *ResolveIntentResponse) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ResolveIntentResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ResolveIntentResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ResponseHeader", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx]
iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.ResponseHeader.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } /* Unmarshal decodes a ResolveIntentRangeRequest from protobuf wire data (generated decoder; do not hand-edit). */ func (m *ResolveIntentRangeRequest) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ResolveIntentRangeRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ResolveIntentRangeRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Span", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Span.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field IntentTxn", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return
io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.IntentTxn.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } m.Status = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ m.Status |= (TransactionStatus(b) & 0x7F) << shift if b < 0x80 { break } } case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Poison", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } m.Poison = bool(v != 0) default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } /* Unmarshal decodes a NoopResponse from protobuf wire data (generated decoder; do not hand-edit). */ func (m *NoopResponse) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: NoopResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: NoopResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field
ResponseHeader", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.ResponseHeader.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } /* Unmarshal decodes a NoopRequest from protobuf wire data (generated decoder; do not hand-edit). */ func (m *NoopRequest) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: NoopRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: NoopRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Span", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Span.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err
} if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } /* Unmarshal decodes a ResolveIntentRangeResponse from protobuf wire data (generated decoder; do not hand-edit). */ func (m *ResolveIntentRangeResponse) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ResolveIntentRangeResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ResolveIntentRangeResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ResponseHeader", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.ResponseHeader.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } /* Unmarshal decodes a MergeRequest from protobuf wire data (generated decoder; do not hand-edit). */ func (m *MergeRequest) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b <
0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: MergeRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: MergeRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Span", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Span.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Value.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } /* Unmarshal decodes a MergeResponse from protobuf wire data (generated decoder; do not hand-edit). */ func (m *MergeResponse) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } }
fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: MergeResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: MergeResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ResponseHeader", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.ResponseHeader.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } /* Unmarshal decodes a TruncateLogRequest from protobuf wire data (generated decoder; do not hand-edit). */ func (m *TruncateLogRequest) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: TruncateLogRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: TruncateLogRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Span", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b
:= data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Span.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) } m.Index = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ m.Index |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field RangeID", wireType) } m.RangeID = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ m.RangeID |= (RangeID(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } /* Unmarshal decodes a TruncateLogResponse from protobuf wire data (generated decoder; do not hand-edit). */ func (m *TruncateLogResponse) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: TruncateLogResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: TruncateLogResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ResponseHeader", wireType) } var msglen
int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.ResponseHeader.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *LeaderLeaseRequest) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: LeaderLeaseRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: LeaderLeaseRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Span", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Span.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) } var 
msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Lease.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *LeaderLeaseResponse) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: LeaderLeaseResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: LeaderLeaseResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ResponseHeader", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.ResponseHeader.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return 
err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *RequestUnion) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: RequestUnion: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: RequestUnion: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Get", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Get == nil { m.Get = &GetRequest{} } if err := m.Get.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Put", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Put == nil { m.Put = &PutRequest{} } if err := m.Put.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { 
return fmt.Errorf("proto: wrong wireType = %d for field ConditionalPut", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.ConditionalPut == nil { m.ConditionalPut = &ConditionalPutRequest{} } if err := m.ConditionalPut.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Increment", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Increment == nil { m.Increment = &IncrementRequest{} } if err := m.Increment.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Delete", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Delete == nil { m.Delete = &DeleteRequest{} } if err := m.Delete.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field DeleteRange", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { 
return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.DeleteRange == nil { m.DeleteRange = &DeleteRangeRequest{} } if err := m.DeleteRange.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 7: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Scan", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Scan == nil { m.Scan = &ScanRequest{} } if err := m.Scan.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 8: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field BeginTransaction", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.BeginTransaction == nil { m.BeginTransaction = &BeginTransactionRequest{} } if err := m.BeginTransaction.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 9: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field EndTransaction", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b 
< 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.EndTransaction == nil { m.EndTransaction = &EndTransactionRequest{} } if err := m.EndTransaction.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 10: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field AdminSplit", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.AdminSplit == nil { m.AdminSplit = &AdminSplitRequest{} } if err := m.AdminSplit.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 11: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field AdminMerge", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.AdminMerge == nil { m.AdminMerge = &AdminMergeRequest{} } if err := m.AdminMerge.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 12: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field HeartbeatTxn", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return 
io.ErrUnexpectedEOF } if m.HeartbeatTxn == nil { m.HeartbeatTxn = &HeartbeatTxnRequest{} } if err := m.HeartbeatTxn.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 13: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Gc", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Gc == nil { m.Gc = &GCRequest{} } if err := m.Gc.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 14: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field PushTxn", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.PushTxn == nil { m.PushTxn = &PushTxnRequest{} } if err := m.PushTxn.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 15: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RangeLookup", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.RangeLookup == nil { m.RangeLookup = &RangeLookupRequest{} } if err := m.RangeLookup.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = 
postIndex case 16: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ResolveIntent", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.ResolveIntent == nil { m.ResolveIntent = &ResolveIntentRequest{} } if err := m.ResolveIntent.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 17: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ResolveIntentRange", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.ResolveIntentRange == nil { m.ResolveIntentRange = &ResolveIntentRangeRequest{} } if err := m.ResolveIntentRange.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 18: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Merge", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Merge == nil { m.Merge = &MergeRequest{} } if err := m.Merge.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 19: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field TruncateLog", 
wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.TruncateLog == nil { m.TruncateLog = &TruncateLogRequest{} } if err := m.TruncateLog.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 20: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field LeaderLease", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.LeaderLease == nil { m.LeaderLease = &LeaderLeaseRequest{} } if err := m.LeaderLease.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 21: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ReverseScan", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.ReverseScan == nil { m.ReverseScan = &ReverseScanRequest{} } if err := m.ReverseScan.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 22: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Noop", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return 
io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Noop == nil { m.Noop = &NoopRequest{} } if err := m.Noop.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ResponseUnion) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ResponseUnion: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ResponseUnion: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Get", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Get == nil { m.Get = &GetResponse{} } if err := m.Get.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Put", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if 
shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Put == nil { m.Put = &PutResponse{} } if err := m.Put.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ConditionalPut", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.ConditionalPut == nil { m.ConditionalPut = &ConditionalPutResponse{} } if err := m.ConditionalPut.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Increment", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Increment == nil { m.Increment = &IncrementResponse{} } if err := m.Increment.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Delete", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { 
break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Delete == nil { m.Delete = &DeleteResponse{} } if err := m.Delete.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field DeleteRange", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.DeleteRange == nil { m.DeleteRange = &DeleteRangeResponse{} } if err := m.DeleteRange.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 7: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Scan", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Scan == nil { m.Scan = &ScanResponse{} } if err := m.Scan.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 8: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field BeginTransaction", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.BeginTransaction == nil { 
m.BeginTransaction = &BeginTransactionResponse{} } if err := m.BeginTransaction.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 9: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field EndTransaction", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.EndTransaction == nil { m.EndTransaction = &EndTransactionResponse{} } if err := m.EndTransaction.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 10: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field AdminSplit", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.AdminSplit == nil { m.AdminSplit = &AdminSplitResponse{} } if err := m.AdminSplit.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 11: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field AdminMerge", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.AdminMerge == nil { m.AdminMerge = &AdminMergeResponse{} } if err := m.AdminMerge.Unmarshal(data[iNdEx:postIndex]); err != 
nil { return err } iNdEx = postIndex case 12: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field HeartbeatTxn", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.HeartbeatTxn == nil { m.HeartbeatTxn = &HeartbeatTxnResponse{} } if err := m.HeartbeatTxn.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 13: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Gc", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Gc == nil { m.Gc = &GCResponse{} } if err := m.Gc.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 14: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field PushTxn", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.PushTxn == nil { m.PushTxn = &PushTxnResponse{} } if err := m.PushTxn.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 15: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RangeLookup", wireType) } var msglen int for shift := uint(0); 
; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.RangeLookup == nil { m.RangeLookup = &RangeLookupResponse{} } if err := m.RangeLookup.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 16: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ResolveIntent", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.ResolveIntent == nil { m.ResolveIntent = &ResolveIntentResponse{} } if err := m.ResolveIntent.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 17: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ResolveIntentRange", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.ResolveIntentRange == nil { m.ResolveIntentRange = &ResolveIntentRangeResponse{} } if err := m.ResolveIntentRange.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 18: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Merge", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return 
io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Merge == nil { m.Merge = &MergeResponse{} } if err := m.Merge.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 19: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field TruncateLog", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.TruncateLog == nil { m.TruncateLog = &TruncateLogResponse{} } if err := m.TruncateLog.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 20: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field LeaderLease", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.LeaderLease == nil { m.LeaderLease = &LeaderLeaseResponse{} } if err := m.LeaderLease.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 21: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ReverseScan", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi 
} postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.ReverseScan == nil { m.ReverseScan = &ReverseScanResponse{} } if err := m.ReverseScan.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 22: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Noop", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Noop == nil { m.Noop = &NoopResponse{} } if err := m.Noop.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *Header) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: Header: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: Header: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) 
& 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Timestamp.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Replica", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Replica.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field RangeID", wireType) } m.RangeID = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ m.RangeID |= (RangeID(b) & 0x7F) << shift if b < 0x80 { break } } case 4: if wireType != 1 { return fmt.Errorf("proto: wrong wireType = %d for field UserPriority", wireType) } var v uint64 if (iNdEx + 8) > l { return io.ErrUnexpectedEOF } iNdEx += 8 v = uint64(data[iNdEx-8]) v |= uint64(data[iNdEx-7]) << 8 v |= uint64(data[iNdEx-6]) << 16 v |= uint64(data[iNdEx-5]) << 24 v |= uint64(data[iNdEx-4]) << 32 v |= uint64(data[iNdEx-3]) << 40 v |= uint64(data[iNdEx-2]) << 48 v |= uint64(data[iNdEx-1]) << 56 m.UserPriority = UserPriority(math.Float64frombits(v)) case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Txn", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return 
ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Txn == nil { m.Txn = &Transaction{} } if err := m.Txn.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 6: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ReadConsistency", wireType) } m.ReadConsistency = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ m.ReadConsistency |= (ReadConsistencyType(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *BatchRequest) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: BatchRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: BatchRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Header.Unmarshal(data[iNdEx:postIndex]); 
err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.Requests = append(m.Requests, RequestUnion{}) if err := m.Requests[len(m.Requests)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *BatchResponse) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: BatchResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: BatchResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field BatchResponse_Header", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return 
io.ErrUnexpectedEOF } if err := m.BatchResponse_Header.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Responses", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.Responses = append(m.Responses, ResponseUnion{}) if err := m.Responses[len(m.Responses)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *BatchResponse_Header) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: Header: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: Header: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return 
ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Error == nil { m.Error = &Error{} } if err := m.Error.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Timestamp.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Txn", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowApi } if iNdEx >= l { return io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthApi } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Txn == nil { m.Txn = &Transaction{} } if err := m.Txn.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(data[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthApi } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func skipApi(data []byte) (n int, err error) { l := len(data) iNdEx := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowApi } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } wireType := int(wire & 0x7) switch 
wireType { case 0: for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowApi } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } iNdEx++ if data[iNdEx-1] < 0x80 { break } } return iNdEx, nil case 1: iNdEx += 8 return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowApi } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { break } } iNdEx += length if length < 0 { return 0, ErrInvalidLengthApi } return iNdEx, nil case 3: for { var innerWire uint64 var start int = iNdEx for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowApi } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := data[iNdEx] iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } innerWireType := int(innerWire & 0x7) if innerWireType == 4 { break } next, err := skipApi(data[start:]) if err != nil { return 0, err } iNdEx = start + next } return iNdEx, nil case 4: return iNdEx, nil case 5: iNdEx += 4 return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } } panic("unreachable") } var ( ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowApi = fmt.Errorf("proto: integer overflow") )
{ "content_hash": "8a94b9ef19fa22897850e53fefe853f1", "timestamp": "", "source": "github", "line_count": 11236, "max_line_length": 151, "avg_line_length": 23.414026343894623, "alnum_prop": 0.6141325832446404, "repo_name": "il9ue/cockroach", "id": "11b8cfc758630e7b050873096454d50a197cab83", "size": "263080", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "roachpb/api.pb.go", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Awk", "bytes": "1029" }, { "name": "C", "bytes": "5828" }, { "name": "C++", "bytes": "55148" }, { "name": "CSS", "bytes": "24969" }, { "name": "Go", "bytes": "4318472" }, { "name": "HCL", "bytes": "20426" }, { "name": "HTML", "bytes": "5093" }, { "name": "JavaScript", "bytes": "5563" }, { "name": "Makefile", "bytes": "20027" }, { "name": "Protocol Buffer", "bytes": "120497" }, { "name": "Ruby", "bytes": "1408" }, { "name": "Shell", "bytes": "37213" }, { "name": "Smarty", "bytes": "1523" }, { "name": "TypeScript", "bytes": "203877" }, { "name": "Yacc", "bytes": "101682" } ], "symlink_target": "" }
This is the source code for my personal website, [pstblog.com](http://pstblog.com). It is based on the [Minimal Mistakes](https://github.com/mmistakes/minimal-mistakes) theme. Changes I have made to this theme include:

* Disqus comments load on a button click rather than automatically
* Typography changes, mainly to the body text and navigation bar
* A wider text field for the body
* A variety of other style tweaks
* Removal of the Grunt build tool and its dependencies

A more in-depth description of this theme's features is available at the Minimal Mistakes [theme setup page](http://mmistakes.github.io/minimal-mistakes/theme-setup/).

## Requirements

This template is compatible with Jekyll 4.0 and up, uses Rouge for code highlighting, and Kramdown for markdown conversion. As detailed below, Ruby is needed as well, depending on how much you want to customize the site.

## Run Locally

If you want to run this locally, there are a few steps.

1. Fork this repo, clone the fork, and cd to the clone
2. Bundle install the dependencies in the `Gemfile`
3. Update the site's _config.yml with your own settings
4. Serve the site locally using jekyll

```
$ git clone url-for-your-clone
$ cd path/to/clone
$ bundle install
$ bundle exec jekyll serve
```

Then visit the development server at http://localhost:4000/ to view your site.

### Docker

If you're familiar with Docker, you can also use the `Dockerfile` or `docker-compose.yaml` to build an image and run a local container. Either use docker-compose in the root directory:

```
$ docker-compose up
```

Or use the `Dockerfile` to build your own image and start a container:

```
$ docker image build --tag blog-image .
$ docker container run --rm --publish 4000:4000 --name blog-container --volume $(pwd):/home/app blog-image
```

This container will also be available at http://localhost:4000/, and it binds to your local root directory to live-reload as you edit the blog.
{ "content_hash": "8cd1002e321c4cba0073e0b92a251985", "timestamp": "", "source": "github", "line_count": 46, "max_line_length": 219, "avg_line_length": 42.130434782608695, "alnum_prop": 0.7549019607843137, "repo_name": "psthomas/psthomas.github.io", "id": "878e7ed386154b515989f8dd516208c5474d1f7f", "size": "1953", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "README.md", "mode": "33188", "license": "mit", "language": [ { "name": "Dockerfile", "bytes": "1444" }, { "name": "HTML", "bytes": "9796379" }, { "name": "JavaScript", "bytes": "51633" }, { "name": "Ruby", "bytes": "54" }, { "name": "SCSS", "bytes": "55475" } ], "symlink_target": "" }
// default #elif defined(EIGEN_TEST_PART_2) #define EIGEN_MAX_STATIC_ALIGN_BYTES 16 #define EIGEN_MAX_ALIGN_BYTES 16 #elif defined(EIGEN_TEST_PART_3) #define EIGEN_MAX_STATIC_ALIGN_BYTES 32 #define EIGEN_MAX_ALIGN_BYTES 32 #elif defined(EIGEN_TEST_PART_4) #define EIGEN_MAX_STATIC_ALIGN_BYTES 64 #define EIGEN_MAX_ALIGN_BYTES 64 #endif #include "main.h" typedef Matrix<float, 6,1> Vector6f; typedef Matrix<float, 8,1> Vector8f; typedef Matrix<float, 12,1> Vector12f; typedef Matrix<double, 5,1> Vector5d; typedef Matrix<double, 6,1> Vector6d; typedef Matrix<double, 7,1> Vector7d; typedef Matrix<double, 8,1> Vector8d; typedef Matrix<double, 9,1> Vector9d; typedef Matrix<double,10,1> Vector10d; typedef Matrix<double,12,1> Vector12d; struct TestNew1 { MatrixXd m; // good: m will allocate its own array, taking care of alignment. TestNew1() : m(20,20) {} }; struct TestNew2 { Matrix3d m; // good: m's size isn't a multiple of 16 bytes, so m doesn't have to be 16-byte aligned, // 8-byte alignment is good enough here, which we'll get automatically }; struct TestNew3 { Vector2f m; // good: m's size isn't a multiple of 16 bytes, so m doesn't have to be 16-byte aligned }; struct TestNew4 { EIGEN_MAKE_ALIGNED_OPERATOR_NEW Vector2d m; float f; // make the struct have sizeof%16!=0 to make it a little more tricky when we allow an array of 2 such objects }; struct TestNew5 { EIGEN_MAKE_ALIGNED_OPERATOR_NEW float f; // try the f at first -- the EIGEN_ALIGN_MAX attribute of m should make that still work Matrix4f m; }; struct TestNew6 { Matrix<float,2,2,DontAlign> m; // good: no alignment requested float f; }; template<bool Align> struct Depends { EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(Align) Vector2d m; float f; }; template<typename T> void check_unalignedassert_good() { T *x, *y; x = new T; delete x; y = new T[2]; delete[] y; } #if EIGEN_MAX_STATIC_ALIGN_BYTES>0 template<typename T> void construct_at_boundary(int boundary) { char buf[sizeof(T)+256]; size_t _buf = 
reinterpret_cast<size_t>(buf); _buf += (EIGEN_MAX_ALIGN_BYTES - (_buf % EIGEN_MAX_ALIGN_BYTES)); // make 16/32/...-byte aligned _buf += boundary; // make exact boundary-aligned T *x = ::new(reinterpret_cast<void*>(_buf)) T; x[0].setZero(); // just in order to silence warnings x->~T(); } #endif void unalignedassert() { #if EIGEN_MAX_STATIC_ALIGN_BYTES>0 construct_at_boundary<Vector2f>(4); construct_at_boundary<Vector3f>(4); construct_at_boundary<Vector4f>(16); construct_at_boundary<Vector6f>(4); construct_at_boundary<Vector8f>(EIGEN_MAX_ALIGN_BYTES); construct_at_boundary<Vector12f>(16); construct_at_boundary<Matrix2f>(16); construct_at_boundary<Matrix3f>(4); construct_at_boundary<Matrix4f>(EIGEN_MAX_ALIGN_BYTES); construct_at_boundary<Vector2d>(16); construct_at_boundary<Vector3d>(4); construct_at_boundary<Vector4d>(EIGEN_MAX_ALIGN_BYTES); construct_at_boundary<Vector5d>(4); construct_at_boundary<Vector6d>(16); construct_at_boundary<Vector7d>(4); construct_at_boundary<Vector8d>(EIGEN_MAX_ALIGN_BYTES); construct_at_boundary<Vector9d>(4); construct_at_boundary<Vector10d>(16); construct_at_boundary<Vector12d>(EIGEN_MAX_ALIGN_BYTES); construct_at_boundary<Matrix2d>(EIGEN_MAX_ALIGN_BYTES); construct_at_boundary<Matrix3d>(4); construct_at_boundary<Matrix4d>(EIGEN_MAX_ALIGN_BYTES); construct_at_boundary<Vector2cf>(16); construct_at_boundary<Vector3cf>(4); construct_at_boundary<Vector2cd>(EIGEN_MAX_ALIGN_BYTES); construct_at_boundary<Vector3cd>(16); #endif check_unalignedassert_good<TestNew1>(); check_unalignedassert_good<TestNew2>(); check_unalignedassert_good<TestNew3>(); check_unalignedassert_good<TestNew4>(); check_unalignedassert_good<TestNew5>(); check_unalignedassert_good<TestNew6>(); check_unalignedassert_good<Depends<true> >(); #if EIGEN_MAX_STATIC_ALIGN_BYTES>0 if(EIGEN_MAX_ALIGN_BYTES>=16) { VERIFY_RAISES_ASSERT(construct_at_boundary<Vector4f>(8)); VERIFY_RAISES_ASSERT(construct_at_boundary<Vector8f>(8)); 
VERIFY_RAISES_ASSERT(construct_at_boundary<Vector12f>(8)); VERIFY_RAISES_ASSERT(construct_at_boundary<Vector2d>(8)); VERIFY_RAISES_ASSERT(construct_at_boundary<Vector4d>(8)); VERIFY_RAISES_ASSERT(construct_at_boundary<Vector6d>(8)); VERIFY_RAISES_ASSERT(construct_at_boundary<Vector8d>(8)); VERIFY_RAISES_ASSERT(construct_at_boundary<Vector10d>(8)); VERIFY_RAISES_ASSERT(construct_at_boundary<Vector12d>(8)); VERIFY_RAISES_ASSERT(construct_at_boundary<Vector2cf>(8)); VERIFY_RAISES_ASSERT(construct_at_boundary<Vector4i>(8)); } for(int b=8; b<EIGEN_MAX_ALIGN_BYTES; b+=8) { if(b<32) VERIFY_RAISES_ASSERT(construct_at_boundary<Vector8f>(b)); if(b<64) VERIFY_RAISES_ASSERT(construct_at_boundary<Matrix4f>(b)); if(b<32) VERIFY_RAISES_ASSERT(construct_at_boundary<Vector4d>(b)); if(b<32) VERIFY_RAISES_ASSERT(construct_at_boundary<Matrix2d>(b)); if(b<128) VERIFY_RAISES_ASSERT(construct_at_boundary<Matrix4d>(b)); if(b<32) VERIFY_RAISES_ASSERT(construct_at_boundary<Vector2cd>(b)); } #endif } void test_unalignedassert() { CALL_SUBTEST(unalignedassert()); }
{ "content_hash": "577150239424ff3dd5ba1c4461b8aa6b", "timestamp": "", "source": "github", "line_count": 167, "max_line_length": 120, "avg_line_length": 31.269461077844312, "alnum_prop": 0.7192646495595557, "repo_name": "Jetpie/open-linear", "id": "014cc834b74aa06b593440e4e1149324409bae72", "size": "5677", "binary": false, "copies": "9", "ref": "refs/heads/master", "path": "lib/eigen/test/unalignedassert.cpp", "mode": "33261", "license": "bsd-2-clause", "language": [ { "name": "C", "bytes": "225923" }, { "name": "C++", "bytes": "6900684" }, { "name": "CMake", "bytes": "184635" }, { "name": "Cuda", "bytes": "4909" }, { "name": "FORTRAN", "bytes": "1315584" }, { "name": "Makefile", "bytes": "1799" }, { "name": "Python", "bytes": "8937" }, { "name": "Shell", "bytes": "19416" } ], "symlink_target": "" }
import click @click.command() def main(args=None): """Console script for mcmc""" click.echo("Replace this message by putting your code into " "mcmc.cli.main") click.echo("See click documentation at http://click.pocoo.org/") if __name__ == "__main__": main()
{ "content_hash": "2224ad9c9977916db08dad04cd5d056d", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 68, "avg_line_length": 22.53846153846154, "alnum_prop": 0.6143344709897611, "repo_name": "mchakra2/parallelmcmc", "id": "4b5d817d54d3c434e5a2fcf9ead9658c39c2b498", "size": "318", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "mcmc/cli.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "2283" }, { "name": "Python", "bytes": "25693" } ], "symlink_target": "" }
<?php /** * Abstract container block with header * * @category Mage * @package Mage_Core * @author Magento Core Team <core@magentocommerce.com> */ class Mage_Page_Block_Template_Container extends Mage_Core_Block_Template { /** * Set default template * */ protected function _construct() { $this->setTemplate('page/template/container.phtml'); } }
{ "content_hash": "1bca53b823ab2037b6c0658886dcf6a8", "timestamp": "", "source": "github", "line_count": 24, "max_line_length": 73, "avg_line_length": 16.916666666666668, "alnum_prop": 0.6231527093596059, "repo_name": "rajanlamic/urbanart", "id": "94d9d3a5208b1f6073bb81876673c6ea35c5c439", "size": "1359", "binary": false, "copies": "10", "ref": "refs/heads/master", "path": "files/homedir/public_html/OTTOSCHADE.NET/artist/app/code/core/Mage/Page/Block/Template/Container.php", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "ApacheConf", "bytes": "7465" }, { "name": "CSS", "bytes": "2252771" }, { "name": "HTML", "bytes": "5787517" }, { "name": "JavaScript", "bytes": "1303367" }, { "name": "PHP", "bytes": "44027761" }, { "name": "Perl", "bytes": "3645" }, { "name": "Ruby", "bytes": "5344" }, { "name": "Shell", "bytes": "25851" } ], "symlink_target": "" }
// Copyright (c) Brock Allen & Dominick Baier. All rights reserved.
// Licensed under the Apache License, Version 2.0. See LICENSE in the project root for license information.


using Host.Filters;
using IdentityServer4.Quickstart.UI.Models;
using IdentityServer4.Services;
using Microsoft.AspNetCore.Mvc;
using System.Threading.Tasks;

namespace IdentityServer4.Quickstart.UI.Controllers
{
    [SecurityHeaders]
    public class HomeController : Controller
    {
        private readonly IIdentityServerInteractionService _interactionService;

        public HomeController(IIdentityServerInteractionService interaction)
        {
            _interactionService = interaction;
        }

        /// <summary>
        /// Renders the default landing page.
        /// </summary>
        public IActionResult Index() => View();

        /// <summary>
        /// Shows the error page for the given error id.
        /// </summary>
        public async Task<IActionResult> Error(string errorId)
        {
            var viewModel = new ErrorViewModel();

            // Look up the error details identityserver recorded for this id, if any.
            var errorContext = await _interactionService.GetErrorContextAsync(errorId);
            if (errorContext != null)
            {
                viewModel.Error = errorContext;
            }

            return View("Error", viewModel);
        }
    }
}
{ "content_hash": "ce4ded8031e26f4db2f14d842279b448", "timestamp": "", "source": "github", "line_count": 45, "max_line_length": 107, "avg_line_length": 27.644444444444446, "alnum_prop": 0.6262057877813505, "repo_name": "MichaelPetrinolis/IdentityServer4.Samples", "id": "d1d28ad7d084ead0e6181d9b6675ff91a5eeb6e9", "size": "1246", "binary": false, "copies": "5", "ref": "refs/heads/dev", "path": "Quickstarts/8_EntityFrameworkStorage/src/QuickstartIdentityServer/Controllers/HomeController.cs", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C#", "bytes": "667133" }, { "name": "CSS", "bytes": "18363" }, { "name": "HTML", "bytes": "9120" }, { "name": "JavaScript", "bytes": "6099329" } ], "symlink_target": "" }
namespace media_router { namespace { // The number of minutes a NOTIFICATION Issue stays in the IssueManager // before it is auto-dismissed. constexpr int kNotificationAutoDismissMins = 1; // The number of minutes a WARNING Issue stays in the IssueManager before it // is auto-dismissed. constexpr int kWarningAutoDismissMins = 5; } // namespace // static base::TimeDelta IssueManager::GetAutoDismissTimeout( const IssueInfo& issue_info) { if (issue_info.is_blocking) return base::TimeDelta(); switch (issue_info.severity) { case IssueInfo::Severity::NOTIFICATION: return base::Minutes(kNotificationAutoDismissMins); case IssueInfo::Severity::WARNING: return base::Minutes(kWarningAutoDismissMins); case IssueInfo::Severity::FATAL: NOTREACHED() << "FATAL issues should be blocking"; return base::TimeDelta(); } NOTREACHED(); return base::TimeDelta(); } IssueManager::IssueManager() : top_issue_(nullptr), task_runner_(content::GetUIThreadTaskRunner({})) {} IssueManager::~IssueManager() { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); } void IssueManager::AddIssue(const IssueInfo& issue_info) { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); auto& issues_map = issue_info.is_blocking ? 
blocking_issues_ : non_blocking_issues_; for (const auto& key_value_pair : issues_map) { const auto& issue = key_value_pair.second->issue; if (issue.info() == issue_info) return; } Issue issue(issue_info); std::unique_ptr<base::CancelableOnceClosure> cancelable_dismiss_cb; base::TimeDelta timeout = GetAutoDismissTimeout(issue_info); if (!timeout.is_zero()) { cancelable_dismiss_cb = std::make_unique<base::CancelableOnceClosure>(base::BindOnce( &IssueManager::ClearIssue, base::Unretained(this), issue.id())); task_runner_->PostDelayedTask(FROM_HERE, cancelable_dismiss_cb->callback(), timeout); } issues_map.emplace(issue.id(), std::make_unique<IssueManager::Entry>( issue, std::move(cancelable_dismiss_cb))); MaybeUpdateTopIssue(); } void IssueManager::ClearIssue(const Issue::Id& issue_id) { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); if (non_blocking_issues_.erase(issue_id) || blocking_issues_.erase(issue_id)) MaybeUpdateTopIssue(); } void IssueManager::ClearNonBlockingIssues() { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); if (non_blocking_issues_.empty()) return; non_blocking_issues_.clear(); MaybeUpdateTopIssue(); } void IssueManager::RegisterObserver(IssuesObserver* observer) { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); DCHECK(observer); DCHECK(!issues_observers_.HasObserver(observer)); issues_observers_.AddObserver(observer); if (top_issue_) observer->OnIssue(*top_issue_); } void IssueManager::UnregisterObserver(IssuesObserver* observer) { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); issues_observers_.RemoveObserver(observer); } IssueManager::Entry::Entry( const Issue& issue, std::unique_ptr<base::CancelableOnceClosure> cancelable_dismiss_callback) : issue(issue), cancelable_dismiss_callback(std::move(cancelable_dismiss_callback)) {} IssueManager::Entry::~Entry() = default; void IssueManager::MaybeUpdateTopIssue() { const Issue* new_top_issue = nullptr; // Select the first blocking issue in the list of issues. 
// If there are none, simply select the first issue in the list. if (!blocking_issues_.empty()) { new_top_issue = &blocking_issues_.begin()->second->issue; } else if (!non_blocking_issues_.empty()) { new_top_issue = &non_blocking_issues_.begin()->second->issue; } // If we've found a new top issue, then report it via the observer. if (new_top_issue != top_issue_) { top_issue_ = new_top_issue; for (auto& observer : issues_observers_) { if (top_issue_) observer.OnIssue(*top_issue_); else observer.OnIssuesCleared(); } } } } // namespace media_router
{ "content_hash": "64d915fa74312cc75074eeb1ff60b320", "timestamp": "", "source": "github", "line_count": 127, "max_line_length": 79, "avg_line_length": 32.06299212598425, "alnum_prop": 0.6957269155206287, "repo_name": "ric2b/Vivaldi-browser", "id": "fb760be3b8b1512651c78c64ff4db7954890d3f1", "size": "4451", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "chromium/components/media_router/browser/issue_manager.cc", "mode": "33188", "license": "bsd-3-clause", "language": [], "symlink_target": "" }
<?php namespace Reporting; use Reporting\Model\Reporting; use Reporting\Model\ReportingTable; use Zend\Db\ResultSet\ResultSet; use Zend\Db\TableGateway\TableGateway; class Module { public function getAutoloaderConfig() { return array( 'Zend\Loader\ClassMapAutoloader' => array( __DIR__ . '/autoload_classmap.php', ), 'Zend\Loader\StandardAutoloader' => array( 'namespaces' => array( __NAMESPACE__ => __DIR__ . '/src/' . __NAMESPACE__, ), ), ); } public function getConfig() { return include __DIR__ . '/config/module.config.php'; } public function getServiceConfig() { return array( //note: //trick to use one table for reading but a view (doing (x,y)-->geometry after insert) for inserting data //table : postgresql relation (table) : obstruction_report //insert view : postgresql relation (view) : insert_view //for now x,y should not benever edited! 'factories' => array( 'Reporting\Model\ReportingTable' => function($sm) { $tableGateway = $sm->get('ReportingTableGateway'); $table = new ReportingTable($tableGateway); return $table; }, 'ReportingTableGateway' => function ($sm) { $dbAdapter = $sm->get('Zend\Db\Adapter\Adapter'); $resultSetPrototype = new ResultSet(); $resultSetPrototype->setArrayObjectPrototype(new Reporting()); return new TableGateway('obstruction_report', $dbAdapter, null, $resultSetPrototype); }, 'Reporting\Model\InsertViewTable' => function($sm) { $tableGateway = $sm->get('InsertViewTableGateway'); $table = new ReportingTable($tableGateway); return $table; }, 'InsertViewTableGateway' => function ($sm) { $dbAdapter = $sm->get('Zend\Db\Adapter\Adapter'); $resultSetPrototype = new ResultSet(); $resultSetPrototype->setArrayObjectPrototype(new Reporting()); return new TableGateway('insert_view', $dbAdapter, null, $resultSetPrototype); }, ), ); } }
{ "content_hash": "d63672b5a902969926b5889eec4eaa11", "timestamp": "", "source": "github", "line_count": 65, "max_line_length": 108, "avg_line_length": 31.123076923076923, "alnum_prop": 0.6455758774097874, "repo_name": "Geode/waterway-obs-server", "id": "9be42ddb767a6a755e735591ca85c1f7a32f3e1f", "size": "2023", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "module/Reporting/Module.php", "mode": "33261", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "144353" }, { "name": "JavaScript", "bytes": "74766" }, { "name": "PHP", "bytes": "42166" }, { "name": "Perl", "bytes": "1640" }, { "name": "Shell", "bytes": "63" } ], "symlink_target": "" }
SYNONYM #### According to The Catalogue of Life, 3rd January 2011 #### Published in null #### Original name null ### Remarks null
{ "content_hash": "b1eafe572ee8ce9cf36e680ea3af4d16", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 39, "avg_line_length": 10.23076923076923, "alnum_prop": 0.6917293233082706, "repo_name": "mdoering/backbone", "id": "36d2ed877f2af46e44b8cb31da6ca7e726f7ff02", "size": "199", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "life/Plantae/Magnoliophyta/Liliopsida/Asparagales/Orchidaceae/Tropidia/Tropidia curculigoides/ Syn. Schoenomorphus capitatus/README.md", "mode": "33188", "license": "apache-2.0", "language": [], "symlink_target": "" }
/* iCheck plugin Futurico skin ----------------------------------- */ .icheckbox_futurico, .iradio_futurico { display: inline-block; *display: inline; vertical-align: middle; margin: 0; padding: 0; width: 16px; height: 17px; background: url(futurico.png) no-repeat; border: none; cursor: pointer; } .icheckbox_futurico { background-position: 0 0; } .icheckbox_futurico.checked { background-position: -18px 0; } .icheckbox_futurico.disabled { background-position: -36px 0; cursor: default; } .icheckbox_futurico.checked.disabled { background-position: -54px 0; } .iradio_futurico { background-position: -72px 0; } .iradio_futurico.checked { background-position: -90px 0; } .iradio_futurico.disabled { background-position: -108px 0; cursor: default; } .iradio_futurico.checked.disabled { background-position: -126px 0; } /* Retina support */ @media only screen and (-webkit-min-device-pixel-ratio: 1.5), only screen and (-moz-min-device-pixel-ratio: 1.5), only screen and (-o-min-device-pixel-ratio: 3/2), only screen and (min-device-pixel-ratio: 1.5) { .icheckbox_futurico, .iradio_futurico { background-image: url(futurico@2x.png); -webkit-background-size: 144px 19px; background-size: 144px 19px; } }
{ "content_hash": "b95a9ba07cb07fd9b10bfbefe76833a1", "timestamp": "", "source": "github", "line_count": 56, "max_line_length": 61, "avg_line_length": 23.571428571428573, "alnum_prop": 0.6553030303030303, "repo_name": "ththao/yii2template", "id": "b32b29ceced1e3a66799a54fd7fc6c9031ba82a4", "size": "1320", "binary": false, "copies": "5", "ref": "refs/heads/master", "path": "web/admin/plugins/iCheck/futurico/futurico.css", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "ApacheConf", "bytes": "1240" }, { "name": "Batchfile", "bytes": "1552" }, { "name": "CSS", "bytes": "503231" }, { "name": "HTML", "bytes": "2889140" }, { "name": "JavaScript", "bytes": "5767387" }, { "name": "PHP", "bytes": "345546" } ], "symlink_target": "" }
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <!-- NewPage --> <html lang="en"> <head> <!-- Generated by javadoc (1.8.0_102) on Wed Nov 02 19:53:41 IST 2016 --> <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> <title>Uses of Class org.apache.solr.morphlines.solr.GenerateSolrSequenceKeyBuilder (Solr 6.3.0 API)</title> <meta name="date" content="2016-11-02"> <link rel="stylesheet" type="text/css" href="../../../../../../stylesheet.css" title="Style"> <script type="text/javascript" src="../../../../../../script.js"></script> </head> <body> <script type="text/javascript"><!-- try { if (location.href.indexOf('is-external=true') == -1) { parent.document.title="Uses of Class org.apache.solr.morphlines.solr.GenerateSolrSequenceKeyBuilder (Solr 6.3.0 API)"; } } catch(err) { } //--> </script> <noscript> <div>JavaScript is disabled on your browser.</div> </noscript> <!-- ========= START OF TOP NAVBAR ======= --> <div class="topNav"><a name="navbar.top"> <!-- --> </a> <div class="skipNav"><a href="#skip.navbar.top" title="Skip navigation links">Skip navigation links</a></div> <a name="navbar.top.firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a href="../../../../../../overview-summary.html">Overview</a></li> <li><a href="../../../../../../org/apache/solr/morphlines/solr/package-summary.html">Package</a></li> <li><a href="../../../../../../org/apache/solr/morphlines/solr/GenerateSolrSequenceKeyBuilder.html" title="class in org.apache.solr.morphlines.solr">Class</a></li> <li class="navBarCell1Rev">Use</li> <li><a href="../package-tree.html">Tree</a></li> <li><a href="../../../../../../deprecated-list.html">Deprecated</a></li> <li><a href="../../../../../../help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList"> <li>Prev</li> <li>Next</li> </ul> <ul class="navList"> <li><a 
href="../../../../../../index.html?org/apache/solr/morphlines/solr/class-use/GenerateSolrSequenceKeyBuilder.html" target="_top">Frames</a></li> <li><a href="GenerateSolrSequenceKeyBuilder.html" target="_top">No&nbsp;Frames</a></li> </ul> <ul class="navList" id="allclasses_navbar_top"> <li><a href="../../../../../../allclasses-noframe.html">All&nbsp;Classes</a></li> </ul> <div> <script type="text/javascript"><!-- allClassesLink = document.getElementById("allclasses_navbar_top"); if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } //--> </script> </div> <a name="skip.navbar.top"> <!-- --> </a></div> <!-- ========= END OF TOP NAVBAR ========= --> <div class="header"> <h2 title="Uses of Class org.apache.solr.morphlines.solr.GenerateSolrSequenceKeyBuilder" class="title">Uses of Class<br>org.apache.solr.morphlines.solr.GenerateSolrSequenceKeyBuilder</h2> </div> <div class="classUseContainer">No usage of org.apache.solr.morphlines.solr.GenerateSolrSequenceKeyBuilder</div> <!-- ======= START OF BOTTOM NAVBAR ====== --> <div class="bottomNav"><a name="navbar.bottom"> <!-- --> </a> <div class="skipNav"><a href="#skip.navbar.bottom" title="Skip navigation links">Skip navigation links</a></div> <a name="navbar.bottom.firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a href="../../../../../../overview-summary.html">Overview</a></li> <li><a href="../../../../../../org/apache/solr/morphlines/solr/package-summary.html">Package</a></li> <li><a href="../../../../../../org/apache/solr/morphlines/solr/GenerateSolrSequenceKeyBuilder.html" title="class in org.apache.solr.morphlines.solr">Class</a></li> <li class="navBarCell1Rev">Use</li> <li><a href="../package-tree.html">Tree</a></li> <li><a href="../../../../../../deprecated-list.html">Deprecated</a></li> <li><a href="../../../../../../help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList"> <li>Prev</li> <li>Next</li> </ul> <ul 
class="navList"> <li><a href="../../../../../../index.html?org/apache/solr/morphlines/solr/class-use/GenerateSolrSequenceKeyBuilder.html" target="_top">Frames</a></li> <li><a href="GenerateSolrSequenceKeyBuilder.html" target="_top">No&nbsp;Frames</a></li> </ul> <ul class="navList" id="allclasses_navbar_bottom"> <li><a href="../../../../../../allclasses-noframe.html">All&nbsp;Classes</a></li> </ul> <div> <script type="text/javascript"><!-- allClassesLink = document.getElementById("allclasses_navbar_bottom"); if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } //--> </script> </div> <a name="skip.navbar.bottom"> <!-- --> </a></div> <!-- ======== END OF BOTTOM NAVBAR ======= --> <p class="legalCopy"><small> <i>Copyright &copy; 2000-2016 Apache Software Foundation. All Rights Reserved.</i> <script src='../../../../../../prettify.js' type='text/javascript'></script> <script type='text/javascript'> (function(){ var oldonload = window.onload; if (typeof oldonload != 'function') { window.onload = prettyPrint; } else { window.onload = function() { oldonload(); prettyPrint(); } } })(); </script> </small></p> </body> </html>
{ "content_hash": "dc60c44c379a021df46664bc9649ee0c", "timestamp": "", "source": "github", "line_count": 140, "max_line_length": 187, "avg_line_length": 38.621428571428574, "alnum_prop": 0.607545773996671, "repo_name": "johannesbraun/clm_autocomplete", "id": "dcccdc261ef32580b6f118a1a3266759f3d64e3e", "size": "5407", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "docs/solr-morphlines-core/org/apache/solr/morphlines/solr/class-use/GenerateSolrSequenceKeyBuilder.html", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "AMPL", "bytes": "291" }, { "name": "Batchfile", "bytes": "63061" }, { "name": "CSS", "bytes": "238996" }, { "name": "HTML", "bytes": "230318" }, { "name": "JavaScript", "bytes": "1224188" }, { "name": "Jupyter Notebook", "bytes": "638688" }, { "name": "Python", "bytes": "3829" }, { "name": "Roff", "bytes": "34741083" }, { "name": "Shell", "bytes": "96828" }, { "name": "XSLT", "bytes": "124838" } ], "symlink_target": "" }
<?php namespace PhpSpec\Exception\Fracture; /** * Class MethodNotFoundException holds information about method not found * exception */ class MethodNotFoundException extends MethodInvocationException { }
{ "content_hash": "1b6b4f1984b60409e0a7ae716ab7efe3", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 73, "avg_line_length": 16.615384615384617, "alnum_prop": 0.7777777777777778, "repo_name": "Amaire/filmy", "id": "dd5079ef4510bb7c18631d3a0a41e209e4a57b24", "size": "549", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "vendor/phpspec/phpspec/src/PhpSpec/Exception/Fracture/MethodNotFoundException.php", "mode": "33188", "license": "mit", "language": [ { "name": "ApacheConf", "bytes": "356" }, { "name": "JavaScript", "bytes": "504" }, { "name": "PHP", "bytes": "105599" } ], "symlink_target": "" }
.. index:: single: Introduction Introduction ============ Good developers always version their code using a SCM system, so why don't they do the same for their database schema? Phinx allows developers to alter and manipulate databases in a clear and concise way. It avoids the use of writing SQL by hand and instead offers a powerful API for creating migrations using PHP code. Developers can then version these migrations using their preferred SCM system. This makes Phinx migrations portable between different database systems. Phinx keeps track of which migrations have been run, so you can worry less about the state of your database and instead focus on building better software.
{ "content_hash": "7ced01be71b976ab09da1adf4cd1dc75", "timestamp": "", "source": "github", "line_count": 16, "max_line_length": 79, "avg_line_length": 43.125, "alnum_prop": 0.7971014492753623, "repo_name": "robmorgan/phinx", "id": "50c44443f86472cd6797532b7491e8eacf307167", "size": "690", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "docs/en/intro.rst", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "896" }, { "name": "PHP", "bytes": "909997" } ], "symlink_target": "" }
<!DOCTYPE html> <html lang="ja"> <head> <meta charset="utf-8"> <title>test</title> <script type="application/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1.7.0/jquery.min.js"></script> <script type="application/javascript" src="../bower_components/sammy/lib/sammy.js"></script> </head> <body> <div>Open the browser console.</div> <div id="main"> <form method="POST" action="#/foo"> <input type="text" name="foo" value="bar" /> <input type="submit" value="submit" /> </form> </div> <script src="index.js"></script> </body> </html>
{ "content_hash": "c4d99a9be99001726d93c78c7d9704c3", "timestamp": "", "source": "github", "line_count": 22, "max_line_length": 121, "avg_line_length": 28.045454545454547, "alnum_prop": 0.6012965964343598, "repo_name": "purescript-contrib/purescript-sammy", "id": "9526b595feafaecf299ab0af87c0bd92d896e047", "size": "617", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "examples/index.html", "mode": "33188", "license": "mit", "language": [ { "name": "JavaScript", "bytes": "1556" }, { "name": "PureScript", "bytes": "3473" } ], "symlink_target": "" }
*This file documents the changes in **Radix** versions that are listed below.* *Items should be added to this file as:* ### YYYY-MM-DD Release + Additional changes. + More changes. * * * ### 2015-5-14 Release v0.5.6 + Add Float64 method to redis.Reply, courtesy of @viblo ### 2015-5-09 Release v0.5.5 + Major refactoring of encoding to be significantly faster and to incur fewer memory allocations ### 2015-2-25 Release v0.5.4 + Add Pattern field to SubRepy in extra/pubsub + DialTimeout now uses DialTimeout, instead of only using timeout value for read/write timeouts + Fix multiple bugs in extra/cluster that prevented proper failover ### 2015-1-31 Release v0.5.3 + Add throttle to Reset calls in cluster ### 2015-1-26 Release v0.5.2 + Make the cluster package be thread-safe ### 2014-12-18 Release v0.5.1 + Add a CarefullyPutMaster method to sentinel, analogous to the CarefullyPut method in pool + Updated the sentinel package's documentation to include some examples ### 2014-12-07 Release v0.5.0 + Upgrade tests to use the testify library. It's a little easier to use than the old one + Implement the cluster package + Small upgrades to pool and resp ### 2014-11-04 Release v0.4.8 + Fix writing float values and other arbitrary values in resp, which would have caused a connection to hang if a command with a float was ever sent ### 2014-10-23 Release v0.4.7 + Implement CmdError so clients can tell an application level error from an external error ### 2014-9-22 Release v0.4.6 + Fix bug in pubsub package preventing patterned sub commands from working (thanks @hayesgm!) 
### 2014-8-22 Release v0.4.5 + Fix bug in pubsub where it wasn't pulling all subscribe replies + Update examples ### 2014-8-18 Release v0.4.4 + Fix reading of bulk strings for some rare cases ### 2014-8-14 Release v0.4.3 + Make sentinel package return error and not ClientError ### 2014-8-14 Release v0.4.2 + Fix Cmd not flattening arguments properly ### 2014-8-14 Release v0.4.1 + Make Client's connection be publicly accessible ### 2014-8-10 Release v0.4.0 + Rewrite encoding/decoding out into its own sub-package (`resp`) + Add `extra` sub-package with the following features * Simple connection pool * Pub/Sub client * Sentinel client with transparent failover ### 2012-10-05 Release v0.3.4 + Removed reconnecting logic as it didn't work as intended. ### 2012-10-02 Release v0.3.3 + Removed unused Cmd field from Error. ### 2012-09-28 Release v0.3.2 + Rewrote benchmark tool. + Issue #16: On failed request writes, Radix will now reconnect and retry once. + Issue #15: Fixed deadlock issue with laggy networks (thanks Guihong Bai). + Issue #12: Fixed missing ReplyError types from error replies (thanks Guihong Bai). + Issue #11: Fixed broken authentication (thanks Fabio Kung). + Issue #9: Error replies' methods now return the reply error instead of some method specific error (thanks Russ Ross). ### 2012-08-06 Release v0.3.1 + Fixed a bug in parameter formatting that caused crashing when nil parameter was given. + Removed Reply.Nil(), it is redundant. Do a `Reply.Type == ReplyNil` comparison instead. + Reply.List() returns now nil elements as empty strings instead of giving an error. + Reply.Hash() does not give error on nil values anymore, instead, nil values are just not set. ### 2012-08-02 Release v0.3.0 + Major parts rewritten, mostly in connection. About 2x performance boosts. + Fixed a map parameter parsing error. + Various other fixes. ### 2012-05-25 Release v0.2.1 + Fixed a race condition in connection pool that caused panic when GOMAXPROCS>1. 
+ Fixed a bug where map arguments where not formatted always right. + Reply error messages now include the command that caused the error. + Added a rudimentary benchmark tool (WIP). ### 2012-05-19 Release v0.2.0 + New API for Client/MultiCommand commands. Command methods are now autogenerated by gen.bash. The old API had a problem of common namespace conflicts when doing ```. import ".../redis"```. + Renames: * Subscription.PSubscribe() -> Subscription.Psubscribe() * Subscription.PUnsubscribe() -> Subscription.Punsubscribe() * MessagePSubscribe -> MessagePsubscribe * MessagePUnsubscribe -> MessagePunsubscribe * MessagePMessage -> MessagePmessage ### 2012-05-16 Release v0.1.5 + Package name changed to redis (project name is still Radix). + Merged commits from Bobby Powers. + Fix for memory leak in connection closing. + Fix for reconnection. + Minor changes to Reply API. ### 2012-04-20 Release v0.1.4 + New API for commands (thanks Bobby Powers). + Source code moved to its own directory. ### 2012-04-10 Release v0.1.3 + Go v1.0 compatible. + Fixed broken tests. + Fixed issue #1: Very high memory usage due to bug in timeout error handling. + Fixed issue #2: Graceful reset of Connections when connection error is encountered. + Removed support for Client.Select method. Use separate clients for and MOVE command for controlling multiple databases instead. ### 2012-03-06 Release v0.1.2 + Various small fixes to make the package work with Go weekly/Go 1. + Renamed the package name to radix. + Removed Makefiles as Go 1's "go build" deprecates them. + Moved files from redis/ to the root directory of the project. + Rewrote most of the godoc strings to match more Go conventions. + Minor improvements to channel usage. * NOTE: The go tests are currently partially broken, because gocheck seems to be broken with newest version of Go. ### 2012-03-01 Release v0.1.1 + Updated connection pool to reuse old connections more efficiently. ### 2012-03-01 Release v0.1 + First stable release.
{ "content_hash": "24cad529b84f47c83df449cbf396c809", "timestamp": "", "source": "github", "line_count": 183, "max_line_length": 129, "avg_line_length": 31.043715846994534, "alnum_prop": 0.7408906882591093, "repo_name": "theskyinflames/go-misc", "id": "03ad303654f9e4e55acaa145552445589a4f2618", "size": "5694", "binary": false, "copies": "5", "ref": "refs/heads/master", "path": "vendor/github.com/fzzy/radix/CHANGELOG.md", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Cap'n Proto", "bytes": "3930" }, { "name": "Go", "bytes": "281311" }, { "name": "Shell", "bytes": "38856" } ], "symlink_target": "" }
 #pragma once #include <aws/lakeformation/LakeFormation_EXPORTS.h> #include <aws/lakeformation/model/FieldNameString.h> #include <aws/lakeformation/model/ComparisonOperator.h> #include <aws/core/utils/memory/stl/AWSVector.h> #include <aws/core/utils/memory/stl/AWSString.h> #include <utility> namespace Aws { namespace Utils { namespace Json { class JsonValue; class JsonView; } // namespace Json } // namespace Utils namespace LakeFormation { namespace Model { /** * <p>This structure describes the filtering of columns in a table based on a * filter condition.</p><p><h3>See Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/lakeformation-2017-03-31/FilterCondition">AWS * API Reference</a></p> */ class AWS_LAKEFORMATION_API FilterCondition { public: FilterCondition(); FilterCondition(Aws::Utils::Json::JsonView jsonValue); FilterCondition& operator=(Aws::Utils::Json::JsonView jsonValue); Aws::Utils::Json::JsonValue Jsonize() const; /** * <p>The field to filter in the filter condition.</p> */ inline const FieldNameString& GetField() const{ return m_field; } /** * <p>The field to filter in the filter condition.</p> */ inline bool FieldHasBeenSet() const { return m_fieldHasBeenSet; } /** * <p>The field to filter in the filter condition.</p> */ inline void SetField(const FieldNameString& value) { m_fieldHasBeenSet = true; m_field = value; } /** * <p>The field to filter in the filter condition.</p> */ inline void SetField(FieldNameString&& value) { m_fieldHasBeenSet = true; m_field = std::move(value); } /** * <p>The field to filter in the filter condition.</p> */ inline FilterCondition& WithField(const FieldNameString& value) { SetField(value); return *this;} /** * <p>The field to filter in the filter condition.</p> */ inline FilterCondition& WithField(FieldNameString&& value) { SetField(std::move(value)); return *this;} /** * <p>The comparison operator used in the filter condition.</p> */ inline const ComparisonOperator& GetComparisonOperator() const{ return 
m_comparisonOperator; } /** * <p>The comparison operator used in the filter condition.</p> */ inline bool ComparisonOperatorHasBeenSet() const { return m_comparisonOperatorHasBeenSet; } /** * <p>The comparison operator used in the filter condition.</p> */ inline void SetComparisonOperator(const ComparisonOperator& value) { m_comparisonOperatorHasBeenSet = true; m_comparisonOperator = value; } /** * <p>The comparison operator used in the filter condition.</p> */ inline void SetComparisonOperator(ComparisonOperator&& value) { m_comparisonOperatorHasBeenSet = true; m_comparisonOperator = std::move(value); } /** * <p>The comparison operator used in the filter condition.</p> */ inline FilterCondition& WithComparisonOperator(const ComparisonOperator& value) { SetComparisonOperator(value); return *this;} /** * <p>The comparison operator used in the filter condition.</p> */ inline FilterCondition& WithComparisonOperator(ComparisonOperator&& value) { SetComparisonOperator(std::move(value)); return *this;} /** * <p>A string with values used in evaluating the filter condition.</p> */ inline const Aws::Vector<Aws::String>& GetStringValueList() const{ return m_stringValueList; } /** * <p>A string with values used in evaluating the filter condition.</p> */ inline bool StringValueListHasBeenSet() const { return m_stringValueListHasBeenSet; } /** * <p>A string with values used in evaluating the filter condition.</p> */ inline void SetStringValueList(const Aws::Vector<Aws::String>& value) { m_stringValueListHasBeenSet = true; m_stringValueList = value; } /** * <p>A string with values used in evaluating the filter condition.</p> */ inline void SetStringValueList(Aws::Vector<Aws::String>&& value) { m_stringValueListHasBeenSet = true; m_stringValueList = std::move(value); } /** * <p>A string with values used in evaluating the filter condition.</p> */ inline FilterCondition& WithStringValueList(const Aws::Vector<Aws::String>& value) { SetStringValueList(value); return *this;} /** * 
<p>A string with values used in evaluating the filter condition.</p> */ inline FilterCondition& WithStringValueList(Aws::Vector<Aws::String>&& value) { SetStringValueList(std::move(value)); return *this;} /** * <p>A string with values used in evaluating the filter condition.</p> */ inline FilterCondition& AddStringValueList(const Aws::String& value) { m_stringValueListHasBeenSet = true; m_stringValueList.push_back(value); return *this; } /** * <p>A string with values used in evaluating the filter condition.</p> */ inline FilterCondition& AddStringValueList(Aws::String&& value) { m_stringValueListHasBeenSet = true; m_stringValueList.push_back(std::move(value)); return *this; } /** * <p>A string with values used in evaluating the filter condition.</p> */ inline FilterCondition& AddStringValueList(const char* value) { m_stringValueListHasBeenSet = true; m_stringValueList.push_back(value); return *this; } private: FieldNameString m_field; bool m_fieldHasBeenSet; ComparisonOperator m_comparisonOperator; bool m_comparisonOperatorHasBeenSet; Aws::Vector<Aws::String> m_stringValueList; bool m_stringValueListHasBeenSet; }; } // namespace Model } // namespace LakeFormation } // namespace Aws
{ "content_hash": "38221c486e363b71a4d4cdf95cdb0be8", "timestamp": "", "source": "github", "line_count": 162, "max_line_length": 168, "avg_line_length": 35.02469135802469, "alnum_prop": 0.6889319703912584, "repo_name": "jt70471/aws-sdk-cpp", "id": "d4d195aa957f264c2a6956a12f1c554656f2076a", "size": "5793", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "aws-cpp-sdk-lakeformation/include/aws/lakeformation/model/FilterCondition.h", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "13452" }, { "name": "C++", "bytes": "278594037" }, { "name": "CMake", "bytes": "653931" }, { "name": "Dockerfile", "bytes": "5555" }, { "name": "HTML", "bytes": "4471" }, { "name": "Java", "bytes": "302182" }, { "name": "Python", "bytes": "110380" }, { "name": "Shell", "bytes": "4674" } ], "symlink_target": "" }
import sharedLogger import books import parseUsfm class AbstractRenderer(object): def __init__(self, inputDir, outputDir, outputName, config): self.identity = 'abstract renderer' if not self.identity else self.identity self.outputDescription = outputName if not self.outputDescription else self.outputDescription self.logger = sharedLogger.currentLogger self.logger.info("\n Building: " + inputDir + "\n as: " + self.outputDescription + "\n using: " + self.identity) self.oebFlag = False self.config = config def setOEBFlag(self): self.oebFlag = True booksUsfm = None def loadUSFM(self, usfmDir): self.booksUsfm = books.loadBooks(usfmDir) def run(self, order='normal'): if order == 'normal': for bookName in books.silNames: self.renderBook(bookName) elif order == 'ntpsalms': for bookName in books.silNamesNTPsalms: self.renderBook(bookName) def renderBook(self, bookName): if bookName in self.booksUsfm: self.logger.debug('Rendering ' + bookName) tokens = parseUsfm.parseString(self.booksUsfm[bookName]) for t in tokens: t.renderOn(self) self.logger.debug('Rendered ' + bookName) def render_periph(self, token): return self.render_unhandled(token) def render_id(self, token): pass def render_ide(self, token): pass def render_h(self, token): return self.render_unhandled(token) def render_mt(self, token): return self.render_mt1(token) def render_mt1(self, token): return self.render_unhandled(token) def render_mt2(self, token): return self.render_unhandled(token) def render_mt3(self, token): return self.render_unhandled(token) def render_ms(self, token): return self.render_ms1(token) def render_ms1(self, token): return self.render_unhandled(token) def render_ms2(self, token): return self.render_unhandled(token) def render_mr(self, token): return self.render_unhandled(token) def render_mi(self, token): return self.render_unhandled(token) def render_p(self, token): return self.render_unhandled(token) def render_sp(self, token): return self.render_unhandled(token) def render_m(self, 
token): return self.render_unhandled(token) def render_s(self, token): return self.render_s1(token) def render_s1(self, token): return self.render_unhandled(token) def render_s2(self, token): return self.render_unhandled(token) def render_s3(self, token): return self.render_unhandled(token) def render_c(self, token): return self.render_unhandled(token) def render_v(self, token): return self.render_unhandled(token) def render_wj(self, token): return self.render_wj_s(token) def render_wj_s(self, token): return self.render_unhandled(token) def render_wj_e(self, token): return self.render_unhandled(token) def render_text(self, token): return self.render_unhandled(token) def render_q(self, token): return self.render_q1(token) def render_q1(self, token): return self.render_unhandled(token) def render_q2(self, token): return self.render_unhandled(token) def render_q3(self, token): return self.render_unhandled(token) def render_nb(self, token): return self.render_unhandled(token) def render_b(self, token): return self.render_unhandled(token) def render_qt(self, token): return self.render_qt_s(token) def render_qt_s(self, token): return self.render_unhandled(token) def render_qt_e(self, token): return self.render_unhandled(token) def render_r(self, token): return self.render_unhandled(token) def render_f(self, token): return self.render_f_s(token) def render_f_s(self, token): return self.render_unhandled(token) def render_f_e(self, token): return self.render_unhandled(token) def render_fr(self, token): return self.render_unhandled(token) def render_fr_e(self, token): return self.render_unhandled(token) def render_fk(self, token): return self.render_unhandled(token) def render_ft(self, token): return self.render_unhandled(token) def render_fq(self, token): return self.render_unhandled(token) def render_it(self, token): return self.render_it_s(token) def render_it_s(self, token): return self.render_unhandled(token) def render_it_e(self, token): return 
self.render_unhandled(token) def render_em(self, token): return self.render_em_s(token) def render_em_s(self, token): return self.render_unhandled(token) def render_em_e(self, token): return self.render_unhandled(token) def render_qs(self, token): return self.render_qs_s(token) def render_qs_s(self, token): return self.render_unhandled(token) def render_qs_e(self, token): return self.render_unhandled(token) def render_nd(self, token): return self.render_nd_s(token) def render_nd_s(self, token): return self.render_unhandled(token) def render_nd_e(self, token): return self.render_unhandled(token) def render_pbr(self, token): return self.render_unhandled(token) def render_d(self, token): return self.render_unhandled(token) def render_rem(self, token): pass def render_pi(self, token): return self.render_unhandled(token) def render_li(self, token): return self.render_unhandled(token) def render_x(self, token): return self.render_x_s(token) def render_x_s(self, token): return self.render_unhandled(token) def render_x_e(self, token): return self.render_unhandled(token) def render_xo(self, token): return self.render_unhandled(token) def render_xt(self, token): return self.render_unhandled(token) def render_xdc(self, token): return self.render_xdc_s(token) def render_xdc_s(self, token): return self.render_unhandled(token) def render_xdc_e(self, token): return self.render_unhandled(token) def render_tl(self, token): return self.render_tl_s(token) def render_tl_s(self, token): return self.render_unhandled(token) def render_tl_e(self, token): return self.render_unhandled(token) def render_add(self, token): return self.render_add_s(token) def render_add_s(self, token): return self.render_unhandled(token) def render_add_e(self, token): return self.render_unhandled(token) def render_toc1(self, token): return self.render_unhandled(token) def render_toc2(self, token): return self.render_unhandled(token) def render_toc3(self, token): return self.render_unhandled(token) def 
render_is1(self, token): return self.render_unhandled(token) def render_ip(self, token): return self.render_unhandled(token) def render_iot(self, token): return self.render_unhandled(token) def render_io1(self, token): return self.render_unhandled(token) def render_io2(self, token): return self.render_unhandled(token) def render_ior(self, token): return self.render_ior_s(token) def render_ior_s(self, token): return self.render_unhandled(token) def render_ior_e(self, token): return self.render_unhandled(token) def render_bk(self, token): return self.render_bk_s(token) def render_bk_s(self, token): return self.render_unhandled(token) def render_bk_e(self, token): return self.render_unhandled(token) def render_sc(self, token): return self.render_sc_s(token) def render_sc_s(self, token): return self.render_unhandled(token) def render_sc_e(self, token): return self.render_unhandled(token) def render_q_s(self, token): return self.render_qs_s(token) def render_qs_s(self, token): return self.render_unhandled(token) def render_qs_e(self, token): return self.render_unhandled(token) def render_pb(self, token): return self.render_unhandled(token) # Nested def render_nested_nd(self, token): return self.render_nested_nd_s(token) def render_nested_nd_s(self, token): return self.render_nd_s(token) def render_nested_nd_e(self, token): return self.render_nd_e(token) # This is unknown! def render_unknown(self, token): self.logger.warning("Unknown token ignored: " + token.getType() + " of value '" + token.getValue() + "'" ) # We do not specifically handle this! def render_unhandled(self, token): self.logger.debug("Unhandled token ignored: " + token.getType() + " of value '" + token.getValue() + "'" )
{ "content_hash": "e372f788c4883ee00d6d0e34203036b2", "timestamp": "", "source": "github", "line_count": 179, "max_line_length": 114, "avg_line_length": 49.687150837988824, "alnum_prop": 0.662019338880144, "repo_name": "openenglishbible/USFM-Tools", "id": "7f20d963db9e3267327ee13d1f2d156ab27e2a4e", "size": "8921", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "transform/support/abstractRenderer.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "11949" }, { "name": "HTML", "bytes": "768" }, { "name": "JavaScript", "bytes": "6102" }, { "name": "Python", "bytes": "142595" }, { "name": "Shell", "bytes": "1133" } ], "symlink_target": "" }
'use strict'; $('#stats').hide(); // How many combinations of 3 objects you can make from n objects var nChoose3 = function(n) { return n*(n-1)*(n-2)/6; }; // How many pairs of objects you can choose from n objects var nChoose2 = function(n) { return n*(n-1)/2; }; // Calculate and update the threesome statistics var updateThreesomes = function() { // Count the threesome participants var guys = parseInt($('#guys').val()) || 0; var girls = parseInt($('#girls').val()) || 0; var others = parseInt($('#others').val()) || 0; var totalPeople = guys + girls + others; // If we have enough people for a threesome: if (guys >= 0 && girls >= 0 && others >= 0 && guys + girls + others >= 3) { // Calculate the basic (male/female) threesomes var mmm = nChoose3(guys); var fff = nChoose3(girls); var mmf = girls*nChoose2(guys); var ffm = guys*nChoose2(girls); // Update text $('#mmm').text(mmm); $('#fff').text(fff); $('#mmf').text(mmf); $('#ffm').text(ffm); $('#total-people').text(totalPeople); $('#unique-threesomes').text(nChoose2(totalPeople-1)); // Only update/show threesomes with non-binary people if there's some present: if (others > 0) { // Calculate our numbers var mmo = others*nChoose2(guys); var fmo = others*girls*guys; var ffo = others*nChoose2(girls); var foo = girls*nChoose2(others); var moo = guys*nChoose2(others); var ooo = nChoose3(others); // Update text $('#mmo').text(mmo); $('#fmo').text(fmo); $('#ffo').text(ffo); $('#foo').text(foo); $('#moo').text(moo); $('#ooo').text(ooo); $('#nonbinary').show(); } else { $('#nonbinary').hide(); } // Show the text $('#stats').show(); } else { $('#stats').hide(); } }; // Call updateThreesomes() whenever the user changes the numbers $('#guys').change(updateThreesomes); $('#girls').change(updateThreesomes); $('#others').change(updateThreesomes);
{ "content_hash": "c9a2d0597a88811aabc1d47cdd8bc96a", "timestamp": "", "source": "github", "line_count": 74, "max_line_length": 82, "avg_line_length": 27.2027027027027, "alnum_prop": 0.5842026825633383, "repo_name": "adamchalmers/threesomes_web", "id": "742ea2aeea00f30aabfbdea75b0ea8a983c55b00", "size": "2013", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "static/scripts/main.js", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "7870" }, { "name": "HTML", "bytes": "10975" }, { "name": "JavaScript", "bytes": "8358" }, { "name": "Python", "bytes": "934073" } ], "symlink_target": "" }
<?xml version="1.0" encoding="ISO-8859-1"?> <!DOCTYPE pise SYSTEM "http://www.phylo.org/dev/rami/PARSER/pise.dtd"> <!-- This file was originally created to permit a complete configuration of RAXML using all command line parameters. Where doing an ML analysis after the bootstrap was optional and default settings will run the program under its "black box" settings. Note: values must be passed in for invariable, empirical, dna, and matrix, even if the precond prevents them from being set, even when the default value is the empty string. For example, if we're passed invariable = empty string, then the format code for the model parameter is OK, but if we are passed a parameter map that doesn't include invariable, when PiseCommandRenderer. restituteFormat tries to replace variables for model's code and it finds a reference to a parameter that doesn't exist it clobbers the whole code string. --> <!-- The file has sonce been updarted, most recently 1/1/2010 by mamiller to include new raxml options. This is the output of -h command for raxml 7.2.3 raxmlHPC[-SSE3|-PTHREADS|-PTHREADS-SSE3] ############################################################################################################################## Option Changes for 7.2.3 compared to 7.2.0 New options in 7.2.3: [-C] (will not support) Conduct model parameter optimization on gappy, partitioned multi-gene alignments with per-partition branch length estimates (-M enabled) using the fast method with pointer meshes described in: Stamatakis and Ott: "Efficient computation of the phylogenetic likelihood function on multi-gene alignments and multi-core processors" WARNING: We can not conduct any tree searches using this method yet! Does not work with Pthreads version. 
[-f A] (will not support) (usage not recommended) classify a bunch of environmental sequences into a reference tree with dynamic alignment under MP you will need to start RAxML with a non-comprehensive reference tree and an alignment containing all sequences (reference + query) [-f u] (not yet supported) execute morphological weight calibration algorithm, this will return a weight vector. you need to provide a morphological alignment and a reference tree via "-t" [-G placementThreshold] (not yet suported) enable the ML-based evolutionary placement algorithm heuristics by specifiyng a threshold value (fraction of insertion branches to be evaluated using slow insertions under ML). [-H placementThreshold] (not yet supported) enable the MP-based evolutionary placement algorithm heuristics by specifiyng a threshold value (fraction of insertion branches to be evaluated using slow insertions under ML). [-I autoFC|autoMR|autoMRE|autoMRE_IGN] a posteriori bootstopping analysis. Use: "-I autoFC" for the frequency-based criterion "-I autoMR" for the majority-rule consensus tree criterion "-I autoMRE" for the extended majority-rule consensus tree criterion "-I autoMRE_IGN" for metrics similar to MRE, but include bipartitions under the threshold whether they are compatible or not. This emulates MRE but is faster to compute. You also need to pass a tree file containing several bootstrap replicates via "-z" [-J MR|MRE] Compute majority rule consensus tree with "-J MR" or extended majority rule consensus tree with "-J MRE" You will need to provide a tree file containing several trees via "-z" [-K] syntax?(not yet supported)Specify one of the multi-state substitution models (max 32 states) implemented in RAxML. 
Available models are: ORDERED, MK, GTR DEFAULT: GTR model [-#|-NnumberOfRuns|autoFC|autoMR|autoMRE|autoMRE_IGN] Specify the number of alternative runs on distinct starting trees In combination with the "-b" option, this will invoke a multiple bootstrap analysis Note that "-N" has been added as an alternative since "-#" sometimes caused problems with certain MPI job submission systems, since "-#" is often used to start comments. If you want to use the bootstopping criteria specify "-# autoMR" or "-# autoMRE" or "-# autoMRE_IGN" for the majority-rule tree based criteria (see -I option) or "-# autoFC" for the frequency-based criterion. Bootstopping will only work in combination with "-x" or "-b" DEFAULT: 1 single analysis Removed options in 7.2.3: [-f k]: a posteriori bootstopping analysis using the FC criterion for a tree file containg several bootstrap replicates passed via "-z" [-f l]: a posteriori bootstopping analysis using the WC criterion for a tree file containg several bootstrap replicates passed via "-z" [-l sequenceSimilarityThreshold] Specify a threshold for sequence similarity clustering. RAxML will then print out an alignment to a file called sequenceFileName.reducedBy.threshold that only contains sequences <= the specified threshold that must be between 0.0 and 1.0. RAxML uses the QT-clustering algorithm to perform this task.In addition, a file called RAxML_reducedList.outputFileName will be written that contains clustering information.DEFAULT: OFF [-L] Same functionality as "-l" above, but uses a less exhasutive and thus faster clustering algorithm This is intended for very large datasets with more than 20,000-30,000 sequences. DEFAULT: OFF [-N autoWC] Specify the number of alternative runs on distinct starting trees, or let Raxml stop it automatically. 
In combination with the "-b" option, this will invoke a multiple bootstrap analysis Note that "-N" has been added as an alternative since "-#" sometimes caused problems with certain MPI job submission systems, since "-#" is often used to start comments If you want to use the bootstopping criteria specify "-# autoWC" for the weighted criterion or "-# autoFC" for the frequency-based criterion. This will only work in combination with "-x" or "-b" [-B wcCriterionThreshold] Specify a floating point number between 0.0 and 1.0 that will be used as cutoff threshold for the WC bootstopping criterion.The recommended setting is 0.03. 7.2.0 DEFAULT: 0.03 (recommended empirically determined setting) ############################################################################################################################### All options, 7.2.3 [-s] (required) Specify the name of the alignment data file in PHYLIP format (we manage this) [-n] (required) Specify the outputFileName (we manage this) [-m] (required) substitutionModel [-a weightFileName] [-A secondaryStructureSubstModel] [-b bootstrapRandomNumberSeed] [-c numberOfCategories] [-C] [-d] [-D] [-e likelihoodEpsilon] [-E excludeFileName] [-f a|A|b|c|d|e|g|h|j|m|n|o|p|q|r|s|t|u|v|w|x|y|X] [-F] [-g groupingFileName] [-G placementThreshold] [-h] [-H placementThreshold] [-i initialRearrangementSetting] [-I autoFC|autoMR|autoMRE|autoMRE_IGN] [-j] [-J MR|MRE] [-k] [-K] [-M] [-o outGroupName1[,outGroupName2[,...]]] [-p parsimonyRandomSeed] [-P proteinModel] [-q multipleModelFileName] [-r binaryConstraintTree] [-S secondaryStructureFile] [-t userStartingTree] [-T numberOfThreads] [-v] [-w workingDirectory] [-x rapidBootstrapRandomNumberSeed] [-y] [-Y] [-z multipleTreesFile] [-#|-N numberOfRuns|autoFC|autoMR|autoMRE|autoMRE_IGN] Here is a list of options we will support; below it a list options we will not support: [-A secondaryStructureSubstModel] Specify one of the RNA secondary structure substitution models implemented in RAxML. 
7.2.0 The same nomenclature as in the PHASE manual is used, available models: S6A, S6B, S6C, S6D, S6E, S7A, S7B, S7C, S7D, S7E, S7F, S16, S16A, S16B DEFAULT: 16-state GTR model (S16) 6 state model nomenclature: http://www.cs.manchester.ac.uk/ai/Software/phase/manual/node101.html 7 state model nomenclature http://www.cs.manchester.ac.uk/ai/Software/phase/manual/node107.html 16 state model nomenclature http://www.cs.manchester.ac.uk/ai/Software/phase/manual/node114.html [-b bootstrapRandomNumberSeed] Specify an integer number (random seed) and turn on bootstrapping 7.0.4 DEFAULT: OFF [-B wcCriterionThreshold] Specify a floating point number between 0.0 and 1.0 that will be used as cutoff threshold for the WC bootstopping criterion. The recommended setting is 0.03. 7.2.0 DEFAULT: 0.03 (recommended empirically determined setting) [-c numberOfCategories] Specify number of distinct rate categories for RAxML when modelOfEvolution is set to GTRCAT 7.0.4 Individual per-site rates are categorized into numberOfCategories rate categories to accelerate computations. DEFAULT: 25 [-D] ML search convergence criterion. This will break off ML searches if the relative Robinson-Foulds distance large trees between the trees obtained from two consecutive lazy SPR cycles is smaller or equal to 1%. Usage recommended in progress for very large datasets in terms of taxa. On trees with more than 500 taxa this will yield execution time improvements of approximately 50% while yielding only slightly worse trees. DEFAULT: OFF [-E excludeFileName] specify an exclude file name, that contains a specification of alignment positions you wish to exclude. 7.0.4 Format is similar to Nexus, the file shall contain entries like "100-200 300-400", to exclude a single column write, e.g., "100-100", if you use a mixed model, an appropriately adapted model file will be written. 
[-f a|b|d|e|g|h|j|m|n|r|u|v|w|x|y] 7.0.4 "-f a": rapid Bootstrap analysis and search for best-scoring ML tree in one program run 7.0.4 "-f b": draw bipartition information on a tree provided with "-t" based on multiple trees (e.g., from a bootstrap) in a file specified by "-z" 7.0.4 "-f d": new rapid hill-climbing (DEFAULT: ON) 7.0.4 "-f h": compute log likelihood test (SH-test) between best tree passed via "-t" and a bunch of other trees passed via "-z" 7.0.4 "-f s": option can be used to split a multi-gene alignment into individual genes, provided a model file with -q. This might be useful to select best fitting models for individual partitions of an AA multi-gene alignment or to infer per-partition trees in order to analyze tree compatibility. Example: raxmlHPC -f s -q part -s alg -m GTRCAT -n TEST. not yet "-f m": compare bipartitions between two bunches of trees passed via "-t" and "-z" respectively. This will return the Pearson correlation between all partitions found in the two tree files. 
A file called RAxML_bipartitionFrequencies.outpuFileName will be printed that contains the pair-wise bipartition frequencies of the two sets not yet "-f n": compute the log likelihood score of all trees contained in a tree file provided by "-z" under GAMMA or GAMMA+P-Invar not yet "-f r": compute pairwise Robinson-Foulds (RF) distances between all pairs of trees in a tree file passed via "-z" if the trees have node labels represented as integer support values the program will also compute two flavors of the weighted Robinson-Foulds (WRF) distance not yet "-f v": classify a bunch of environmental sequences into a reference tree using the slow heuristics without dynamic alignment you will need to start RAxML with a non-comprehensive reference tree and an alignment containing all sequences (reference + query) not yet "-f w": compute ELW test on a bunch of trees passed via "-z" "-f x": compute pair-wise ML distances, ML model parameters will be estimated on an MP starting tree or a user-defined tree passed via "-t", only allowed for GAMMA-based models of rate heterogeneity not yet "-f y": classify a bunch of environmental sequences into a reference tree using the fast heuristics without dynamic alignment you will need to start RAxML with a non-comprehensive reference tree and an alignment containing all sequences (reference + query) not yet "-f e": optimize model+branch lengths for given input tree under GAMMA/GAMMAI only not yet "-f g": compute per site log Likelihoods for one ore more trees passed via "-z" and write them to a file that can be read by CONSEL WARNING: does not print likelihoods in the original column order not yet "-f j": generate a bunch of bootstrapped alignment files from an original alignment file. You need to specify a seed with "-b" and the number of replicates with "-#" not yet "-f u": execute morphological weight calibration algorithm, this will return a weight vector. 
you need to provide a morphological alignment and a reference tree via "-t" [-F] enable ML tree searches under CAT model for very large trees without switching to GAMMA in the end (saves memory) and print out some additional files large trees)with intermediate trees from the search. This option can also be used with the GAMMA models in order to avoid the thorough optimization of the best-scoring in progress ML tree in the end. DEFAULT: OFF [-g groupingFileName] specify the file name of a multifurcating constraint tree this tree does not need to be comprehensive, i.e. must not contain all taxa 7.0.4 [-G] enable the ML-based evolutionary placement algorithm heuristics by specifiyng a threshold value (fraction of insertion branches to be evaluated using slow insertions under ML). [-H] enable the MP-based evolutionary placement algorithm heuristics by specifiyng a threshold value (fraction of insertion branches to be evaluated using slow insertions under ML). [-i initialRearrangementSetting] Initial rearrangement setting for the subsequent application of topological changes phase. 7.0.4 DEFAULT: determined by program; integer input; if selected, default value of 10 is provided. [-I] a posteriori bootstopping analysis. Use: "-I autoFC" for the frequency-based criterion "-I autoMR" for the majority-rule consensus tree criterion "-I autoMRE" for the extended majority-rule consensus tree criterion "-I autoMRE_IGN" for metrics similar to MRE, but include bipartitions under the threshold whether they are compatible or not. This emulates MRE but is faster to compute. You also need to pass a tree file containing several bootstrap replicates via "-z" [-J] Compute majority rule consensus tree with "-J MR" or extended majority rule consensus tree with "-J MRE" You will need to provide a tree file containing several trees via "-z" [-K] syntax?(not yet supported)Specify one of the multi-state substitution models (max 32 states) implemented in RAxML. 
Available models are: ORDERED, MK, GTR DEFAULT: GTR model [ -m Model of Binary (Morphological), Nucleotide or Amino Acid Substitution]: BINARY: (NEW for 7.2.0) (now implemented) "-m BINCAT" : Optimization of site-specific evolutionary rates which are categorized into numberOfCategories distinct rate categories for greater computational efficiency. Final tree might be evaluated automatically under BINGAMMA, depending on the tree search option "-m BINCATI" : Optimization of site-specific evolutionary rates which are categorized into numberOfCategories distinct rate categories for greater computational efficiency. Final tree might be evaluated automatically under BINGAMMAI, depending on the tree search option "-m BINGAMMA" : GAMMA model of rate heterogeneity (alpha parameter will be estimated) "-m BINGAMMAI" : Same as BINGAMMA, but with estimate of proportion of invariable sites NUCLEOTIDES: "-m GTRCAT" : GTR + Optimization of substitution rates + Optimization of site-specific evolutionary rates which are categorized into numberOfCategories distinct rate categories for greater computational efficiency. Final tree might be evaluated under GTRGAMMA, depending on the tree search option "-m GTRCAT_FLOAT" : Same as above but uses single-precision floating point arithmetics instead of double-precision Usage only recommend for testing, the code will run slower, but can save almost 50% of memory. If you have problems with phylogenomic datasets and large memory requirements you may give it a shot. Keep in mind that numerical stability seems to be okay but needs further testing. "-m GTRCATI" : GTR + Optimization of substitution rates + Optimization of site-specific evolutionary rates which are categorized into numberOfCategories distinct rate categories for greater computational efficiency. 
Final tree might be evaluated under GTRGAMMAI, depending on the tree search option "-m GTRGAMMA" : GTR + Optimization of substitution rates + GAMMA model of rate heterogeneity (alpha parameter will be estimated) "-m GTRGAMMA_FLOAT" : Same as GTRGAMMA, but also with single-precision arithmetics, same cautionary notes as for GTRCAT_FLOAT apply. "-m GTRGAMMAI" : Same as GTRGAMMA, but with estimate of proportion of invariable sites MULTI-STATE: "-m MULTICAT" : Optimization of site-specific evolutionary rates which are categorized into numberOfCategories distinct rate categories for greater computational efficiency. Final tree might be evaluated automatically under MULTIGAMMA, depending on the tree search option "-m MULTICATI" : Optimization of site-specific evolutionary rates which are categorized into numberOfCategories distinct rate categories for greater computational efficiency. Final tree might be evaluated automatically under MULTIGAMMAI, depending on the tree search option "-m MULTIGAMMA" : GAMMA model of rate heterogeneity (alpha parameter will be estimated) "-m MULTIGAMMAI" : Same as MULTIGAMMA, but with estimate of proportion of invariable sites You can use up to 32 distinct characters to encode multi-state regions, they must be used in the following order:0, 1, 2, 3, 4, 5, 6, 7, 8, 9, A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V i.e., if you have 6 distinct characters you would use 0, 1, 2, 3, 4, 5 to encode these. The substitution model for the multi-state regions can be selected via the "-K" option AMINO ACIDS: "-m PROTCATmatrixName[F]" : specified AA matrix + Optimization of substitution rates + Optimization of site-specific evolutionary rates which are categorized into numberOfCategories distinct rate categories for greater computational efficiency. 
Final tree might be evaluated automatically under PROTGAMMAmatrixName[f], depending on the tree search option "-m PROTCATmatrixName[F]_FLOAT" : PROTCAT with single precision arithmetics, same cautionary notes as for GTRCAT_FLOAT apply "-m PROTCATImatrixName[F]" : specified AA matrix + Optimization of substitution rates + Optimization of site-specific evolutionary rates which are categorized into numberOfCategories distinct rate categories for greater computational efficiency. Final tree might be evaluated automatically under PROTGAMMAImatrixName[f], depending on the tree search option "-m PROTGAMMAmatrixName[F]" : specified AA matrix + Optimization of substitution rates + GAMMA model of rate heterogeneity (alpha parameter will be estimated) "-m PROTGAMMAmatrixName[F]_FLOAT" : PROTGAMMA with single precision arithmetics, same cautionary notes as for GTRCAT_FLOAT apply "-m PROTGAMMAImatrixName[F]" : Same as PROTGAMMAmatrixName[F], but with estimate of proportion of invariable sites Available AA substitution models: DAYHOFF, DCMUT, JTT, MTREV, WAG, RTREV, CPREV, VT, BLOSUM62, MTMAM, LG, GTR With the optional "F" appendix you can specify if you want to use empirical base frequencies Please note that for mixed models you can in addition specify the per-gene AA model in the mixed model file (see manual for details). Also note that if you estimate AA GTR parameters on a partitioned dataset, they will be linked estimated jointly) across all partitions to avoid over-parametrization [-o outGroupName1[,outGroupName2[,...]]] Specify the name of a single outgroup or a comma-separated list of outgroups, eg "-o Rat" or "-o Rat,Mouse", in case that multiple outgroups are not monophyletic the first name in the list will be selected as outgroup, don't leave spaces between taxon names! [-P proteinModel] Specify the file name of a user-defined AA (Protein) substitution model. 
This file must contain 420 entries, the first 400 being the AA substitution rates (this must be a symmetric matrix) and the last 20 are the empirical base frequencies [-q multipleModelFileName] Specify the file name which contains the assignment of models to alignment partitions for multiple models of substitution. For the syntax of this file please consult the manual. [-r binaryConstraintTree] Specify the file name of a binary constraint tree. this tree does not need to be comprehensive, i.e. must not contain all taxa [-S secondaryStructureFile] Specify the name of a secondary structure file. The file can contain "." for alignment columns that do not form part of a stem and characters "()<>[]{}" to define stem regions and pseudoknots [-t userStartingTree] Specify a user starting tree file name in Newick format [-T numberOfThreads] PTHREADS VERSION ONLY! Specify the number of threads you want to run. Make sure to set "-T" to at most the number of CPUs you have on your machine, otherwise, there will be a huge performance decrease! [-x rapidBootstrapRandomNumberSeed] Specify an integer number (random seed) and turn on rapid bootstrapping CAUTION: unlike in version 7.0.4 RAxML will conduct rapid BS replicates under the model of rate heterogeneity you specified via "-m" and not by default under CAT [-y] If you want to only compute a parsimony starting tree with RAxML specify "-y", the program will exit after computation of the starting tree DEFAULT: OFF [-Y] Do a more thorough parsimony tree search using a parsimony ratchet and exit. specify the number of ratchet searches via "-#" or "-N" This has just been implemented for completeness, if you want a fast MP implementation use TNT DEFAULT: OFF [-z multipleTreesFile] Specify the file name of a file containing multiple trees e.g. 
from a bootstrap that shall be used to draw bipartition values onto a tree provided with "-t", It can also be used to compute per site log likelihoods in combination with "-f g" and to read a bunch of trees for a couple of other options ("-f h", "-f m", "-f n"). [-#|-NnumberOfRuns|autoFC|autoMR|autoMRE|autoMRE_IGN] Specify the number of alternative runs on distinct starting trees In combination with the "-b" option, this will invoke a multiple bootstrap analysis Note that "-N" has been added as an alternative since "-#" sometimes caused problems with certain MPI job submission systems, since "-#" is often used to start comments. If you want to use the bootstopping criteria specify "-# autoMR" or "-# autoMRE" or "-# autoMRE_IGN" for the majority-rule tree based criteria (see -I option) or "-# autoFC" for the frequency-based criterion. Bootstopping will only work in combination with "-x" or "-b" DEFAULT: 1 single analysis ##########################################not supported ######################################### [-a weightFileName] Specify a column weight file name to assign individual weights to each column of the alignment. Those weights must be integers separated by any type and number of whitespaces within a separate file, see file "example_weights" for an example. [-C] Conduct model parameter optimization on gappy, partitioned multi-gene alignments with per-partition branch length estimates (-M enabled) using the fast method with pointer meshes described in: Stamatakis and Ott: "Efficient computation of the phylogenetic likelihood function on multi-gene alignments and multi-core processors" WARNING: We can not conduct any tree searches using this method yet! Does not work with Pthreads version. 
[-d] Start ML optimization from random starting tree DEFAULT: OFF [-e likelihoodEpsilon] set model optimization precision in log likelihood units for final optimization of tree topology under MIX/MIXI or GAMMA/GAMMAI DEFAULT: 0.1 for models not using proportion of invariant sites estimate 0.001 for models using proportion of invariant sites estimate [-f select algorithm]: "-f A":(usage not recommended) classify a bunch of environmental sequences into a reference tree with dynamic alignment under MP you will need to start RAxML with a non-comprehensive reference tree and an alignment containing all sequences (reference + query) "-f c": check if the alignment can be properly read by RAxML "-f e": optimize model+branch lengths for given input tree under GAMMA/GAMMAI only "-f g": compute per site log Likelihoods for one or more trees passed via "-z" and write them to a file that can be read by CONSEL "-f i": perform a really thorough bootstrap, refinement of final BS tree under GAMMA and a more exhaustive algorithm "-f j": generate a bunch of bootstrapped alignment files "-f o": (will not implement) old and slower rapid hill-climbing "-f p": perform pure stepwise MP addition of new sequences to an incomplete starting tree "-f t": do randomized tree searches on one fixed starting tree "-f X": (usage not recommended) classify a bunch of environmental sequences into a reference tree using the slow heuristics with dynamic alignment you will need to start RAxML with a non-comprehensive reference tree and an alignment containing all sequences (reference + query) "-f q": (usage not recommended) classify a bunch of environmental sequences into a reference tree using the fast heuristics with dynamic alignment you will need to start RAxML with a non-comprehensive reference tree and an alignment containing all sequences (reference + query) [-h] Display this help message [ -j] Specifies if checkpoints will be written by the program. 
If checkpoints (intermediate tree topologies) shall be written by the program specify "-j". DEFAULT: OFF [-k] Specifies that bootstrapped trees should be printed with branch lengths. The bootstraps will run a bit longer, because model parameters will be optimized at the end of each run under GAMMA or GAMMA+P-Invar respectively. DEFAULT: OFF (old version said Use with CATMIX/PROTMIX or GAMMA/GAMMAI., check on this) [-l sequenceSimilarityThreshold] Specify a threshold for sequence similarity clustering. RAxML will then print out an alignment to a file called sequenceFileName.reducedBy.threshold that only contains sequences <= the specified threshold that must be between 0.0 and 1.0. RAxML uses the QT-clustering algorithm to perform this task. In addition, a file called RAxML_reducedList.outputFileName will be written that contains clustering information. DEFAULT: OFF [-M] Switch on estimation of individual per-partition branch lengths. Only has effect when used in combination with "-q". Branch lengths for individual partitions will be printed to separate files A weighted average of the branch lengths is computed by using the respective partition lengths DEFAULT: OFF [-p] Specify a random number seed for the parsimony inferences. This allows you to reproduce your results and will help me debug the program. This option HAS NO EFFECT in the parallel MPI version [-u] Specify the number of multiple BS searches per replicate to obtain better ML trees for each replicate. DEFAULT: One ML search per BS replicate. Absent from raxml 7.2.0?? 
[-v] Display version information [-w workingDirectory] Name of the working directory where RAxML will write its output files DEFAULT: current directory --> <!-- ##################################(added on 5/5/2009)######################################################################### mamiller made some changes top grouping, so parameters are written on the command line in the order specified by the manual -E excludefile specify a range of characters to exclude ###################################(modified 8/3/2009)######################################################################## mamiller made the parsimony seed option invalid when the -y option is in force. ########################################################################################################################### ##################################(added on 9/1/2009)######################################################################## mamiller exposed the -f h option in response to a user request ##################################(added on 10/31/2009)###################################################################### mamiller repaired the -p option; removed the superfluous -f d option and its preconds. Also, the preconds all had &amp; where they should have had &amp;&amp; this was repaired Some ctrl features were broken. I cant seem to fix them and made the best workarounds I could with other tools and trext. ##################################(added on 12/31/2009)###################################################################### we must remove some of the model options that are no longer used (as of 7.1.0). this includes "-m GTRMIX" : Inference of the tree under GTRCAT and thereafter evaluation of the final tree topology under GTRGAMMA "-m GTRMIXI" : Same as GTRMIX, but with estimate of proportion of invariable sites "-m GTRCAT_GAMMA" : Inference of the tree with site-specific evolutionary rates. However, here rates are categorized using the 4 discrete GAMMA rates. 
Evaluation of the final tree topology under GTRGAMMA "-m GTRCAT_GAMMAI" : Same as GTRCAT_GAMMA, but with estimate of proportion of invariable sites these options are not supported: "-m GTRCAT_FLOAT" : Same as above but uses single-precision floating point arithmetics instead of double-precision Usage only recommend for testing, the code will run slower, but can save almost 50% of memory. If you have problems with phylogenomic datasets and large memory requirements you may give it a shot. Keep in mind that numerical stability seems to be okay but needs further testing. "-m GTRCATI" : Same as GTRCAT_GAMMA, but with estimate of proportion of invariable sites "-m GTRGAMMA_FLOAT" : Same as GTRGAMMA, but also with single-precision arithmetics, same cautionary notes as for GTRCAT_FLOAT apply. "-m GTRGAMMAI" : Same as GTRCAT_GAMMA, but with estimate of proportion of invariable sites change vdef to GTRCAT from alexis: > > Specifically, here is what I think is true (and I plan to write on our interface): > > For RAxML versions 7.1.0 and later, the same flag GTRGAMMA (which > creates the command line -m GTRGAMMA -x -f a) causes GTRGAMMA to be > used both during the rapid bootstrapping AND inference of the best > tree. This takes much longer than the previous method, Correct. > where GTRCAT is used to conduct the bootstrapping phase. In other > words, if you want to run a bootstrapping experiment, choosing the model GTRCAT (-m GTRCAT -x -f a) is identical to choosing GTRGAMMA in RAxML 7.0.4 and below. Exactly. > In other words, the GTRCAT switch causes GTRCAT to be used during the rapid bootstrapping, but the program then switches to GTRGAMMA for the ML search. Yes, but only in the very end, where an attempt is made to further improve the current ML tree using a more thorough version of SPR moves. > (BUT: I don't know what this means: "Final tree might be evaluated under GTRGAMMA, depending on the tree search option." 
For rapid bootstrapping the final ML tree will always be evaluated under GAMMA. > The GTRMIX option (which conducted inference under GRTCAT and calculated best tree under GTRGAMMA) is no longer offered for RAxML 7.1.0 and above. Exactly. > (was gtrmix the same as bootstrapping with gtrgamma in the past? Yes. > how should people who used that option accomplish the same analysis? > Was this not a useful option?) It was a useful option, GTRCAT now essentially does was GTRMIX used to do, I just designed the program like that, to keep users from inappropriately using CAT. ############################################################################################################################ User request for -f x I have been using raxml a fair bit recently and was wondering if you might be able to add a feature please? I'd like to be able to export a distance matrix. I don't know the exact syntax for that, but the 7.0.4 manual has -f x: at the bottom of page 9. Also, it might avoid some confusion if there is some comment in the section on using a starting tree (Supply a starting tree (-t))that this is not available when doing rapid bootstrapping as raxml just gives an error message without really saying why it gave an error. ############################################################################################################################# 1/19/2010 mamiller added logic to help the program run well using the hybrid option ############################################################################################################################## ############################################################################################################################## 9/25/2010 mamiller added new commands to update the interface for 7.2.7 the big change in this code is that bootstopping, automatic stopping of bootstraps, is not supported in the hybrid parallel code. 
this new interface reflects the rules created by Wayne Pfeiffer for picking the number of nodes, MPI processes, and threads to use for -N nnn, where nnn is not a number, but a bootstopping command. All of these bootstopping cases should use the same rules as for nnn >= 50. [-f c|E|F|i|I|J|o|p|R|s|S|t|U|v|] [-F] [-g groupingFileName] [-G placementThreshold] [-h] [-H placementThreshold] [-i initialRearrangementSetting] [-I autoFC|autoMR|autoMRE|autoMRE_IGN] [-j] [-J MR|MR_DROP|MRE|STRICT|STRICT_DROP] [-k] [-K] [-M] [-o outGroupName1[,outGroupName2[,...]]] [-O checkPointInterval] [-p parsimonyRandomSeed] [-P proteinModel] [-q multipleModelFileName] [-r binaryConstraintTree] [-R binaryModelParamFile] [-S secondaryStructureFile] [-t userStartingTree] [-T numberOfThreads] [-U] [-v] [-w outputDirectory] [-W slidingWindowSize] [-x rapidBootstrapRandomNumberSeed] [-y] [-Y] [-z multipleTreesFile] [-#|-N numberOfRuns|autoFC|autoMR|autoMRE|autoMRE_IGN] -a Specify a column weight file name to assign individual weights to each column of the alignment. Those weights must be integers separated by any type and number of whitespaces whithin a separate file, see file "example_weights" for an example. -A Specify one of the secondary structure substitution models implemented in RAxML. The same nomenclature as in the PHASE manual is used, available models: S6A, S6B, S6C, S6D, S6E, S7A, S7B, S7C, S7D, S7E, S7F, S16, S16A, S16B DEFAULT: 16-state GTR model (S16) -E specify an exclude file name, that contains a specification of alignment positions you wish to exclude. Format is similar to Nexus, the file shall contain entries like "100-200 300-400", to exclude a single column write, e.g., "100-100", if you use a mixed model, an appropriatly adapted model file will be written. -f select algorithm: [-f J|p|R|S|U|v|] [-F] (not sure) "-f J": Compute SH-like support values on a given tree passed via "-t". 
(not sure) "-f p": perform pure stepwise MP addition of new sequences to an incomplete starting tree and exit (not sure) "-f R": compute rogue taxa using new statistical method based on the evolutionary placement algorithm WARNING: this is experimental code (not sure) "-f S": compute site-specific placement bias using a leave one out test inspired by the evolutionary placement algorithm (not sure) "-f U": execute morphological wieght calibration using parsimony, this will return a weight vector. you need to provide a morphological alignment and a reference tree via "-t" (not sure) "-f v": classify a bunch of environmental sequences into a reference tree using the slow heuristics without dynamic alignment you will need to start RAxML with a non-comprehensive reference tree and an alignment containing all sequences (reference + query) DEFAULT for "-f": new rapid hill climbing (not supported) -O Enable checkpointing using the dmtcp library available at http://dmtcp.sourceforge.net/ This only works if you call the program by preceded by the command "dmtcp_checkpoint" and if you compile a dedicated binary using the appropriate Makefile. With "-O" you can specify the interval between checkpoints in seconds. DEFAULT: 3600.0 seconds (not supported) -R Specify the file name of a binary model parameter file that has previously been generated with RAxML using the -f e tree evaluation option. The file name should be: RAxML_binaryModelParameters.runID (not supported) -U Try to save memory by using SEV-based implementation for gap columns on large gappy alignments WARNING: this will only work for DNA under GTRGAMMA and is still in an experimental state. 
(not sure) -W Sliding window size for leave-one-out site-specific placement bias algorithm only effective when used in combination with "-f S" DEFAULT: 100 sites --> <pise> <head> <title>RAxML-HPC2 7.2.7 on Abe (Beta)</title> <version>7.2.7</version> <description>Phylogenetic tree inference using maximum likelihood/rapid bootstrapping run on teragrid. (beta interface)</description> <authors>Alexandros Stamatakis</authors> <reference>Stamatakis A. RAxML-VI-HPC: maximum likelihood-based phylogenetic analyses with thousands of taxa and mixed models. Bioinformatics. 2006 Nov 1;22(21):2688-90</reference> <category>Phylogeny / Alignment</category> <doclink>http://icwww.epfl.ch/~stamatak/index-Dateien/countManual7.0.0.php</doclink> </head> <command>raxmlhpc2_abe727</command> <parameters> <!-- new logic for the command line delivery revised by mamiller, 1/21/2010, based on new logic from Wayne Pfeiffer: For example, suppose -N nnn is specified with nnn < 20. What I intended was the following. Use one node with . 8 Pthreads **only if** -x xxx -f a is specified & nchar >= 2,000 or . 2 MPI processes and 4 Pthreads in all other cases. The other cases are -x xxx -f a with nchar < 2,000 -x xxx -f d -b bbb -f d or -f d. There is a similar dichotomy between 8 and 4 Pthreads for larger values of nnn. Upon reviewing my data more closely, I would like to change the nchar threshold to 2,000. So here is the complete specification for nnn a number. the logic considers four cases: 1. the -f a -N nnn option is used, this selection implies -x (hybridlogic1-3b); binning is based on nchar and bootstop thresholds. 2. whenever the -N option is used to specify a number of bootstraps or identical runs to conduct, where -f d is the analysis (hybridlogic5-7b) the logic change is to accommodate the fact that a best tree is not calculated, which is not efficient under mpi. 3. the case where automatic bootstopping is used: a programmatic tool halts bootstrapping (hybridlogic8) 4.
all other analyses (-y, -f [other option] (hybridlogic8b) In the first two cases, the number of chars and the value of -N is considered, the logic is identical, but I split them into two groups because it made it easier for me, and presumably anyone who follows me. These run the hybrid code EXCEPT the first case in each allows for using the pthreads only. In the third case, pthreads only code is used, and this is also true for the fourth case. The logic is constructed in pairs of cases, based on thresholds. Each threshold case is accompanied by a scheduler element. The precond for each logic scheduler element is necessarily identical to the mate that precedes it--> <!-- hybridlogic1 (revised 1/21/2010) For nnn < 20, use one node with 8 Pthreads (no MPI) if and only if -x xxx -f a is specified & nchar >= 2,000 (pthreads only) or 2 MPI processes and 4 Pthreads in all other cases, when -x xxx -f a with nchar < 2,000, --> <!-- the first section considers only the cases where -f a is checked, and where bootstraps (-N, bootstop) are specified, these are binned as a matrix of bootstrap number and nchars --> <!--Start -f a, -N less than 20; start -nchars greater than 2000 --> <parameter ishidden="1" type="String"> <name>raxmlhpc_hybridlogic1</name> <attributes> <format> <language>perl</language> <!-- this should call pthread-only code, not sure we have the correct alias yet --> <code>"raxmlHPC-PTHREADS-7.2.7 -T 8"</code> </format> <precond> <!-- must be -f a AND -N (bootstop) < 20; when nchars >= 2000.
--> <language>perl</language> <code>$mlsearch &amp;&amp; $bootstop &lt; 20 &amp;&amp; $nchars&gt;= 2000</code> </precond> <group>0</group> </attributes> </parameter> <!-- stop -f a -N < 20 -nchars greater than 2000 --> <!-- start -f a N < 20 -nchars less than 2000 --> <parameter ishidden="1" type="String"> <name>raxmlhpc_hybridlogic1b</name> <attributes> <format> <language>perl</language> <code>"raxmlHPC-HYBRID-7.2.7 -T 4"</code> </format> <precond> <!-- must be -f a AND -N (bootstop) < 20; when nchars < 2000. --> <language>perl</language> <code>$mlsearch &amp;&amp; $bootstop &lt; 20 &amp;&amp; $nchars &lt; 2000</code> </precond> <group>0</group> </attributes> </parameter> <parameter ishidden="1" type="String"> <name>raxmlhpc_hybridlogic1b_scheduler</name> <attributes> <paramfile>scheduler.conf</paramfile> <format> <language>perl</language> <code> "jobtype=mpi\\n" . "mpi_processes=2\\n" . "threads_per_process=4\\n" . "nodes=1\\n" </code> </format> <!-- the precond for each logic scheduler statement is necessarily identical to the one before it --> <precond> <!-- must be -f a AND -N (bootstop) , and -N < 20; nchars < 2000. It can be either -f a -x or -f d -x --> <language>perl</language> <code>$mlsearch &amp;&amp; $bootstop &lt; 20 &amp;&amp; $nchars &lt; 2000</code> </precond> <group>0</group> </attributes> </parameter> <!-- stop -f a -N < 20 -nchars less than 2000 --> <!-- hybridlogic2 For nnn >= 20 & nnn < 50, use 2 nodes with . 2 MPI processes & 8 Pthreads if -xxx -f a is specified & nchar >= 2,000 or . 4 MPI processes & 4 Pthreads otherwise. 
--> <!-- Start -f a, 20< -N < 50; start -nchars less than 2000 --> <parameter ishidden="1" type="String"> <name>raxmlhpc_hybridlogic2</name> <attributes> <format> <language>perl</language> <code>"raxmlHPC-HYBRID-7.2.7 -T 8"</code> </format> <precond> <!-- must be -f a AND -N (bootstop) >= 20, less than 50; nchars >= 2000 --> <language>perl</language> <code>$mlsearch &amp;&amp; $bootstop &gt;= 20 &amp;&amp; $bootstop &lt; 50 &amp;&amp; $nchars&gt;= 2000</code> </precond> <group>0</group> </attributes> </parameter> <parameter ishidden="1" type="String"> <name>raxmlhpc_hybridlogic2_scheduler</name> <attributes> <paramfile>scheduler.conf</paramfile> <format> <language>perl</language> <code> "jobtype=mpi\\n" . "mpi_processes=2\\n" . "threads_per_process=8\\n" . "nodes=2\\n" </code> </format> <precond> <!-- must be -f a AND -N (bootstop) >= 20, less than 50; nchars >= 2000 --> <language>perl</language> <code>$mlsearch &amp;&amp; $bootstop &gt;= 20 &amp;&amp; $bootstop &lt; 50 &amp;&amp; $nchars &gt;= 2000</code> </precond> <group>0</group> </attributes> </parameter> <!-- Stop -f a, 20< -N < 50; -nchars more than 2000 --> <!-- Start -f a, 20< -N < 50; -nchars less than 2000 --> <parameter ishidden="1" type="String"> <name>raxmlhpc_hybridlogic2b</name> <attributes> <format> <language>perl</language> <code>"raxmlHPC-HYBRID-7.2.7 -T 4"</code> </format> <precond> <!-- must be -f a -AND -N (bootstop), and -N >= 20, less than 50; nchars < 2000 --> <language>perl</language> <code>$mlsearch &amp;&amp; $bootstop &gt;= 20 &amp;&amp; $bootstop &lt; 50 &amp;&amp; $nchars &lt; 2000</code> </precond> <group>0</group> </attributes> </parameter> <parameter ishidden="1" type="String"> <name>raxmlhpc_hybridlogic2b_scheduler</name> <attributes> <paramfile>scheduler.conf</paramfile> <format> <language>perl</language> <code> "jobtype=mpi\\n" . "mpi_processes=4\\n" . "threads_per_process=4\\n" . 
"nodes=2\\n" </code> </format> <precond> <!-- must be -f a AND -N (bootstop), and -N >= 20, less than 50; nchars < 2000 --> <language>perl</language> <code>$mlsearch &amp;&amp; $bootstop &gt;= 20 &amp;&amp; $bootstop &lt; 50 &amp;&amp; $nchars &lt; 2000</code> </precond> <group>0</group> </attributes> </parameter> <!-- hybridlogic3/3b modified, to account for not very good scaling at 10 nodes For nnn >= 50, use 5 nodes with . 5 MPI processes & 8 Pthreads if -xxx -f a is specified & nchar >= 2,000 or . 10 MPI processes & 4 Pthreads otherwise.. --> <!-- Start -f a, -N >= 50; -nchars >= 2000 --> <parameter ishidden="1" type="String"> <name>raxmlhpc_hybridlogic3</name> <attributes> <format> <language>perl</language> <code>"raxmlHPC-HYBRID-7.2.7 -T 8 "</code> </format> <precond> <!-- must be -f a -AND -N (bootstop), and -N >= 50; nchars >= 2000 --> <language>perl</language> <code>$mlsearch &amp;&amp; $bootstop &gt;= 50 &amp;&amp; $nchars &gt;= 2000</code> </precond> <group>0</group> </attributes> </parameter> <parameter ishidden="1" type="String"> <name>raxmlhpc_hybridlogic3_scheduler</name> <attributes> <paramfile>scheduler.conf</paramfile> <format> <language>perl</language> <code> "jobtype=mpi\\n" . "mpi_processes=5\\n" . "threads_per_process=8\\n" . 
"nodes=5\\n" </code> </format> <precond> <!-- must be -f a AND -N (bootstop), and -N >= 50; nchars >= 2000 --> <language>perl</language> <code>$mlsearch &amp;&amp; $bootstop &gt;= 50 &amp;&amp; $nchars &gt;= 2000</code> </precond> <group>0</group> </attributes> </parameter> <!-- Stop -f a, -N >= 50; -nchars >= 2000 --> <!-- Start -f a, -N >= 50; -nchars less than 2000 --> <parameter ishidden="1" type="String"> <name>raxmlhpc_hybridlogic3b</name> <attributes> <format> <language>perl</language> <code>"raxmlHPC-HYBRID-7.2.7 -T 4 "</code> </format> <precond> <!-- must be -f a AND -N (bootstop), and -N >= 50; nchars < 2000 --> <language>perl</language> <code>$mlsearch &amp;&amp; $bootstop &gt;= 50 &amp;&amp; $nchars &lt; 2000</code> </precond> <group>0</group> </attributes> </parameter> <parameter ishidden="1" type="String"> <name>raxmlhpc_hybridlogic3b_scheduler</name> <attributes> <paramfile>scheduler.conf</paramfile> <format> <language>perl</language> <code> "jobtype=mpi\\n" . "mpi_processes=10\\n" . "threads_per_process=4\\n" . "nodes=5\\n" </code> </format> <precond> <!-- must be -f a AND -N (bootstop), and -N >= 50; nchars < 2000 --> <language>perl</language> <code>$mlsearch &amp;&amp; $bootstop &gt;= 50 &amp;&amp; $nchars &lt; 2000</code> </precond> <group>0</group> </attributes> </parameter> <!-- Stop -f a, -N > 50; -nchars less than 2000 --> <!-- hybridlogic4 This section is subsumned by 3 and 3b, so ignore it. For nnn >= 100, use 10 nodes with . 10 MPI processes & 8 Pthreads if -xxx -f a is specified & nchar >= 1,000 or . 20 MPI processes and 4 Pthreads otherwise. This option was commented out because it doesnt always scale that well. 
more research is needed <parameter ishidden="1" type="String"> <name>raxmlhpc_hybridlogic4</name> <attributes> <format> <language>perl</language> <code>"raxml_7.2.6_hybrid mpi_processes=10 pthreads_per_process=8 nodes=10"</code> </format> --> <!-- must be -f a, and -N >= 100; nchars >= 1000 --> <!-- <precond> <language>perl</language> <code>$mlsearch &amp;&amp; $bootstop &gt;= 100 &amp;&amp; $nchars &gt;= 1000</code> </precond> <group>0</group> </attributes> </parameter> <parameter ishidden="1" type="String"> <name>raxmlhpc_hybridlogic4b</name> <attributes> <format> <language>perl</language> <code>"raxml_7.2.6_hybrid mpi_processes=20 pthreads_per_process=4 nodes=10"</code> </format> --> <!-- must be -f a, and -N >= 100; nchars < 1000 --> <!-- <precond> <language>perl</language> <code>$mlsearch &amp;&amp; $bootstop &gt;= 100 &amp;&amp; $nchars &lt; 1000</code> </precond> <group>0</group> </attributes> </parameter> --> <!--Start -f d -N logic --> <!-- this section covers situations where -f d (default) and is in force. elements 5b, 6b, and 7b, cover replicates -f d -N; elements 7b, 8b, and 9b cover when -x or -b is in effect (but but -f a). Each of these should be the same as -f a when nchar is less than 1000 --> <!-- hybridlogic5b case a exists only when -f a; so now we are always numbering 5b, 6b, etc. when -f d -N is used, For nnn < 20, use one node with 2 MPI processes & 4 Pthreads --> <parameter ishidden="1" type="String"> <name>raxmlhpc_hybridlogic5b</name> <attributes> <format> <language>perl</language> <code>"raxmlHPC-HYBRID-7.2.7 -T 4"</code> </format> <precond> <!-- must be -f d -N < 20; nchars any--> <language>perl</language> <code>$specify_runs &amp;&amp; $altrun_number &lt; 20 </code> </precond> <group>0</group> </attributes> </parameter> <parameter ishidden="1" type="String"> <name>raxmlhpc_hybridlogic5b_scheduler</name> <attributes> <paramfile>scheduler.conf</paramfile> <format> <language>perl</language> <code> "jobtype=mpi\\n" . "mpi_processes=2\\n" . 
"threads_per_process=4\\n" . "nodes=1\\n" </code> </format> <!-- the precond for each logic scheduler statement is necessarily identical to the one before it --> <precond> <!-- must be -f d -N < 20; nchars any --> <language>perl</language> <code>$specify_runs &amp;&amp; $altrun_number &lt; 20 </code> </precond> <group>0</group> </attributes> </parameter> <parameter ishidden="1" type="String"> <name>raxmlhpc_hybridlogic6b</name> <attributes> <format> <language>perl</language> <code>"raxmlHPC-HYBRID-7.2.7 -T 4"</code> </format> <precond> <!-- must be -f d -N < 50; nchars any --> <language>perl</language> <code>$specify_runs &amp;&amp; $altrun_number &gt;= 20 &amp;&amp; $altrun_number &lt; 50</code> </precond> <group>0</group> </attributes> </parameter> <parameter ishidden="1" type="String"> <name>raxmlhpc_hybridlogic6b_scheduler</name> <attributes> <paramfile>scheduler.conf</paramfile> <format> <language>perl</language> <code> "jobtype=mpi\\n" . "mpi_processes=4\\n" . "threads_per_process=4\\n" . "nodes=2\\n" </code> </format> <precond> <!-- must be -f d -N < 50; nchars any --> <language>perl</language> <code>$specify_runs &amp;&amp; $altrun_number &gt;= 20 &amp;&amp; $altrun_number &lt; 50</code> </precond> <group>0</group> </attributes> </parameter> <parameter ishidden="1" type="String"> <name>raxmlhpc_hybridlogic7b</name> <attributes> <format> <language>perl</language> <code>"raxmlHPC-HYBRID-7.2.7 -T 4 "</code> </format> <precond> <!-- must be -f d, and -N >= 50; nchars any --> <language>perl</language> <code>$specify_runs &amp;&amp; $altrun_number &gt;= 50</code> </precond> <group>0</group> </attributes> </parameter> <parameter ishidden="1" type="String"> <name>raxmlhpc_hybridlogic7b_scheduler</name> <attributes> <paramfile>scheduler.conf</paramfile> <format> <language>perl</language> <code> "jobtype=mpi\\n" . "mpi_processes=10\\n" . "threads_per_process=4\\n" . 
"nodes=5\\n" </code> </format> <precond> <!-- must be -f d, and -N >= 50; nchars any --> <language>perl</language> <code>$specify_runs &amp;&amp; $altrun_number &gt;= 50</code> </precond> <group>0</group> </attributes> </parameter> <!-- this section covers situations where -f d (default) and is in force and boostrapps are specifed by -x or -b, but -f a is not in force. each of these should be the same as -f a when nchar is less than 2000 --> <!-- hybridlogic8b case a exists only when -f d; so now we are always numbering 8b, 9b, etc. this applies when -f d -x or -b AND -N nnn is used. For nnn < 20, use one node with 2 MPI processes & 4 Pthreads --> <parameter ishidden="1" type="String"> <name>raxmlhpc_hybridlogic8b</name> <attributes> <format> <language>perl</language> <code>"raxmlHPC-HYBRID-7.2.7 -T 4"</code> </format> <precond> <!-- must be -f d -x or -b AND -N < 20; nchars any--> <language>perl</language> <code>($mulparambootstrap_seed || $bootstrap_seed) &amp;&amp; !$mlsearch &amp;&amp; $bootstop &lt; 20</code> </precond> <group>0</group> </attributes> </parameter> <parameter ishidden="1" type="String"> <name>raxmlhpc_hybridlogic8b_scheduler</name> <attributes> <paramfile>scheduler.conf</paramfile> <format> <language>perl</language> <code> "jobtype=mpi\\n" . "mpi_processes=2\\n" . "threads_per_process=4\\n" . 
"nodes=1\\n" </code> </format> <!-- the precond for each logic scheduler statement is necessarily identical to the one before it --> <precond> <!-- must be -f d -x or -b AND -N < 20; nchars any --> <language>perl</language> <code>($mulparambootstrap_seed || $bootstrap_seed) &amp;&amp; !$mlsearch &amp;&amp; $bootstop &lt; 20</code> </precond> <group>0</group> </attributes> </parameter> <parameter ishidden="1" type="String"> <name>raxmlhpc_hybridlogic9b</name> <attributes> <format> <language>perl</language> <code>"raxmlHPC-HYBRID-7.2.7 -T 4"</code> </format> <precond> <!-- must be -f d -x or -b AND -N < 50; nchars any --> <language>perl</language> <code>($mulparambootstrap_seed || $bootstrap_seed) &amp;&amp; !$mlsearch &amp;&amp; $bootstop &gt;= 20 &amp;&amp; $bootstop &lt; 50</code> </precond> <group>0</group> </attributes> </parameter> <parameter ishidden="1" type="String"> <name>raxmlhpc_hybridlogic9b_scheduler</name> <attributes> <paramfile>scheduler.conf</paramfile> <format> <language>perl</language> <code> "jobtype=mpi\\n" . "mpi_processes=4\\n" . "threads_per_process=4\\n" . 
"nodes=2\\n" </code> </format> <precond> <!-- must be -f d -x or -b AND -N < 50; nchars any --> <language>perl</language> <code>($mulparambootstrap_seed || $bootstrap_seed) &amp;&amp; !$mlsearch &amp;&amp; $bootstop &gt;= 20 &amp;&amp; $bootstop &lt; 50</code> </precond> <group>0</group> </attributes> </parameter> <parameter ishidden="1" type="String"> <name>raxmlhpc_hybridlogic10b</name> <attributes> <format> <language>perl</language> <code>"raxmlHPC-HYBRID-7.2.7 -T 4 "</code> </format> <precond> <!-- must be -f d -x or -b AND -N >= 50; nchars any --> <language>perl</language> <code>($mulparambootstrap_seed || $bootstrap_seed) &amp;&amp; !$mlsearch &amp;&amp; $bootstop &gt;= 50</code> </precond> <group>0</group> </attributes> </parameter> <parameter ishidden="1" type="String"> <name>raxmlhpc_hybridlogic10b_scheduler</name> <attributes> <paramfile>scheduler.conf</paramfile> <format> <language>perl</language> <code> "jobtype=mpi\\n" . "mpi_processes=10\\n" . "threads_per_process=4\\n" . "nodes=5\\n" </code> </format> <precond> <!-- must be -f d -x or -b AND -N >= 50; nchars any --> <language>perl</language> <code>($mulparambootstrap_seed || $bootstrap_seed) &amp;&amp; !$mlsearch &amp;&amp; $bootstop &gt;= 50</code> </precond> <group>0</group> </attributes> </parameter> <!-- hybridlogic11 For all options where bootstopping is automatic to be run on Abe, use five nodes with 5 MPI processes and 8 Pthreads **only if** -x xxx -f a is specified & nchar >= 2,000 --> <parameter ishidden="1" type="String"> <name>raxmlhpc_hybridlogic11</name> <attributes> <format> <language>perl</language> <code>"raxmlHPC-HYBRID-7.2.7 -T 8 "</code> </format> <precond> <!-- must be -f a AND use bootstopping of any kind, and nchars >= 2000 --> <language>perl</language> <code>$mlsearch &amp;&amp; $use_bootstopping &amp;&amp; $nchars &gt;= 2000</code> </precond> <group>0</group> </attributes> </parameter> <parameter ishidden="1" type="String"> <name>raxmlhpc_hybridlogic11_scheduler</name> 
<attributes> <paramfile>scheduler.conf</paramfile> <format> <language>perl</language> <code> "jobtype=mpi\\n" . "mpi_processes=5\\n" . "threads_per_process=8\\n" . "nodes=5\\n" </code> </format> <precond> <!-- must be -f a AND use bootstopping of any kind, and nchars >= 2000 --> <language>perl</language> <code>$mlsearch &amp;&amp; $use_bootstopping &amp;&amp; $nchars &gt;= 2000</code> </precond> <group>0</group> </attributes> </parameter> <!-- this one is for . 10 MPI processes and 4 Pthreads in all other cases --> <parameter ishidden="1" type="String"> <name>raxmlhpc_hybridlogic12</name> <attributes> <format> <language>perl</language> <code>"raxmlHPC-HYBRID-7.2.7 -T 4 "</code> </format> <precond> <!-- must be -f d -x or -b AND use_bootstopping; nchars any or -f a and nchars lt 2000 --> <language>perl</language> <code>($mulparambootstrap_seed || ($bootstrap_seed &amp;&amp; !$mlsearch) || ($mlsearch &amp;&amp; $nchars &lt; 2000)) &amp;&amp; $use_bootstopping</code> </precond> <group>0</group> </attributes> </parameter> <parameter ishidden="1" type="String"> <name>raxmlhpc_hybridlogic12b_scheduler</name> <attributes> <paramfile>scheduler.conf</paramfile> <format> <language>perl</language> <code> "jobtype=mpi\\n" . "mpi_processes=10\\n" . "threads_per_process=4\\n" . "nodes=5\\n" </code> </format> <precond> <!-- must be -f d -x or -b AND use_bootstopping; nchars any or -f a and nchars lt 2000 --> <language>perl</language> <code>($mulparambootstrap_seed || ($bootstrap_seed &amp;&amp; !$mlsearch) || ($mlsearch &amp;&amp; $nchars &lt; 2000)) &amp;&amp; $use_bootstopping</code> </precond> <group>0</group> </attributes> </parameter> <!-- hybridlogic13 For all options where bootstrapping and -N are not used (not -f d -x (bootstrap_seed) or -b (mulparambootstrap_seed xxx -N nnn; and not -f d N nnn (specify_runs) to be run on Abe, use pthreads version, 8 Pthreads in one node. 
--> <parameter ishidden="1" type="String"> <name>raxmlhpc_hybridlogic13</name> <attributes> <format> <language>perl</language> <code>"raxmlHPC-PTHREADS-7.2.7 -T 8"</code> </format> <precond> <!-- deliver this line for any option that does not involve -f a or -f d --> <language>perl</language> <code>!$mulparambootstrap_seed &amp;&amp; !$bootstrap_seed &amp;&amp; !$specify_runs</code> </precond> <group>0</group> </attributes> </parameter> <!-- required input file --> <parameter ismandatory="1" issimple="1" isinput="1" type="InFile"> <name>infile</name> <attributes> <prompt>Sequences File (relaxed phylip format) (-s)</prompt> <format> <language>perl</language> <code>" -s infile"</code> </format> <group>1</group> <filenames>infile</filenames> </attributes> </parameter> <!-- -n argument to specify output file suffix is required --> <parameter type="String" ishidden="1"> <name>outsuffix</name> <attributes> <format> <language>perl</language> <code>" -n result"</code> </format> <group>1</group> </attributes> </parameter> <!-- catchall to route std out to a log file --> <!-- the logfile isnt used on teragrid, it is replaced with std out <parameter type="String" ishidden="1"> <name>out_logifle</name> <attributes> <format> <language>perl</language> <code>" > output.txt"</code> </format> <group>99</group> </attributes> </parameter> --> <!-- Parameters with visible controls start here --> <parameter type="Float" issimple="1" ismandatory="1"> <name>runtime</name> <attributes> <group>1</group> <paramfile>scheduler.conf</paramfile> <prompt>Maximum Hours to Run (click here for help setting this correctly)</prompt> <vdef> <value>0.25</value> </vdef> <comment> <value>Estimate the maximum time your job will need to run (168hrs max). Your job will be killed if it doesn't finish within the time you specify, however jobs with shorter maximum run times are often scheduled sooner than longer jobs. Jobs that specify &lt; .5 hr are run in the debug queue and are normally scheduled quickly. 
Jobs that need less than 48 hrs go to the normal queue and jobs requiring up to 168 hrs go to the long queue.</value> </comment> <ctrls> <ctrl> <message>Maximum Hours to Run must be less than 168</message> <language>perl</language> <code>$runtime &gt; 168.0</code> </ctrl> </ctrls> <format> <language>perl</language> <code>"runhours=$value\\n"</code> </format> </attributes> </parameter> <!-- user enters nchar parameter this will help decide how to run --> <parameter type="Integer" issimple="1"> <name>nchars</name> <attributes> <prompt>Number of chars in your dataset</prompt> <vdef> <value>1000</value> </vdef> <ctrls> <ctrl> <message>Please enter a value for the number of characters in your data matrix</message> <language>perl</language> <code>!defined $nchars</code> </ctrl> <ctrl> <message>The number of characters in the matrix must be 1 or greater.</message> <language>perl</language> <code>$nchars &lt; 1</code> </ctrl> </ctrls> <group>15</group> <comment> <value> Knowing the number of characters in your dataset helps us determine the most efficient way to run raxml. We need to know the number of characters per row in the input data matrix. </value> </comment> </attributes> </parameter> <!-- is it protein or dna input ? --> <parameter type="Excl" issimple="1" ismandatory="1"> <name>datatype</name> <attributes> <prompt>Sequence Type</prompt> <vlist> <value>protein</value> <label>Protein</label> <value>dna</value> <label>Nucleotide</label> <value>rna</value> <label>RNA Structure</label> <value>binary</value> <label>Binary</label> </vlist> <vdef> <value>dna</value> </vdef> <group>2</group> </attributes> </parameter> <!-- outgroup (-o) --> <parameter type="String" issimple="1"> <name>outgroup</name> <attributes> <prompt>Outgroup (one or more comma-separated outgroups, see comment for syntax)</prompt> <format> <language>perl</language> <code>(defined $value)? 
" -o $value " : "" </code> </format> <group>10</group> <comment> <value>The correct syntax for the box is outgroup1,outgroup2,outgroupn. If white space is introduced (e.g. outgroup1, outgroup2, outgroupn) the program will fail with the message "Error, you must specify a model of substitution with the '-m' option" </value> </comment> </attributes> </parameter> <!-- rev 2 --> <!--category (-c) --> <parameter type="Integer" issimple="1" ismandatory="1"> <name>number_cats</name> <attributes> <prompt>Specify the number of distinct rate categories (-c)</prompt> <format> <language>perl</language> <code>(defined $value)? " -c $value" : "" </code> </format> <vdef> <value>25</value> </vdef> <group>2</group> <precond> <language>perl</language> <code>($datatype eq "dna" &amp;&amp; $dna_gtrcat) || ($datatype eq "protein" &amp;&amp; $prot_sub_model eq "PROTCAT") || ($datatype eq "binary" &amp;&amp; $bin_model eq "BINCAT")</code> </precond> <comment> <value>This option allows you to specify the number of distinct rate categories, into which the individually optimized rates for each individual site are thrown under -m GTRCAT. The default of -c 25 works fine in most practical cases. </value> </comment> </attributes> </parameter> <!-- user supplied starting tree (-t) --> <parameter issimple="1" type="InFile"> <name>treetop</name> <attributes> <prompt>Supply a starting tree (Not available when doing rapid bootstrapping) (-t)</prompt> <format> <language>perl</language> <code>" -t start_tree.tre"</code> </format> <group>2</group> <filenames>start_tree.tre</filenames> <!-- <ctrls> <ctrl> <message>Sorry, you can't specify a random seed AND supply a starting tree</message> <language>perl</language> <code>defined $value &amp;&amp; $provide_parsimony_seed</code> </ctrl> </ctrls> --> <comment> <value>Specifies a user starting tree file in Newick format. Not available when doing rapid bootstrapping. Branch lengths of that tree will be ignored. 
Note that you can also specify a non-comprehensive (not containing all taxa in the alignment) starting tree now. This might be useful if newly aligned/sequenced taxa have been added to your alignment. Initially, taxa will be added to the tree using the MP criterion. The comprehensive tree will then be optimized under ML.</value> </comment> </attributes> </parameter> <!--PARSIMONY SEED VALUE FOR CONSTANT STARTING TREE--> <!-- I split the seeds into invocation, and number entry, due to command line conflicts --> <parameter type="Switch" issimple="1" ismandatory="1"> <name>provide_parsimony_seed</name> <attributes> <prompt>Specify a random seed value for parsimony inferences (-p)</prompt> <vdef> <value>0</value> </vdef> <precond> <language>perl</language> <code>!$startingtreeonly &amp;&amp; !defined $treetop</code> </precond> <ctrls> <ctrl> <message>Sorry, you cannot specify a starting tree (via the -t option above) and a random seed value</message> <language>perl</language> <code>defined $treetop</code> </ctrl> </ctrls> <comment> <value>Specify a random number seed. The -p option allows you and others to reproduce your results (reproducible/verifiable experiments) and will help Alexis debug the program. Do not use this option if you want to generate multiple different starting trees.</value> </comment> </attributes> </parameter> <parameter type="Integer" issimple="1" ismandatory="1"> <name>parsimony_seed_val</name> <attributes> <prompt>Enter a random seed value for parsimony inferences (gives reproducible results from random starting tree)</prompt> <format> <language>perl</language> <code>($value) ? 
" -p $value" : ""</code> </format> <vdef> <value>12345</value> </vdef> <group>2</group> <precond> <language>perl</language> <code>$provide_parsimony_seed</code> </precond> <ctrls> <ctrl> <message>Please enter a random seed for the -p option (eg 12345)</message> <language>perl</language> <code>$provide_parsimony_seed &amp;&amp; !defined $parsimony_seed_val</code> </ctrl> </ctrls> </attributes> </parameter> <!-- rearrangement (-i) INVOCATION AND SPECIFICATION --> <parameter type="Switch" issimple="1" ismandatory="1"> <name>rearrangement_yes</name> <attributes> <prompt>Specify an initial rearrangement setting (-i)</prompt> <vdef> <value>0</value> </vdef> </attributes> </parameter> <parameter type="Integer" issimple="1" ismandatory="1"> <name>number_rearrange</name> <attributes> <prompt>Specify the distance from original pruning point (-i)</prompt> <format> <language>perl</language> <code>(defined $value)? " -i $value" : "" </code> </format> <vdef> <value>10</value> </vdef> <precond> <language>perl</language> <code>$rearrangement_yes</code> </precond> <ctrls> <ctrl> <message>Please specify the distance from original pruning point (default would be 10)</message> <language>perl</language> <code>$rearrangement_yes &amp;&amp; !defined $number_rearrange</code> </ctrl> </ctrls> <group>2</group> <comment> <value>This option allows you to specify an initial rearrangement setting for the initial phase of the search algorithm. If you specify e.g. -i 10; the pruned subtrees will be inserted up to a distance of 10 nodes away from their original pruning point. If you dont specify -i; a "good" initial rearrangement setting will automatically be determined by RAxML. 
</value> </comment> </attributes> </parameter> <!-- rev 2 --> <!-- constraint (-g) --> <parameter type="InFile" issimple="1"> <name>constraint</name> <attributes> <precond> <language>perl</language> <code>!defined $binary_backbone &amp;&amp; !$startingtreeonly</code> </precond> <prompt>Constraint (-g)</prompt> <filenames>constraint.tre</filenames> <format> <language>perl</language> <!-- parameters of type Results are always processed so we need to check whether we actually got a value entered. --> <code>defined $value ? " -g constraint.tre" : ""</code> </format> <group>2</group> <comment> <value> This option allows you to specify an incomplete or comprehensive multifurcating constraint tree for the RAxML search in NEWICK format. Initially, multifurcations are resolved randomly. If the tree is incomplete (does not contain all taxa) the remaining taxa are added by using the MP criterion. Once a comprehensive (containing all taxa) bifurcating tree is computed, it is further optimized under ML respecting the given constraints. Important: If you specify a non-comprehensive constraint, e.g., a constraint tree that does not contain all taxa, RAxML will assume that any taxa that are not found in the constraint topology are unconstrained, i.e., these taxa can be placed in any part of the tree. As an example consider an alignment with 10 taxa: Loach, Chicken, Human, Cow, Mouse, Whale, Seal, Carp, Rat, Frog. 
If, for example you would like Loach, Chicken, Human, Cow to be monophyletic you would specify the constraint tree as follows: </value> <value> ((Loach, Chicken, Human, Cow),(Mouse, Whale, Seal, Carp, Rat, Frog)); </value> <value> Moreover, if you would like Loach, Chicken, Human, Cow to be monophyletic and in addition Human, Cow to be monophyletic within that clade you could specify: </value> <value> ((Loach, Chicken, (Human, Cow)),(Mouse, Whale, Seal, Carp, Rat, Frog)); </value> <value> If you specify an incomplete constraint: </value> <value> ((Loach, Chicken, Human, Cow),(Mouse, Whale, Seal, Carp)); </value> <value> the two groups Loach, Chicken, Human, Cow and Mouse, Whale, Seal, Carp will be monophyletic, while Rat and Frog can end up anywhere in the tree. </value> </comment> </attributes> </parameter> <!-- binary backbone (-r) --> <parameter type="InFile" issimple="1"> <name>binary_backbone</name> <attributes> <precond> <language>perl</language> <code>! defined $constraint</code> </precond> <prompt>Binary Backbone (-r)</prompt> <filenames>binary_backbone.tre</filenames> <format> <language>perl</language> <!-- parameters of type Results are always processed so we need to check whether we actually got a value entered. --> <code>(defined $value) ? " -r binary_backbone.tre" : ""</code> </format> <group>2</group> <comment> <value>This option allows you to pass a binary/bifurcating constraint/backbone tree in NEWICK format to RAxML. Note that using this option only makes sense if this tree contains fewer taxa than the input alignment. The remaining taxa will initially be added by using the MP criterion. Once a comprehensive tree with all taxa has been obtained it will be optimized under ML respecting the restrictions of the constraint tree. 
</value> </comment> </attributes> </parameter> <!-- Optional mixed model file --> <!-- If user doesn't enter a value the partition parameter won't be in the map that the web app sends the command renderer (which evaluates the perl for parameters that are present), so we don't need to worry about making the format code conditional since this parameter just won't be evaluated. Command renderer evaluates 3 types of parameters: 1) those that appear in the map coming from the gui, 2) those that are hidden, 3) those that are of type OutFile or Result. --> <parameter type="InFile" issimple="1"> <name>partition</name> <attributes> <prompt>Use a mixed/partitioned model? (-q)</prompt> <format> <language>perl</language> <code>" -q part"</code> </format> <group>2</group> <filenames>part</filenames> <comment> <value>This parameter allows you to upload a file that specifies the regions of your alignment for which an individual model of nucleotide substitution should be estimated. This will typically be used to infer trees for long (in terms of base pairs) multi-gene alignments. If DNA and protein mixed models are used together (for example) you should choose a model option based on the model of rate heterogeneity you want to use. If you specify either -m GTRCAT or PROTCAT, the CAT model will be used, if you specify -m GTRGAMMA or -m BINGAMMA, the GAMMA model will be used .... For example, if -m GTRGAMMA is used, individual alpha-shape parameters, GTR-rates, and empirical base frequencies will be estimated and optimized for each partition. Since RAxML can now handle mixed Amino Acid and DNA alignments, you must specify the data type in the partition file, before the partition name. For DNA, this means you have to add DNA to each line in the partition. For AA data you must specify the transition matrices for each partition: The AA substitution model must be the first entry in each line and must be separated by a comma from the gene name, just like the DNA token above. 
You can not assign different models of rate heterogeneity to different partitions, i.e. it will be either CAT, GAMMA, GAMMAI etc. for all partitions, as specified with -m. Finally, if you have a concatenated DNA and AA alignments, with DNA data at positions 1 - 500 and AA data at 501-1000 with the WAG model the partition file should look as follows:</value> <value>DNA, gene1 = 1-500</value> <value>WAG gene2 = 501-1000</value> </comment> </attributes> </parameter> <!-- 7.0.4 "-f s": option can be used to split a multi-gene alignment into individual genes, provided a model file with -q. This might be useful to select best fitting models for individual partitions of an AA multi-gene alignment or to infer per-partition trees in order to analyze tree compatibility. Example: raxmlHPC -f s -q part -s alg -m GTRCAT -n TEST. --> <!-- <parameter type="Switch" issimple="1" ismandatory="1"> <name>split_multigene</name> <attributes> <prompt>Split a multi-gene alignment into individual genes (-f s)</prompt> <precond> <language>perl</language> <code>defined $partition</code> </precond> <format> <language>perl</language> <code>($value) ? " -f s" : "" </code> </format> <ctrls> <ctrl> <message>In order to use this option, you must upload a set of bootstrapped trees using the -z option</message> <language>perl</language> <code>!defined $partition</code> </ctrl> </ctrls> <vdef> <value>0</value> </vdef> <comment> <value>The -f s option can be used to split a multi-gene alignment into individual genes, provided a model file with -q. This might be useful to select best fitting models for individual partitions of an AA multi-gene alignment or to infer per-partition trees in order to analyze tree compatibility. Example: raxmlHPC -f s -q part -s alg -m GTRCAT -n TEST</value> </comment> </attributes> </parameter> --> <!-- -M Switch on estimation of individual per-partition branch lengths. Only has effect when used in combination with -q and an alignment partition file. 
Branch lengths for individual partitions will be printed to separate files. A weighted average of the branch lengths is also computed by using the respective partition lengths (number of columns per partition). Note that, this does not take into account the "gappyness" of partitions, but I am currently not sure how to solve this problem. By default the -M option is turned off for partitioned analyses, i.e., RAxML will compute a joined branch length estimate. Example: raxmlHPC - alg -m GTRGAMMA -q part -M -n TEST.--> <parameter type="Switch" issimple="1" ismandatory="1"> <name>estimate_perpartbrlen</name> <attributes> <prompt>Estimate individual per-partition branch lengths (-M)</prompt> <precond> <language>perl</language> <code>defined $partition</code> </precond> <format> <language>perl</language> <code>($value) ? " -M" : "" </code> </format> <vdef> <value>0</value> </vdef> <comment> <value>The -M option switches on estimation of individual per-partition branch lengths. Only has effect when used in combination with -q and an alignment partition file. Branch lengths for individual partitions will be printed to separate files. A weighted average of the branch lengths is also computed by using the respective partition lengths (number of columns per partition). Note that, this does not take into account the "gappyness" of partitions, but I am currently not sure how to solve this problem. By default RAxML will compute a joined branch length estimate.</value> </comment> </attributes> </parameter> <!-- rev 5 --> <!-- Estimate proportion of invariable sites, mlsearch is a precondition --> <!-- The control should be of type switch, but I need the value of the parameter to be either "I" or "", so I made it type Excl instead. 
--> <!-- disabled 5/5/2009 based on information in the manual --> <parameter issimple="1" type="Excl" ismandatory="1"> <name>invariable</name> <attributes> <prompt>Estimate proportion of invariable sites (GTRGAMMA + I) (currently disabled)</prompt> <precond> <language>perl</language> <code>$placeholder</code> </precond> <vlist> <value>I</value> <label>yes</label> <value/> <label>no</label> </vlist> <vdef> <value>0</value> </vdef> <group>2</group> <comment> <value>This option is disabled in keeping with the developer's best advice.</value> </comment> </attributes> </parameter> <!-- Substitution matrix for protein datatype, datatype=protein is a precondition --> <!-- Is used to build the -m model string --> <!-- A value must be selected when datatype=protein, so I added a vdef, so users won't have a job fail accidentally; this also highlights where the value needs to be set by populating the box with text--> <!-- Optional excludes these characters --> <!-- If user doesn't enter a value, the parameter won't be in the map that the web app sends the command renderer (which evaluates the perl for parameters that are present), so we don't need to worry about making the format code conditional; if null, this parameter just won't be evaluated. Command renderer evaluates 3 types of parameters: 1) those that appear in the map coming from the gui, 2) those that are hidden, 3) those that are of type OutFile or Result. --> <parameter type="InFile" issimple="1"> <name>exclude_file</name> <attributes> <prompt>Create an input file that excludes the range of positions specified in this file (-E)</prompt> <format> <language>perl</language> <code>" -E excl"</code> </format> <group>2</group> <filenames>excl</filenames> <comment> <value>This option is used to exclude specific positions in the matrix. 
For example, uploading a file that contains the text: 100-200 300-400 will create a file that excludes all columns between positions 100 and 200 as well as all columns between positions 300 and 400. Note that the boundary numbers (positions 100, 200, 300, and 400) will also be excluded. To exclude a single column write (100-100). This option does not run an analysis but just prints an alignment file without the excluded columns. Save this file to your data area, and then run the real analysis. If you use a mixed model, an appropriately adapted model file will also be written. The ntax element of the phylip files is automatically corrected Example: raxmlHPC -E excl -s infile -m GTRCAT -q part -n TEST. In this case the files with columns excluded will be named infile.excl and part.excl. </value> </comment> </attributes> </parameter> <!-- Nucleotides as sequences (not secondary structure --> <parameter type="Paragraph"> <paragraph> <name>nucleic_opts</name> <prompt>Nucleic Acid Options</prompt> <parameters> <!-- NUCLEOTIDES: "-m GTRCAT" : GTR + Optimization of substitution rates + Optimization of site-specific evolutionary rates which are categorized into numberOfCategories distinct rate categories for greater computational efficiency. Final tree might be evaluated under GTRGAMMA, depending on the tree search option NOT SUPPORTED "-m GTRCAT_FLOAT" : Same as above but uses single-precision floating point arithmetics instead of double-precision Usage only recommend for testing, the code will run slower, but can save almost 50% of memory. If you have problems with phylogenomic datasets and large memory requirements you may give it a shot. Keep in mind that numerical stability seems to be okay but needs further testing. NOT SUPPORTED "-m GTRCATI" : GTR + Optimization of substitution rates + Optimization of site-specific evolutionary rates which are categorized into numberOfCategories distinct rate categories for greater computational efficiency. 
Final tree might be evaluated under GTRGAMMAI, depending on the tree search option "-m GTRGAMMA" : GTR + Optimization of substitution rates + GAMMA model of rate heterogeneity (alpha parameter will be estimated) NOT SUPPORTED "-m GTRGAMMA_FLOAT" : Same as GTRGAMMA, but also with single-precision arithmetics, same cautionary notes as for GTRCAT_FLOAT apply. NOT SUPPORTED "-m GTRGAMMAI" : Same as GTRGAMMA, but with estimate of proportion of invariable sites --> <parameter type="Switch"> <name>dna_gtrcat</name> <attributes> <prompt>Use GTRCAT for the bootstrapping phase, and GTRGAMMA for the final tree inference (default)</prompt> <format> <language>perl</language> <code>($value) ? "-m GTRCAT" : "" </code> </format> <vdef> <value>1</value> </vdef> <group>2</group> <precond> <language>perl</language> <code>!$dna_gtrgamma &amp;&amp; ($datatype eq "dna" || $datatype eq "rna")</code> </precond> <comment> <value>The meaning of the model name GTRGAMMA used by RAxML 7.2.0 is exactly opposite that used in RAxML 7.0.4, so we have eliminated selection by model name. Instead we use a description of the model analysis. This selection gives GTR + Optimization of substitution rates + Optimization of site-specific evolutionary rates which are categorized into "numberOfCategories" distinct rate categories for greater computational efficiency. Final tree is evaluated under GTRGAMMA. GTRMIX and GTRCAT_GAMMA have been eliminated as options. FLOAT options that are native in RAxML 7.2.3 are currently not supported here. </value> </comment> </attributes> </parameter> <parameter type="Switch"> <name>dna_gtrgamma</name> <attributes> <prompt>Use GTRGAMMA for the bootstrapping phase and GTRGAMMA for the final tree (takes longer)</prompt> <format> <language>perl</language> <code>($value) ? 
"-m GTRGAMMA" : "" </code> </format> <vdef> <value>0</value> </vdef> <group>2</group> <precond> <language>perl</language> <code>!$dna_gtrcat &amp;&amp; ($datatype eq "dna" || $datatype eq "rna") </code> </precond> <comment> <value>The meaning of the model names used by RAxML 7.2.0 are exactly opposite to those used in RAxML 7.0.4, so we have eliminated selection by model name. Instead we use a description of the model analysis. This option gives GTR + Optimization of substitution rates + GAMMA model of rate heterogeneity (alpha parameter will be estimated) for bootstrap AND final evaluation. An analysis run in this way will take a good deal longer than the alternative option (what used to be called GTRGAMMA in RAxML v.7.0.4). GTRMIX and GTRCAT_GAMMA have been eliminated as options. FLOAT options that are native in RAxML 7.2.3 are currently not supported here. </value> </comment> </attributes> </parameter> </parameters> </paragraph> </parameter> <parameter type="Paragraph"> <paragraph> <name>protein_opts</name> <prompt>Protein Analysis Options</prompt> <parameters> <!-- AMINO ACIDS: "-m PROTCATmatrixName[F]" : specified AA matrix + Optimization of substitution rates + Optimization of site-specific evolutionary rates which are categorized into numberOfCategories distinct rate categories for greater computational efficiency. Final tree might be evaluated automatically under PROTGAMMAmatrixName[f], depending on the tree search option "-m PROTCATmatrixName[F]_FLOAT" : PROTCAT with single precision arithmetics, same cautionary notes as for GTRCAT_FLOAT apply "-m PROTCATImatrixName[F]" : specified AA matrix + Optimization of substitution rates + Optimization of site-specific evolutionary rates which are categorized into numberOfCategories distinct rate categories for greater computational efficiency. 
Final tree might be evaluated automatically under PROTGAMMAImatrixName[f], depending on the tree search option "-m PROTGAMMAmatrixName[F]" : specified AA matrix + Optimization of substitution rates + GAMMA model of rate heterogeneity (alpha parameter will be estimated) "-m PROTGAMMAmatrixName[F]_FLOAT" : PROTGAMMA with single precision arithmetics, same cautionary notes as for GTRCAT_FLOAT apply "-m PROTGAMMAImatrixName[F]" : Same as PROTGAMMAmatrixName[F], but with estimate of proportion of invariable sites Available AA substitution models: DAYHOFF, DCMUT, JTT, MTREV, WAG, RTREV, CPREV, VT, BLOSUM62, MTMAM, LG, GTR With the optional "F" appendix you can specify if you want to use empirical base frequencies Please note that for mixed models you can in addition specify the per-gene AA model in the mixed model file (see manual for details). Also note that if you estimate AA GTR parameters on a partitioned dataset, they will be linked (estimated jointly) across all partitions to avoid over-parameterization --> <!-- not sure if we should support cat or not --> <parameter type="Excl" ismandatory="1"> <name>prot_sub_model</name> <attributes> <prompt>Choose GAMMA or CAT model:</prompt> <precond> <language>perl</language> <code>$datatype eq "protein"</code> </precond> <vlist> <value>PROTGAMMA</value> <label>Protein GAMMA</label> <value>PROTCAT</value> <label>Protein CAT</label> </vlist> <flist> <value>PROTGAMMA</value> <code>"-m PROTGAMMA$prot_matrix_spec$use_emp_freqs"</code> <value>PROTCAT</value> <code>"-m PROTCAT$prot_matrix_spec$use_emp_freqs"</code> </flist> <vdef> <value>PROTCAT</value> </vdef> <group>2</group> </attributes> </parameter> <parameter type="Excl" ismandatory="1"> <name>prot_matrix_spec</name> <attributes> <prompt>Protein Substitution Matrix</prompt> <precond> <language>perl</language> <code>$datatype eq "protein"</code> </precond> <vlist> <value>DAYHOFF</value> <label>DAYHOFF</label> <value>DCMUT</value> <label>DCMUT</label> <value>JTT</value> 
<label>JTT</label> <value>MTREV</value> <label>MTREV</label> <value>WAG</value> <label>WAG</label> <value>RTREV</value> <label>RTREV</label> <value>CPREV</value> <label>CPREV</label> <value>VT</value> <label>VT</label> <value>BLOSUM62</value> <label>BLOSUM62</label> <value>MTMAM</value> <label>MTMAM</label> <value>LG</value> <label>LG</label> <value>GTR</value> <label>GTR</label> </vlist> <vdef> <value>DAYHOFF</value> </vdef> <comment> <value>Note: FLOAT and invariable sites (I) options are not exposed here. If you require this option, please contact mmiller@sdsc.edu.</value> <value>-m PROTCATmatrixName: analyses using the specified AA matrix + Optimization of substitution rates + Optimization of site-specific evolutionary rates which are categorized into numberOfCategories distinct rate categories for greater computational efficiency. Final tree might be evaluated automatically under PROTGAMMAmatrixName[f], depending on the tree search option. </value> <value>-m PROTGAMMAmatrixName[F] analyses use the specified AA matrix + Optimization of substitution rates + GAMMA model of rate heterogeneity (alpha parameter will be estimated)</value> <value>Available AA substitution models: DAYHOFF, DCMUT, JTT, MTREV, WAG, RTREV, CPREV, VT, BLOSUM62, MTMAM, LG, GTR. You can specify if you want to use empirical base frequencies. Please note that for mixed models you can in addition specify the per-gene AA model in the mixed model file (see manual for details). Also note that if you estimate AA GTR parameters on a partitioned dataset, they will be linked (estimated jointly) across all partitions to avoid over-parametrization.</value> </comment> </attributes> </parameter> <!--[-P proteinModel] Specify the file name of a user-defined AA (Protein) substitution model. 
This file must contain 420 entries, the first 400 being the AA substitution rates (this must be a symmetric matrix) and the last 20 are the empirical base frequencies --> <parameter type="InFile" > <name>user_prot_matrix</name> <attributes> <prompt>Upload a Custom Protein Substitution Matrix</prompt> <precond> <language>perl</language> <code>$datatype eq "protein"</code> </precond> <format> <language>perl</language> <code>"-P Userproteinmatrix.txt"</code> </format> <group>2</group> <filenames>Userproteinmatrix.txt</filenames> <comment> <value>Specify a file containing a user-defined Protein substitution model. This file must contain 420 entries, the first 400 entires are the AA substitution rates (this matrix must be symmetric) and the last 20 entries are the empirical base frequencies</value> </comment> </attributes> </parameter> <!-- Empirical Base Frequencies --> <parameter type="Excl"> <name>use_emp_freqs</name> <attributes> <prompt>Use empirical frequencies?</prompt> <precond> <language>perl</language> <code>$datatype eq "protein"</code> </precond> <vlist> <value>F</value> <label>Yes</label> <value></value> <label>No</label> </vlist> </attributes> </parameter> <!-- datatype=protein is a precondition, adds -F to end of -m model string --> </parameters> </paragraph> </parameter> <parameter type="Paragraph"> <paragraph> <name>Sec_structure_opts</name> <prompt>RNA Secondary Structure Options</prompt> <parameters> <!--***************************************************************************************************************************************************************************--> <!-- ******************************THESE FEATURES ARE READY TO GO, BUT WHO KNOWS IF THEY WORK************************************************--> <!-- [-S secondaryStructureFile] Specify the name of a secondary structure file. The file can contain "." for alignment columns that do not form part of a stem and characters. 
"()<>[]{}" to define stem regions and pseudoknots --> <parameter type="InFile" > <name>sec_str_file</name> <attributes> <precond> <language>perl</language> <code>$datatype eq "rna"</code> </precond> <prompt>Upload a Secondary Structure File (-S)</prompt> <filenames>sec_structure.txt</filenames> <format> <language>perl</language> <code>(defined $value) ? " -S sec_structure.txt" : ""</code> </format> <group>2</group> <comment> <value>This option allows you to provide a secondary structure file. The file can contain "." for alignment columns that do not form part of a stem and characters, while "(), [], and {}" are used to define stem regions and pseudoknots.</value> </comment> </attributes> </parameter> <!-- [-A secondaryStructureSubstModel] Specify one of the RNA secondary structure substitution models implemented in RAxML. The same nomenclature as in the PHASE manual is used, available models: S6A, S6B, S6C, S6D, S6E, S7A, S7B, S7C, S7D, S7E, S7F, S16, S16A, S16B DEFAULT: 16-state GTR model (S16) 6 state model nomenclature: http://www.cs.manchester.ac.uk/ai/Software/phase/manual/node101.html 7 state model nomenclature http://www.cs.manchester.ac.uk/ai/Software/phase/manual/node107.html 16 state model nomenclature http://www.cs.manchester.ac.uk/ai/Software/phase/manual/node114.html --> <parameter type="Excl" ismandatory="1"> <name>rna_model</name> <attributes> <prompt>Use an RNA Secondary Structure Substitution Model (-A)</prompt> <precond> <language>perl</language> <code>defined $sec_str_file</code> </precond> <vlist> <value>S6A</value> <label>Six State Model A</label> <value>S6B</value> <label>Six State Model B</label> <value>S6C</value> <label>Six State Model C</label> <value>S6D</value> <label>Six State Model D</label> <value>S6E</value> <label>Six State Model E</label> <value>S7A</value> <label>Seven State Model A</label> <value>S7B</value> <label>Seven State Model B</label> <value>S7C</value> <label>Seven State Model C</label> <value>S7D</value> <label>Seven 
State Model D</label> <value>S7E</value> <label>Seven State Model E</label> <value>S7F</value> <label>Seven State Model F</label> <value>S16A</value> <label>Sixteen State Model A</label> <value>S16B</value> <label>Sixteen State Model B</label> </vlist> <vdef> <value>S16A</value> </vdef> <format> <language>perl</language> <code>"-A $value"</code> </format> <group>2</group> <comment> <value>Use this option to specify one of the 6, 7, or 16 state RNA secondary structure substitution models.The nomenclature is identical to that used in the program PHASE. For more information, see PHASE documentation: 6 state model nomenclature: http://www.cs.manchester.ac.uk/ai/Software/phase/manual/node101.html; 7 state model nomenclature http://www.cs.manchester.ac.uk/ai/Software/phase/manual/node107.html; 16 state model nomenclature http://www.cs.manchester.ac.uk/ai/Software/phase/manual/node114.html</value> </comment> </attributes> </parameter> </parameters> </paragraph> </parameter> <parameter type="Paragraph"> <paragraph> <name>bin_opts</name> <prompt>Binary Matrix Options</prompt> <parameters> <!-- -m argument to specify substitution model is required --> <!-- mmiller changed the way this is done I exposed the other nucleic acid model options on 8/3/2009 --> <!-- [ -m Model of Binary (Morphological), Nucleotide or Amino Acid Substitution]: BINARY: (NEW for 7.2.0) "-m BINCAT" : Optimization of site-specific evolutionary rates which are categorized into numberOfCategories distinct rate categories for greater computational efficiency. Final tree might be evaluated automatically under BINGAMMA, depending on the tree search option "-m BINCATI" : Optimization of site-specific evolutionary rates which are categorized into numberOfCategories distinct rate categories for greater computational efficiency. 
Final tree might be evaluated automatically under BINGAMMAI, depending on the tree search option "-m BINGAMMA" : GAMMA model of rate heterogeneity (alpha parameter will be estimated) "-m BINGAMMAI" : Same as BINGAMMA, but with estimate of proportion of invariable sites The I option is not added in this interface --> <parameter type="Excl" ismandatory="1"> <name>bin_model</name> <attributes> <prompt>Binary data model (-m)</prompt> <precond> <language>perl</language> <code>$datatype eq "binary"</code> </precond> <vlist> <value>BINCAT</value> <label>Binary CAT</label> <value>BINGAMMA</value> <label>Binary GAMMA</label> </vlist> <vdef> <value>BINCAT</value> </vdef> <format> <language>perl</language> <code>"-m $value"</code> </format> <group>2</group> <comment> <value>Binary data is handled in RAxML 7.2.0. Binary CAT uses optimization of site-specific evolutionary rates, which are categorized into numberOfCategories (option -c) distinct rate categories for greater computational efficiency. Final tree might be evaluated automatically under BINGAMMA, depending on the tree search option. </value> <value>Binary GAMMA uses the GAMMA model of rate heterogeneity (alpha parameter will be estimated). The option for invariable sites is not provided at this time. The program's author supports the use of Gamma models.</value> </comment> </attributes> </parameter> </parameters> </paragraph> </parameter> <parameter type="Paragraph"> <paragraph> <name>set_analysis</name> <prompt>Select the Analysis</prompt> <parameters> <!-- rev 3 --> <!-- added for jbmunro --> <parameter type="Switch"> <name>startingtreeonly</name> <attributes> <prompt>Only compute a randomized parsimony starting tree (-y)</prompt> <format> <language>perl</language> <code> ($value)?" -y":""</code> </format> <vdef> <value>0</value> </vdef> <precond> <language>perl</language> <code>!$mulparambootstrap_seed &amp;&amp;!$bootstrap_seed &amp;&amp; !$bipartitions &amp;&amp; ! 
defined $constraint &amp;&amp; !$log_likelihood &amp;&amp; !$compute_ml_distances &amp;&amp; !$specify_runs</code> </precond> <group>2</group> <comment> <value>If you want to only compute a randomized parsimony starting tree with RAxML and not execute an ML analysis of the tree specify -y. The program will exit after computation of the starting tree. This option can be useful if you want to assess the impact of randomized MP and Neighbor Joining starting trees on your search algorithm. They can also be used e.g. as starting trees for Derrick Zwickl's GARLI program for ML inferences, which needs comparatively good starting trees to work well above approximately 500 taxa. </value> </comment> </attributes> </parameter> <!-- added for jbmunro --> <!-- -f d: DEFAULT, RAxML will execute the new (as of version 2.2.1) and significantly faster rapid hill-climbing algorithm [3]. --> <!-- added for jbmunro --> <!-- <parameter type="Switch"> <name>hillclimb</name> <attributes> <prompt>Use the new, faster rapid hill-climbing algorithm (-f d)</prompt> <format> <language>perl</language> <code> ($value)?" -f d ":""</code> </format> <vdef> <value>1</value> </vdef> <group>2</group> <precond> <language>perl</language> <code>!$bipartitions &amp;&amp; !$startingtreeonly &amp;&amp; !$log_likelihood</code> </precond> <comment> <value>This is the default option. RAxML will execute the new (as of version 2.2.1) and significantly faster rapid hill-climbing algorithm</value> </comment> </attributes> </parameter> --> <!-- -#|-N numberOfRuns Specifies the number of alternative runs on distinct starting trees, e.g., if -# 10 or -N 10 is specified RAxML will compute 10 distinct ML trees starting from 10 distinct randomized maximum parsimony starting trees. --> <parameter type="Switch" ismandatory="1"> <name>specify_runs</name> <attributes> <prompt>Specify the number of alternative runs on distinct starting trees? 
(-#/-N)</prompt> <precond> <language>perl</language> <code>!$bootstrap_seed &amp;&amp; !$mulparambootstrap_seed &amp;&amp; !$bipartitions &amp;&amp; !$startingtreeonly &amp;&amp; !$log_likelihood</code> </precond> <comment> <value>This option specifies the number of alternative runs on distinct starting trees. For example, if -N 10 is specified, RAxML will compute 10 distinct ML trees starting from 10 distinct randomized maximum parsimony starting trees. </value> </comment> </attributes> </parameter> <parameter type="Integer" ismandatory="1"> <name>altrun_number</name> <attributes> <prompt>Enter the number of alternative runs</prompt> <precond> <language>perl</language> <code>$specify_runs</code> </precond> <format> <language>perl</language> <code>"-f d -N $value"</code> </format> <group>15</group> <ctrls> <ctrl> <message>Please specify how many runs you wish to execute (eg 10)</message> <language>perl</language> <code>$specify_runs &amp;&amp; !defined $altrun_number</code> </ctrl> </ctrls> <comment> <value>if -N 10 is specified, RAxML will compute 10 distinct ML trees starting from 10 distinct randomized maximum parsimony starting trees.</value> </comment> </attributes> </parameter> <!-- added for jbmunro --> <parameter type="Switch"> <name>bipartitions</name> <attributes> <prompt>Draw bipartitions onto a single tree topology. (-f b)</prompt> <format> <language>perl</language> <code> ($value)?" 
-f b ":""</code> </format> <vdef> <value>0</value> </vdef> <group>2</group> <precond> <language>perl</language> <code>!$mulparambootstrap_seed &amp;&amp;!$bootstrap_seed &amp;&amp; !$startingtreeonly &amp;&amp; !$log_likelihood &amp;&amp; !$compute_ml_distances &amp;&amp; !$specify_runs</code> </precond> <ctrls> <ctrl> <message>To use the -f b option you must specify a best tree with "-t" and file containing multiple trees with the "-z" option</message> <language>perl</language> <code>!$bootstrap_seed &amp;&amp; $bipartitions &amp;&amp; ( !defined $bunchotops || !defined $treetop)</code> </ctrl> </ctrls> <comment> <value>When this is specified, RAxML draws the bipartitions using a bunch of topologies (typically boot-strapped trees) specified with -z onto a single tree topology specified by -t (typically the best-scoring ML tree). </value> </comment> </attributes> </parameter> <!-- -f h: RAxML will compute a log likelihood test (SH-test [21]) between a best tree passed via -t and a bunch of other trees passed via -z. Example: raxmlHPC -f h -t ref -z trees -s alg -m GTRGAMMA -n TEST. --> <!-- added for Ryan Lower raxmlHPC --> <parameter type="Switch"> <name>log_likelihood</name> <attributes> <prompt>Compute a log likelihood test (-f h)</prompt> <format> <language>perl</language> <code> ($value)?" 
-f h ":""</code> </format> <vdef> <value>0</value> </vdef> <group>2</group> <precond> <language>perl</language> <code>!$mulparambootstrap_seed &amp;&amp; !$bootstrap_seed &amp;&amp; !$startingtreeonly &amp;&amp; !$bipartitions &amp;&amp; !$compute_ml_distances &amp;&amp; !$specify_runs</code> </precond> <ctrls> <ctrl> <message>To use the -f h option you must specify a best tree with "-t" and file containing multiple trees with the "-z" option</message> <language>perl</language> <code>$log_likelihood &amp;&amp; (!defined $bunchotops || !defined $treetop)</code> </ctrl> </ctrls> <comment> <value>When this is specified, RAxML will compute a log likelihood test (SH-test [21]) between a best tree passed via -t and a bunch of other trees passed via -z. Example: raxmlHPC -f h -t ref -z trees -s alg -m GTRGAMMA -n TEST</value> </comment> </attributes> </parameter> <!-- [-J MR|MRE] Compute majority rule consensus tree with "-J MR" or extended majority rule consensus tree with "-J MRE" You will need to provide a tree file containing several trees via "-z"--> <parameter type="Switch" ismandatory="1"> <name>compute_mr</name> <attributes> <prompt>Compute majority rule consensus tree (-J; GAMMA models only)</prompt> <vdef> <value>0</value> </vdef> <precond> <language>perl</language> <code>$dna_gtrgamma || $bin_model eq "BINGAMMA" || $prot_sub_model eq "PROTGAMMA"</code> </precond> <comment> <value>This option allows the user to compute majority rule consensus tree or extended majority rule consensus tree from an uploaded file containing several trees (-z)</value> </comment> </attributes> </parameter> <parameter type="Excl" ismandatory="1"> <name>specify_mr</name> <attributes> <prompt>Specify majority rule consensus tree (-J) technique </prompt> <precond> <language>perl</language> <code>$compute_mr</code> </precond> <format> <language>perl</language> <code>"-J $value"</code> </format> <vlist> <value>MR</value> <label>Majority rule</label> <value>MRE</value> <label>Extended 
majority rule</label> </vlist> <ctrls> <ctrl> <message>Please select a majority rule option: MR or MRE</message> <language>perl</language> <code>!$specify_mr</code> </ctrl> <ctrl> <message>In order to use the -J option, you must upload a set of bootstrapped trees using the -z option</message> <language>perl</language> <code>$compute_mr &amp;&amp; !defined $bunchotops</code> </ctrl> </ctrls> <comment> <value>A badly formatted tree file may produce an error like this: /u/ac/cipres/ngbw/contrib/tools/bin/wrap.sh: line 21: 31550 Segmentation fault $* </value> </comment> </attributes> </parameter> <!-- added for jbmunro --> <parameter type="InFile"> <name>bunchotops</name> <attributes> <prompt>File with topologies for bipartitions or bootstopping (-z)</prompt> <format> <language>perl</language> <code>" -z topologies_file.tre"</code> </format> <group>2</group> <filenames>topologies_file.tre</filenames> <precond> <language>perl</language> <code>($bipartitions || $log_likelihood || $compute_mr) &amp;&amp; !defined $apo_tops</code> </precond> <!-- <ctrls> <ctrl> <message>You must specify a starting tree (via the -t option above) to use the -z option</message> <language>perl</language> <code>$bunchotops &amp;&amp; !defined $treetop</code> </ctrl> </ctrls> --> <comment> <value>The -z option is used in combination with the -f b,-f h,-f m,-f n options. The uploaded file should contain a number of trees in NEWICK format. The file should contain one tree per line without blank lines between trees. 
For example, you can directly read in a RAxML bootstrap result file.</value> </comment> </attributes> </parameter> <!-- "-f x": compute pair-wise ML distances, ML model parameters will be estimated on an MP starting tree or a user-defined tree passed via "-t"--> <parameter type="Switch" ismandatory="1"> <name>compute_ml_distances</name> <attributes> <prompt>Compute pair-wise ML distances (-f x; GAMMA models only)</prompt> <vdef> <value>0</value> </vdef> <precond> <language>perl</language> <code>!$mulparambootstrap_seed &amp;&amp; !$bootstrap_seed &amp;&amp; !$bipartitions &amp;&amp; !$startingtreeonly &amp;&amp; !$log_likelihood &amp;&amp; !$specify_runs</code> </precond> <format> <language>perl</language> <code> ($value)?" -f x ":""</code> </format> <ctrls> <ctrl> <message>You must specify a starting tree (via the -t option above) to use the -f x option</message> <language>perl</language> <code>$compute_ml_distances &amp;&amp; ! defined $treetop</code> </ctrl> <ctrl> <message>Sorry, the -f x option is valid only with GAMMA models</message> <language>perl</language> <code>$compute_ml_distances &amp;&amp; !$dna_gtrgamma &amp;&amp; $bin_model ne "BINGAMMA" &amp;&amp; $prot_sub_model ne "PROTGAMMA"</code> </ctrl> </ctrls> <comment> <value>Compute pair-wise ML distances, ML model parameters will be estimated on an MP starting tree or a user-defined tree passed via "-t".</value> </comment> </attributes> </parameter> </parameters> </paragraph> </parameter> <!-- under development for large DS --> <!-- [-D] ML search convergence criterion. This will break off ML searches if the relative Robinson-Foulds distance large trees between the trees obtained from two consecutive lazy SPR cycles is smaller or equal to 1%. Usage recommended for very large datasets in terms of taxa. On trees with more than 500 taxa this will yield execution time improvements of approximately 50% while yielding only slightly worse trees. 
DEFAULT: OFF --> <!-- [-F] enable ML tree searches under CAT model for very large trees without switching to GAMMA in the end (saves memory) and print out some additional files (large trees) with intermediate trees from the search. This option can also be used with the GAMMA models in order to avoid the thorough optimization of the best-scoring ML tree in the end. DEFAULT: OFF--> <!-- [-L sequenceSimilarityThreshold] Same functionality as "-l" above, but uses a less exhaustive and thus faster clustering algorithm. This is intended for very large datasets with more than (large data sets) 20,000-30,000 sequences DEFAULT: OFF--> <!-- --> <parameter type="Paragraph"> <paragraph> <name>bootstrap_config</name> <prompt>Configure Bootstrapping</prompt> <parameters> <!--add for jbmunro--> <!-- I split the seeds into invocation, and number entry, due to command line conflicts --> <parameter type="Switch" ismandatory="1"> <name>mulparambootstrap_seed</name> <attributes> <prompt>Conduct Multiparametric Bootstrapping? (-b)</prompt> <vdef> <value>0</value> </vdef> <precond> <language>perl</language> <code>!$bootstrap_seed &amp;&amp; !$startingtreeonly &amp;&amp; !$compute_ml_distances &amp;&amp; !$bipartitions &amp;&amp; !$log_likelihood &amp;&amp; !$compute_mr &amp;&amp; !$specify_runs</code> </precond> <comment> <value>This option allows you to turn on non-parametric bootstrapping. To allow for reproducibility of runs in the sequential program, you have to specify a random number seed. </value> </comment> </attributes> </parameter> <parameter type="Integer" ismandatory="1"> <name>mulparambootstrap_seed_val</name> <attributes> <prompt>Enter a random seed value for multi-parametric bootstrapping</prompt> <format> <language>perl</language> <code>($value) ? 
" -b $value" : ""</code> </format> <vdef> <value>12345</value> </vdef> <precond> <language>perl</language> <code>$mulparambootstrap_seed</code> </precond> <group>2</group> <ctrls> <ctrl> <message>Please enter a random seed for the -b option (eg 12345)</message> <language>perl</language> <code>$mulparambootstrap_seed &amp;&amp; !defined $mulparambootstrap_seed_val</code> </ctrl> </ctrls> <comment> <value>This random number is provided to assure that there is comparability between runs.</value> </comment> </attributes> </parameter> <!--add for jbmunro--> <!-- I split the seeds into invocation, and number entry, due to command line conflicts --> <parameter type="Switch" ismandatory="1"> <name>bootstrap_seed</name> <attributes> <prompt>Conduct rapid bootstrapping? (-x)</prompt> <vdef> <value>1</value> </vdef> <precond> <language>perl</language> <code>!$mulparambootstrap_seed &amp;&amp; !$startingtreeonly &amp;&amp; !$compute_ml_distances &amp;&amp; !$bipartitions &amp;&amp; !$log_likelihood &amp;&amp; !$compute_mr &amp;&amp; !$specify_runs</code> </precond> <ctrls> <ctrl> <message>Sorry, uploading a starting tree is not valid with the -x option</message> <language>perl</language> <code>defined $treetop</code> </ctrl> </ctrls> <comment> <value>This option offers a novel rapid Bootstrapping algorithm that is faster by at least one order of magnitude than all other current implementations (RAxML 2.2.3, GARLI, PHYML). The results obtained are qualitatively comparable to those obtained via the standard RAxML BS algorithm and, more importantly, the deviations in support values between the rapid and the standard RAxML BS algorithm are smaller than those induced by using a different search strategy, e.g. GARLI or PHYML. 
This rapid BS search can be combined with a rapid ML search on the original alignment and thus allows users to conduct a full ML analysis within one single program run.</value> </comment> </attributes> </parameter> <parameter type="Integer" ismandatory="1"> <name>bootstrap_seed_val</name> <attributes> <prompt>Enter a random seed value for rapid bootstrapping</prompt> <format> <language>perl</language> <code>($value) ? " -x $value" : ""</code> </format> <vdef> <value>12345</value> </vdef> <group>2</group> <precond> <language>perl</language> <code>$bootstrap_seed &amp;&amp; !$mulparambootstrap_seed &amp;&amp; !$startingtreeonly &amp;&amp; !$compute_ml_distances</code> </precond> <ctrls> <ctrl> <message>Please enter a random seed for the -x option (eg 12345)</message> <language>perl</language> <code>$bootstrap_seed &amp;&amp; !defined $bootstrap_seed_val</code> </ctrl> </ctrls> <comment> <value>This random number is provided to assure that there is comparability between runs.</value> </comment> </attributes> </parameter> <!-- rev 6 --> <!-- Do maximum likelihood search after bootstrap --> <parameter type="Switch"> <name>mlsearch</name> <attributes> <prompt>Conduct a rapid Bootstrap analysis and search for the best-scoring ML tree in one single program run. (-f a)</prompt> <format> <language>perl</language> <code> ($value)?" -f a ":""</code> </format> <ctrls> <ctrl> <message>Sorry, uploading a starting tree is not valid with the -f a option</message> <language>perl</language> <code>defined $treetop</code> </ctrl> </ctrls> <vdef> <value>0</value> </vdef> <group>2</group> <comment> <value>Tell RAxML to conduct a Rapid Bootstrap analysis (-x) and search for the best-scoring ML tree in one single program run. 
</value> </comment> <precond> <language>perl</language> <code>!$bipartitions &amp;&amp; !$startingtreeonly &amp;&amp; $bootstrap_seed &amp;&amp; !$compute_ml_distances</code> </precond> </attributes> </parameter> <!-- [-k] Specifies that bootstrapped trees should be printed with branch lengths. The bootstraps will run a bit longer, because model parameters will be optimized at the end of each run under GAMMA or GAMMA+P-Invar respectively. DEFAULT: OFF (old version said Use with CATMIX/PROTMIX or GAMMA/GAMMAI., check on this)--> <parameter type="Switch"> <name>printbrlength</name> <attributes> <prompt>Print branch lengths (-k)</prompt> <format> <language>perl</language> <code> ($value)?" -k":""</code> </format> <vdef> <value>0</value> </vdef> <group>2</group> <comment> <value> The -k option causes bootstrapped trees to be printed with branch lengths. The bootstraps will require a bit longer to run under this option because model parameters will be optimized at the end of each run under GAMMA or GAMMA+P-Invar respectively. </value> </comment> </attributes> </parameter> <!-- I replaced this with a split inference, so the parsimony tree alone will work ok. <parameter type="Integer" ismandatory="1"> <name>parsimony_seed_val</name> <attributes> <prompt>Specify a random number seed for the parsimony inferences.</prompt> <format> <language>perl</language> <code>"-p $value"</code> </format> <vdef> <value>12345</value> </vdef> <group>2</group> <comment> <value>A Maximum Parsimony starting tree is constructed if no user-defined tree is provided. This random number is provided to assure that there is comparability between runs by keeping a consistent starting tree. 
</value> </comment> </attributes> </parameter> --> <!-- rev 6 --> <!-- Bootstrap iterations, either 100 or auto --> <!-- I split the seeds into invocation, and number entry, due to command line conflicts --> <parameter type="Switch" ismandatory="1"> <name>specify_bootstraps</name> <attributes> <prompt>Specify an Explicit Number of Bootstraps</prompt> <vdef> <value>1</value> </vdef> <precond> <language>perl</language> <code>($bootstrap_seed || $mulparambootstrap_seed) &amp;&amp; !$use_bootstopping</code> </precond> </attributes> </parameter> <parameter type="Integer" ismandatory="1"> <name>bootstop</name> <attributes> <prompt>Bootstrap iterations (-#|-N)</prompt> <precond> <language>perl</language> <code>$specify_bootstraps &amp;&amp; !$startingtreeonly</code> </precond> <format> <language>perl</language> <!-- replaced -# with -N for TG --> <code>" -N $value"</code> </format> <vdef> <value>100</value> </vdef> <group>2</group> <ctrls> <ctrl> <message>Please enter number of bootstrap reps desired (eg 100)</message> <language>perl</language> <code>$specify_bootstraps &amp;&amp; !defined $bootstop</code> </ctrl> </ctrls> <comment> <value>Specifies the number of alternative runs on distinct starting trees. If 10, RAxML computes 10 distinct ML trees starting from 10 distinct randomized maximum parsimony starting trees. In combination with the Random seed for rapid bootstrap (-x) invoke a rapid BS analysis. 
</value> </comment> </attributes> </parameter> <!-- Activate programmatic bootstopping --> <parameter type="Switch" ismandatory="1"> <name>use_bootstopping</name> <attributes> <prompt>Let RAxML halt bootstrapping automatically</prompt> <precond> <language>perl</language> <code>($bootstrap_seed || $mulparambootstrap_seed) &amp;&amp; !$specify_bootstraps</code> </precond> <vdef> <value>0</value> </vdef> <ctrls> <ctrl> <message>Please select to "specify bootstraps explicitly" or "automatically halt bootstrapping"</message> <language>perl</language> <code> ($bootstrap_seed || $mulparambootstrap_seed) &amp;&amp; (!$use_bootstopping &amp;&amp; !$specify_bootstraps)</code> </ctrl> </ctrls> <comment> <value>This option instructs Raxml to automatically halt bootstrapping when certain criteria are met, instead of specifying the number of bootstraps for an analysis. The exact criteria are specified/configured using subsequent entry fields.</value> </comment> </attributes> </parameter> <!-- <parameter type="Excl" ismandatory="1"> <name>select_bootstopping</name> <attributes> <prompt>Select Bootstopping Strategy</prompt> <vdef> <value>otf_bootstopping</value> </vdef> <vlist> <value>otf_bootstopping</value> <label>On-the-fly Bootstopping</label> <value>apo_bootstopping</value> <label>A posteriori bootstrapping</label> </vlist> <precond> <language>perl</language> <code>($bootstrap_seed || $mulparambootstrap_seed) &amp;&amp; !$specify_bootstraps</code> </precond> <ctrls> <ctrl> <message>Please choose either on-the-fly or a posteriori bootstrapping</message> <language>perl</language> <code>$use_bootstopping &amp;&amp; ($select_bootstopping ne "otf_bootstopping" &amp;&amp; $select_bootstopping ne "apo_bootstopping")</code> </ctrl> </ctrls> <comment> <value>This option allows the user to let Raxml "decide" whether bootstrapping is stopped (i.e. "bootstopping") by criteria determined "on-the-fly" or based on a set of bootstrapped trees uploaded as a single file using the -z option. 
Once this selection is made, the user must select whether the bootstopping threshold is determined using frequency criteria or majority rule criteria</value> </comment> </attributes> </parameter> --> <!-- [-#|-N numberOfRuns|autoFC|autoMR|autoMRE|autoMRE_IGN] Specify the number of alternative runs on distinct starting trees In combination with the "-b" option, this will invoke a multiple bootstrap analysis Note that "-N" has been added as an alternative since "-#" sometimes caused problems with certain MPI job submission systems, since "-#" is often used to start comments. If you want to use the bootstopping criteria specify "-# autoMR" or "-# autoMRE" or "-# autoMRE_IGN" for the majority-rule tree based criteria (see -I option) or "-# autoFC" for the frequency-based criterion. Bootstopping will only work in combination with "-x" or "-b" DEFAULT: 1 single analysis --> <parameter type="Switch" ismandatory="1"> <name>freq_bootstopping</name> <attributes> <prompt>Stop Bootstrapping Automatically with Frequency Criterion</prompt> <precond> <language>perl</language> <code>$use_bootstopping</code> </precond> <format> <language>perl</language> <!-- Replace -# with -N --> <code>($value) ? "-N autoFC":""</code> </format> <vdef> <value>0</value> </vdef> <ctrls> <ctrl> <message>Sorry, you cannot choose both majority rule and frequency criterion</message> <language>perl</language> <code>$mr_bootstopping &amp;&amp; $freq_bootstopping </code> </ctrl> <ctrl> <message>Please choose either majority rule and frequency criterion</message> <language>perl</language> <code>$use_bootstopping &amp;&amp; !$mr_bootstopping &amp;&amp; !$freq_bootstopping </code> </ctrl> </ctrls> <comment> <value>If you want to use the bootstopping criteria specify "-# autoMR" or "-# autoMRE" or "-# autoMRE_IGN" for the majority-rule tree based criteria (see -I option) or "-# autoFC" for the frequency-based criterion. 
Bootstopping will only work in combination with "-x" or "-b"</value> </comment> </attributes> </parameter> <parameter type="Switch" ismandatory="0"> <name>mr_bootstopping</name> <attributes> <prompt>Stop Bootstrapping Automatically with Majority Rule Criterion</prompt> <precond> <language>perl</language> <code>$use_bootstopping</code> </precond> <vdef> <value>0</value> </vdef> <ctrls> <ctrl> <message>Sorry, you cannot choose both majority rule and frequency criterion</message> <language>perl</language> <code>$use_bootstopping &amp;&amp; !$mr_bootstopping &amp;&amp; !$freq_bootstopping</code> </ctrl> <ctrl> <message>Please choose either majority rule and frequency criterion</message> <language>perl</language> <code>!$mr_bootstopping &amp;&amp; !$freq_bootstopping </code> </ctrl> </ctrls> <comment> <value>If you want to use the bootstopping criteria specify "-# autoMR" or "-# autoMRE" or "-# autoMRE_IGN" for the majority-rule tree based criteria (see -I option) or "-# autoFC" for the frequency-based criterion. Bootstopping will only work in combination with "-x" or "-b"</value> </comment> </attributes> </parameter> <parameter type="Excl" ismandatory="1"> <name>mrbootstopping_type</name> <attributes> <prompt>Select Majority Rule Criterion: (autoMRE is usually preferred)</prompt> <precond> <language>perl</language> <code>$mr_bootstopping</code> </precond> <format> <language>perl</language> <!--replace -# with -N --> <code>"-N $value"</code> </format> <vlist> <value>autoMR</value> <label>autoMR</label> <value>autoMRE</value> <label>autoMRE</label> <value>autoMRE_IGN</value> <label>autoMRE_IGN</label> </vlist> <vdef> <value>autoMRE</value> </vdef> <ctrls> <ctrl> <message>Please choose a Majority Rule criterion</message> <language>perl</language> <code>$mr_bootstopping &amp;&amp; !defined $mrbootstopping_type</code> </ctrl> </ctrls> </attributes> </parameter> <!-- The [-B wcCriterionThreshold] feature is eliminated in 7.2.3. 
Specify a floating point number between 0.0 and 1.0 that will be used as cutoff threshold for the WC bootstopping criterion. The recommended setting is 0.03. 7.2.0 DEFAULT: 0.03 (recommended empirically determined setting) --> <!-- This option is used when a bootstrap analysis is already completed. Upload an input file, a tree, and bootstrapped trees [-I autoFC|autoMR|autoMRE|autoMRE_IGN] a posteriori bootstopping analysis. Use: "-I autoFC" for the frequency-based criterion "-I autoMR" for the majority-rule consensus tree criterion "-I autoMRE" for the extended majority-rule consensus tree criterion "-I autoMRE_IGN" for metrics similar to MRE, but include bipartitions under the threshold whether they are compatible or not. This emulates MRE but is faster to compute. For any of these options, you also need to pass a tree file containing several bootstrap replicates via "-z" --> <parameter type="Switch"> <name>use_apobootstopping</name> <attributes> <prompt>Use a posteriori bootstrapping</prompt> <vdef> <value>0</value> </vdef> <ctrls> <ctrl> <message>Sorry, you cannot use a posteriori bootstrapping with the -b or -x options</message> <language>perl</language> <code>$use_apobootstopping &amp;&amp; ($bootstrap_seed || $mulparambootstrap_seed)</code> </ctrl> </ctrls> <comment> <value>This option is used when a bootstrap analysis is already completed. Upload an input file, a tree, and bootstrapped trees. You must upload a set of bootstrapped trees uploaded as a single file using the -z option. 
Once this selection is made, the user must select whether the bootstopping threshold is determined using frequency criteria or majority rule criteria</value> </comment> </attributes> </parameter> <!-- a priori bootstopping (-I) is mutually exclusive with on the fly bootstopping (-N) --> <parameter type="Excl" ismandatory="1"> <name>aposterior_bootstopping</name> <attributes> <prompt>Select the criterion for a posteriori bootstopping analysis</prompt> <precond> <language>perl</language> <code>$use_apobootstopping</code> </precond> <format> <language>perl</language> <code>"-I $value"</code> </format> <vlist> <value>autoFC</value> <label>autoFC</label> <value>autoMR</value> <label>autoMR</label> <value>autoMRE</value> <label>autoMRE</label> <value>autoMRE_IGN</value> <label>autoMRE_IGN</label> </vlist> <ctrls> <ctrl> <message>In order to use the a posteriori bootstrapping option, you must upload a set of bootstrapped trees below</message> <language>perl</language> <code>!defined $apo_tops</code> </ctrl> </ctrls> <comment> <value>This option allows the user to conduct a posteriori bootstopping analysis based on a set of bootstrapped trees. Use: autoFC for the frequency-based criterion, autoMR for the majority-rule consensus tree criterion, autoMRE for the extended majority-rule consensus tree criterion and autoMRE_IGN for metrics similar to MRE, but include bipartitions under the threshold whether they are compatible or not. This emulates MRE but is faster to compute. 
For any of these options, you also need to upload a tree file containing several bootstrap replicates via "-z"</value> </comment> </attributes> </parameter> <parameter type="InFile"> <name>apo_tops</name> <attributes> <prompt>File with topologies for a posteriori bootstopping (-z)</prompt> <format> <language>perl</language> <code>" -z apotopologies_file.tre"</code> </format> <group>2</group> <filenames>apotopologies_file.tre</filenames> <precond> <language>perl</language> <code>$use_apobootstopping &amp;&amp; !defined $bunchotops</code> </precond> <comment> <value></value> </comment> </attributes> </parameter> </parameters> </paragraph> </parameter> <!-- When we're done with raxml, run consense on the bootstrapped trees to produce a file with a consense tree in "outtree". A file named outfile is also produced by we don't want to return it to the user. --> <!-- <parameter type="String" ishidden="1"> <name>consense</name> <attributes> <group>99</group> <format> <language>perl</language> <code> "&amp;&amp; ( echo RAxML_bootstrap.result &gt; c.p; echo 1 &gt;&gt; c.p; echo 2 &gt;&gt; c.p; echo 3 &gt;&gt; c.p; echo Y &gt;&gt; c.p; consense &lt; c.p )" </code> </format> <precond> <language>perl</language> <code>!startingtreeonly</code> </precond> </attributes> </parameter> --> <!-- result of running consense --> <!-- <parameter type="Results"> <name>consense_output</name> <attributes> <filenames>outtree</filenames> </attributes> </parameter> --> <!-- return all output files from raxml in one big mess --> <parameter type="Results"> <name>all_outputfiles</name> <attributes> <filenames>*</filenames> </attributes> </parameter> <!-- <parameter type="Results"> <name>outputfile</name> <attributes> <filenames>*.result</filenames> </attributes> </parameter> <parameter type="Results"> <name>all_commandline</name> <attributes> <filenames>COMMANDLINE</filenames> </attributes> </parameter> <parameter type="Results"> <name>all_excludeFile</name> <attributes> 
<filenames>*.excl</filenames> </attributes> </parameter> <parameter type="OutFile"> <name>results_files</name> <attributes> <filenames>RAxML_log.result</filenames> </attributes> </parameter> <parameter type="OutFile"> <name>outresult</name> <attributes> <filenames>RAxML_result.result</filenames> </attributes> </parameter> <parameter type="OutFile"> <name>outinfo</name> <attributes> <filenames>RAxML_info.result</filenames> </attributes> </parameter> <parameter type="OutFile"> <name>outparsimonytree</name> <attributes> <filenames>RAxML_parsimonyTree.result</filenames> </attributes> </parameter> <parameter type="OutFile"> <name>outrandomtree</name> <attributes> <filenames>RAxML_randomTree.result</filenames> </attributes> </parameter> <parameter type="OutFile"> <name>outcheckpoint</name> <attributes> <filenames>RAxML_checkpoint.result</filenames> </attributes> </parameter> <parameter type="OutFile"> <name>outbootstrap</name> <attributes> <filenames>RAxML_bootstrap.result</filenames> </attributes> </parameter> <parameter type="OutFile"> <name>outbipartitions</name> <attributes> <filenames>RAxML_bipartitions.result</filenames> </attributes> </parameter> <parameter type="OutFile"> <name>outreducedlist</name> <attributes> <filenames>RAxML_reducedList.result</filenames> </attributes> </parameter> <parameter type="OutFile"> <name>outbipartitionfrequencies</name> <attributes> <filenames>RAxML_bipartitionFrequencies.result</filenames> </attributes> </parameter> <parameter type="OutFile"> <name>outpersitells</name> <attributes> <filenames>RAxML_perSiteLLs.result</filenames> </attributes> </parameter> <parameter type="OutFile"> <name>outbesttree</name> <attributes> <filenames>RAxML_bestTree.result</filenames> </attributes> </parameter> <parameter type="OutFile"> <name>reduced_results</name> <attributes> <filenames>*.reduced</filenames> </attributes> </parameter> <parameter type="OutFile"> <name>txt_results</name> <attributes> <filenames>*.txt</filenames> </attributes> 
</parameter> --> </parameters> </pise>
{ "content_hash": "0516173de6fa74b1bec185e0f33e5db6", "timestamp": "", "source": "github", "line_count": 3042, "max_line_length": 670, "avg_line_length": 49.013477975016436, "alnum_prop": 0.6150074782527046, "repo_name": "SciGaP/DEPRECATED-Cipres-Airavata-POC", "id": "3efc32591d02c55f4d7320206ca22aa32d24cdcf", "size": "149099", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "saminda/cipres-airavata/sdk/src/main/resources/pisexml/OBSOLETE/raxmlhpc2_abe727.xml", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "ApacheConf", "bytes": "210" }, { "name": "CSS", "bytes": "50176" }, { "name": "HTML", "bytes": "217837" }, { "name": "Java", "bytes": "5592181" }, { "name": "JavaScript", "bytes": "35732" }, { "name": "Makefile", "bytes": "152" }, { "name": "PHP", "bytes": "132" }, { "name": "Perl", "bytes": "61968" }, { "name": "Python", "bytes": "510182" }, { "name": "Shell", "bytes": "218009" }, { "name": "TeX", "bytes": "15328" }, { "name": "XML", "bytes": "12094076" } ], "symlink_target": "" }
using System.Collections.Generic;

namespace EED.Domain
{
    /// <summary>
    /// Domain entity describing an office within an election project.
    /// Plain state holder: no behavior, only mapped properties.
    /// </summary>
    public class Office
    {
        /// <summary>Identifier of this office.</summary>
        public int Id { get; set; }

        /// <summary>Display name of the office.</summary>
        public string Name { get; set; }

        /// <summary>Number of positions associated with this office.</summary>
        public int NumberOfPositions { get; set; }

        /// <summary>Whether this office is a candidacy race or a measurement (see <see cref="EED.Domain.OfficeType"/>).</summary>
        public OfficeType OfficeType { get; set; }

        /// <summary>Type of district this office belongs to.</summary>
        public DistrictType DistrictType { get; set; }

        /// <summary>Election project that owns this office.</summary>
        public ElectionProject Project { get; set; }

        /// <summary>Contests held for this office.</summary>
        public IList<Contest> Contests { get; set; }
    }

    /// <summary>Distinguishes candidacy races from measurements.</summary>
    public enum OfficeType
    {
        Candidacy = 0,
        Measurement = 1
    }
}
{ "content_hash": "271160d7cade04382084d2063cf3703b", "timestamp": "", "source": "github", "line_count": 22, "max_line_length": 54, "avg_line_length": 23.863636363636363, "alnum_prop": 0.5923809523809523, "repo_name": "anakrivokuca/EED", "id": "f895158799b28186df4f691140c27f47940f3332", "size": "527", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/EED.Domain/Office.cs", "mode": "33188", "license": "mit", "language": [ { "name": "ASP", "bytes": "101" }, { "name": "C#", "bytes": "404798" }, { "name": "CSS", "bytes": "15390" }, { "name": "JavaScript", "bytes": "5755" } ], "symlink_target": "" }
namespace FishEditor
{
    // Editor-wide selection state, held as static members. Weak pointers are
    // used so the selection never extends the lifetime of scene objects.
    bool Selection::m_isActiveGameObjectLocked = false;
    std::weak_ptr<Object> Selection::s_activeObject;
    std::list<std::weak_ptr<Transform>> Selection::s_transforms;
    std::weak_ptr<GameObject> Selection::s_activeGameObject;
    std::weak_ptr<Transform> Selection::s_activeTransform;

    // Observer hooks invoked when the selection set or the active object changes.
    Action Selection::selectionChanged;
    Action Selection::activeObjectChanged;

    // Makes the given game object the active one and notifies listeners.
    // NOTE(review): fires selectionChanged but not activeObjectChanged, unlike
    // setTransforms() below — confirm this asymmetry is intended.
    void Selection::setActiveGameObject(GameObjectPtr gameObject)
    {
        s_activeGameObject = gameObject;
        selectionChanged();
    }

    // Replaces the selected-transform list with the single given transform,
    // or clears the list when transform is null. Fires no event here.
    void Selection::setActiveTransform(TransformPtr transform)
    {
        //m_activeTransform = transform;
        s_transforms.clear();
        if (transform != nullptr)
            s_transforms.push_back(transform);
    }

    // Returns the game object that owns the active transform, or null when
    // there is no active transform.
    FishEngine::GameObjectPtr Selection::activeGameObject()
    {
        auto const & t = activeTransform();
        if (nullptr != t)
            return t->gameObject();
        return nullptr;
    }

    // Replaces the whole selection. Unless the active game object is locked,
    // the active transform tracks the front of the new list:
    //  - empty list  -> active transform is reset (activeObjectChanged fires
    //    only if it was previously set);
    //  - non-empty   -> front element becomes active (activeObjectChanged
    //    fires only if it actually differs from the current one).
    // selectionChanged always fires last, after any activeObjectChanged.
    void Selection::setTransforms(const std::list<std::weak_ptr<FishEngine::Transform> > &transforms)
    {
        s_transforms = transforms;
        if (!m_isActiveGameObjectLocked)
        {
            if (s_transforms.empty())
            {
                // Selection cleared: drop the active transform if one was set.
                if (s_activeTransform.lock() != nullptr)
                {
                    s_activeTransform.reset();
                    activeObjectChanged();
                }
            }
            else
            {
                // Front of the list becomes the active transform; compare via
                // lock() so expired entries count as "different".
                auto const & t = s_transforms.front();
                if (s_activeTransform.lock() != t.lock())
                {
                    s_activeTransform = t;
                    activeObjectChanged();
                }
            }
        }
        selectionChanged();
    }

    // Returns true when the given transform is part of the current selection.
    // Comparison is on the locked shared_ptr, so expired entries never match.
    bool Selection::Contains(FishEngine::TransformPtr transform)
    {
        for (auto const & item : s_transforms)
        {
            if (item.lock() == transform)
                return true;
        }
        return false;
    }
}
{ "content_hash": "a081e2e2e28c9dbb463f7bb3279fd24b", "timestamp": "", "source": "github", "line_count": 74, "max_line_length": 98, "avg_line_length": 21.527027027027028, "alnum_prop": 0.6848713119899561, "repo_name": "yushroom/FishEngine", "id": "64689156de3b4309910bae2514aef32e655aca9b", "size": "1656", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Engine/Source/FishEditor/Selection.cpp", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "642" }, { "name": "C", "bytes": "4303193" }, { "name": "C++", "bytes": "13555197" }, { "name": "CMake", "bytes": "77478" }, { "name": "GLSL", "bytes": "47353" }, { "name": "Gnuplot", "bytes": "424" }, { "name": "Makefile", "bytes": "11384" }, { "name": "Mathematica", "bytes": "15352" }, { "name": "Objective-C", "bytes": "188282" }, { "name": "PAWN", "bytes": "1670" }, { "name": "Perl", "bytes": "1297" }, { "name": "Python", "bytes": "163726" }, { "name": "QMake", "bytes": "5836" }, { "name": "ShaderLab", "bytes": "1372" }, { "name": "Shell", "bytes": "227" } ], "symlink_target": "" }
ACCEPTED #### According to International Plant Names Index #### Published in null #### Original name null ### Remarks null
{ "content_hash": "d830da19f99fd87ec9f923295a2495b6", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 31, "avg_line_length": 9.692307692307692, "alnum_prop": 0.7063492063492064, "repo_name": "mdoering/backbone", "id": "d9b5ffac9985ec28098c5cc48207b827081a6879", "size": "186", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "life/Plantae/Magnoliophyta/Liliopsida/Asparagales/Asparagaceae/Furcraea/Furcraea variegata/README.md", "mode": "33188", "license": "apache-2.0", "language": [], "symlink_target": "" }
namespace agency
{
namespace cuda
{

/// Alias: agency's CUDA `concurrent_executor` is provided by
/// `block_executor` (declared elsewhere in the library).
using concurrent_executor = block_executor;

} // namespace cuda
} // namespace agency
{ "content_hash": "3a5c45d42e9bc6298a8e05166a9543f0", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 43, "avg_line_length": 8.923076923076923, "alnum_prop": 0.6896551724137931, "repo_name": "egaburov/agency", "id": "f189dc2d1f02971d6b557968cd24ffccfedf0516", "size": "192", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "agency/cuda/execution/executor/concurrent_executor.hpp", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C++", "bytes": "1068498" }, { "name": "Cuda", "bytes": "155743" }, { "name": "Python", "bytes": "17912" } ], "symlink_target": "" }
POST https://api.recurly.com/v2/accounts/invoicemock/invoices HTTP/1.1 X-Api-Version: 2.1 Accept: application/xml Authorization: Basic YXBpa2V5Og== User-Agent: recurly-python/{version} Content-Length: 0  HTTP/1.1 422 Unprocessable Entity Content-Type: application/xml; charset=utf-8 <?xml version="1.0" encoding="UTF-8"?> <error> <symbol>will_not_invoice</symbol> <description>No charges to invoice</description> </error>
{ "content_hash": "c5153ee662a14aad7fb96135b252adb1", "timestamp": "", "source": "github", "line_count": 16, "max_line_length": 70, "avg_line_length": 26.8125, "alnum_prop": 0.7575757575757576, "repo_name": "tbartelmess/recurly-client-python", "id": "de7a7dfba5d920bb7cf07f1da9ffe2878ab9492d", "size": "429", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/fixtures/invoice/error-no-charges.xml", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "124761" } ], "symlink_target": "" }
TensorFlow's eager execution is an imperative programming environment that evaluates operations immediately, without building graphs: operations return concrete values instead of constructing a computational graph to run later. This makes it easy to get started with TensorFlow and debug models, and it reduces boilerplate as well. To follow along with this guide, run the code samples below in an interactive `python` interpreter. Eager execution is a flexible machine learning platform for research and experimentation, providing: * *An intuitive interface*—Structure your code naturally and use Python data structures. Quickly iterate on small models and small data. * *Easier debugging*—Call ops directly to inspect running models and test changes. Use standard Python debugging tools for immediate error reporting. * *Natural control flow*—Use Python control flow instead of graph control flow, simplifying the specification of dynamic models. Eager execution supports most TensorFlow operations and GPU acceleration. For a collection of examples running in eager execution, see: [tensorflow/contrib/eager/python/examples](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/eager/python/examples). Note: Some models may experience increased overhead with eager execution enabled. Performance improvements are ongoing, but please [file a bug](https://github.com/tensorflow/tensorflow/issues) if you find a problem and share your benchmarks. ## Setup and basic usage Upgrade to the latest version of TensorFlow: ``` $ pip install --upgrade tensorflow ``` To start eager execution, add `tf.enable_eager_execution()` to the beginning of the program or console session. Do not add this operation to other modules that the program calls. 
```py from __future__ import absolute_import, division, print_function import tensorflow as tf tf.enable_eager_execution() ``` Now you can run TensorFlow operations and the results will return immediately: ```py tf.executing_eagerly() # => True x = [[2.]] m = tf.matmul(x, x) print("hello, {}".format(m)) # => "hello, [[4.]]" ``` Enabling eager execution changes how TensorFlow operations behave—now they immediately evaluate and return their values to Python. `tf.Tensor` objects reference concrete values instead of symbolic handles to nodes in a computational graph. Since there isn't a computational graph to build and run later in a session, it's easy to inspect results using `print()` or a debugger. Evaluating, printing, and checking tensor values does not break the flow for computing gradients. Eager execution works nicely with [NumPy](http://www.numpy.org/). NumPy operations accept `tf.Tensor` arguments. TensorFlow [math operations](https://www.tensorflow.org/api_guides/python/math_ops) convert Python objects and NumPy arrays to `tf.Tensor` objects. The `tf.Tensor.numpy` method returns the object's value as a NumPy `ndarray`. 
```py a = tf.constant([[1, 2], [3, 4]]) print(a) # => tf.Tensor([[1 2] # [3 4]], shape=(2, 2), dtype=int32) # Broadcasting support b = tf.add(a, 1) print(b) # => tf.Tensor([[2 3] # [4 5]], shape=(2, 2), dtype=int32) # Operator overloading is supported print(a * b) # => tf.Tensor([[ 2 6] # [12 20]], shape=(2, 2), dtype=int32) # Use NumPy values import numpy as np c = np.multiply(a, b) print(c) # => [[ 2 6] # [12 20]] # Obtain numpy value from a tensor: print(a.numpy()) # => [[1 2] # [3 4]] ``` The `tf.contrib.eager` module contains symbols available to both eager and graph execution environments and is useful for writing code to [work with graphs](#work_with_graphs): ```py tfe = tf.contrib.eager ``` ## Dynamic control flow A major benefit of eager execution is that all the functionality of the host language is available while your model is executing. So, for example, it is easy to write [fizzbuzz](https://en.wikipedia.org/wiki/Fizz_buzz): ```py def fizzbuzz(max_num): counter = tf.constant(0) for num in range(max_num): num = tf.constant(num) if num % 3 == 0 and num % 5 == 0: print('FizzBuzz') elif num % 3 == 0: print('Fizz') elif num % 5 == 0: print('Buzz') else: print(num) counter += 1 return counter ``` This has conditionals that depend on tensor values and it prints these values at runtime. ## Build a model Many machine learning models are represented by composing layers. When using TensorFlow with eager execution you can either write your own layers or use a layer provided in the `tf.keras.layers` package. While you can use any Python object to represent a layer, TensorFlow has `tf.keras.layers.Layer` as a convenient base class. Inherit from it to implement your own layer: ```py class MySimpleLayer(tf.keras.layers.Layer): def __init__(self, output_units): self.output_units = output_units def build(self, input): # The build method gets called the first time your layer is used. 
# Creating variables on build() allows you to make their shape depend # on the input shape and hence remove the need for the user to specify # full shapes. It is possible to create variables during __init__() if # you already know their full shapes. self.kernel = self.add_variable( "kernel", [input.shape[-1], self.output_units]) def call(self, input): # Override call() instead of __call__ so we can perform some bookkeeping. return tf.matmul(input, self.kernel) ``` Use `tf.keras.layers.Dense` layer instead of `MySimpleLayer` above as it has a superset of its functionality (it can also add a bias). When composing layers into models you can use `tf.keras.Sequential` to represent models which are a linear stack of layers. It is easy to use for basic models: ```py model = tf.keras.Sequential([ tf.keras.layers.Dense(10, input_shape=(784,)), # must declare input shape tf.keras.layers.Dense(10) ]) ``` Alternatively, organize models in classes by inheriting from `tf.keras.Model`. This is a container for layers that is a layer itself, allowing `tf.keras.Model` objects to contain other `tf.keras.Model` objects. ```py class MNISTModel(tf.keras.Model): def __init__(self): super(MNISTModel, self).__init__() self.dense1 = tf.keras.layers.Dense(units=10) self.dense2 = tf.keras.layers.Dense(units=10) def call(self, input): """Run the model.""" result = self.dense1(input) result = self.dense2(result) result = self.dense2(result) # reuse variables from dense2 layer return result model = MNISTModel() ``` It's not required to set an input shape for the `tf.keras.Model` class since the parameters are set the first time input is passed to the layer. `tf.keras.layers` classes create and contain their own model variables that are tied to the lifetime of their layer objects. To share layer variables, share their objects. 
## Eager training ### Computing gradients [Automatic differentiation](https://en.wikipedia.org/wiki/Automatic_differentiation) is useful for implementing machine learning algorithms such as [backpropagation](https://en.wikipedia.org/wiki/Backpropagation) for training neural networks. During eager execution, use `tf.GradientTape` to trace operations for computing gradients later. `tf.GradientTape` is an opt-in feature to provide maximal performance when not tracing. Since different operations can occur during each call, all forward-pass operations get recorded to a "tape". To compute the gradient, play the tape backwards and then discard. A particular `tf.GradientTape` can only compute one gradient; subsequent calls throw a runtime error. ```py w = tfe.Variable([[1.0]]) with tf.GradientTape() as tape: loss = w * w grad = tape.gradient(loss, [w]) print(grad) # => [tf.Tensor([[ 2.]], shape=(1, 1), dtype=float32)] ``` Here's an example of `tf.GradientTape` that records forward-pass operations to train a simple model: ```py # A toy dataset of points around 3 * x + 2 NUM_EXAMPLES = 1000 training_inputs = tf.random_normal([NUM_EXAMPLES]) noise = tf.random_normal([NUM_EXAMPLES]) training_outputs = training_inputs * 3 + 2 + noise def prediction(input, weight, bias): return input * weight + bias # A loss function using mean-squared error def loss(weights, biases): error = prediction(training_inputs, weights, biases) - training_outputs return tf.reduce_mean(tf.square(error)) # Return the derivative of loss with respect to weight and bias def grad(weights, biases): with tf.GradientTape() as tape: loss_value = loss(weights, biases) return tape.gradient(loss_value, [weights, biases]) train_steps = 200 learning_rate = 0.01 # Start with arbitrary values for W and B on the same batch of data W = tfe.Variable(5.) B = tfe.Variable(10.) 
print("Initial loss: {:.3f}".format(loss(W, B))) for i in range(train_steps): dW, dB = grad(W, B) W.assign_sub(dW * learning_rate) B.assign_sub(dB * learning_rate) if i % 20 == 0: print("Loss at step {:03d}: {:.3f}".format(i, loss(W, B))) print("Final loss: {:.3f}".format(loss(W, B))) print("W = {}, B = {}".format(W.numpy(), B.numpy())) ``` Output (exact numbers may vary): ``` Initial loss: 71.204 Loss at step 000: 68.333 Loss at step 020: 30.222 Loss at step 040: 13.691 Loss at step 060: 6.508 Loss at step 080: 3.382 Loss at step 100: 2.018 Loss at step 120: 1.422 Loss at step 140: 1.161 Loss at step 160: 1.046 Loss at step 180: 0.996 Final loss: 0.974 W = 3.01582956314, B = 2.1191945076 ``` Replay the `tf.GradientTape` to compute the gradients and apply them in a training loop. This is demonstrated in an excerpt from the [mnist_eager.py](https://github.com/tensorflow/models/blob/master/official/mnist/mnist_eager.py) example: ```py dataset = tf.data.Dataset.from_tensor_slices((data.train.images, data.train.labels)) ... for (batch, (images, labels)) in enumerate(dataset): ... with tf.GradientTape() as tape: logits = model(images, training=True) loss_value = loss(logits, labels) ... grads = tape.gradient(loss_value, model.variables) optimizer.apply_gradients(zip(grads, model.variables), global_step=tf.train.get_or_create_global_step()) ``` The following example creates a multi-layer model that classifies the standard [MNIST handwritten digits](https://www.tensorflow.org/tutorials/layers). It demonstrates the optimizer and layer APIs to build trainable graphs in an eager execution environment. ### Train a model Even without training, call the model and inspect the output in eager execution: ```py # Create a tensor representing a blank image batch = tf.zeros([1, 1, 784]) print(batch.shape) # => (1, 1, 784) result = model(batch) # => tf.Tensor([[[ 0. 
0., ..., 0.]]], shape=(1, 1, 10), dtype=float32) ``` This example uses the [dataset.py module](https://github.com/tensorflow/models/blob/master/official/mnist/dataset.py) from the [TensorFlow MNIST example](https://github.com/tensorflow/models/tree/master/official/mnist); download this file to your local directory. Run the following to download the MNIST data files to your working directory and prepare a `tf.data.Dataset` for training: ```py import dataset # download dataset.py file dataset_train = dataset.train('./datasets').shuffle(60000).repeat(4).batch(32) ``` To train a model, define a loss function to optimize and then calculate gradients. Use an optimizer to update the variables: ```py def loss(model, x, y): prediction = model(x) return tf.losses.sparse_softmax_cross_entropy(labels=y, logits=prediction) def grad(model, inputs, targets): with tf.GradientTape() as tape: loss_value = loss(model, inputs, targets) return tape.gradient(loss_value, model.variables) optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001) x, y = iter(dataset_train).next() print("Initial loss: {:.3f}".format(loss(model, x, y))) # Training loop for (i, (x, y)) in enumerate(dataset_train): # Calculate derivatives of the input function with respect to its parameters. grads = grad(model, x, y) # Apply the gradient to the model optimizer.apply_gradients(zip(grads, model.variables), global_step=tf.train.get_or_create_global_step()) if i % 200 == 0: print("Loss at step {:04d}: {:.3f}".format(i, loss(model, x, y))) print("Final loss: {:.3f}".format(loss(model, x, y))) ``` Output (exact numbers may vary): ``` Initial loss: 2.674 Loss at step 0000: 2.593 Loss at step 0200: 2.143 Loss at step 0400: 2.009 Loss at step 0600: 2.103 Loss at step 0800: 1.621 Loss at step 1000: 1.695 ... 
Loss at step 6600: 0.602 Loss at step 6800: 0.557 Loss at step 7000: 0.499 Loss at step 7200: 0.744 Loss at step 7400: 0.681 Final loss: 0.670 ``` And for faster training, move the computation to a GPU: ```py with tf.device("/gpu:0"): for (i, (x, y)) in enumerate(dataset_train): # minimize() is equivalent to the grad() and apply_gradients() calls. optimizer.minimize(lambda: loss(model, x, y), global_step=tf.train.get_or_create_global_step()) ``` ### Variables and optimizers `tfe.Variable` objects store mutable `tf.Tensor` values accessed during training to make automatic differentiation easier. The parameters of a model can be encapsulated in classes as variables. Better encapsulate model parameters by using `tfe.Variable` with `tf.GradientTape`. For example, the automatic differentiation example above can be rewritten: ```py class Model(tf.keras.Model): def __init__(self): super(Model, self).__init__() self.W = tfe.Variable(5., name='weight') self.B = tfe.Variable(10., name='bias') def predict(self, inputs): return inputs * self.W + self.B # A toy dataset of points around 3 * x + 2 NUM_EXAMPLES = 2000 training_inputs = tf.random_normal([NUM_EXAMPLES]) noise = tf.random_normal([NUM_EXAMPLES]) training_outputs = training_inputs * 3 + 2 + noise # The loss function to be optimized def loss(model, inputs, targets): error = model.predict(inputs) - targets return tf.reduce_mean(tf.square(error)) def grad(model, inputs, targets): with tf.GradientTape() as tape: loss_value = loss(model, inputs, targets) return tape.gradient(loss_value, [model.W, model.B]) # Define: # 1. A model. # 2. Derivatives of a loss function with respect to model parameters. # 3. A strategy for updating the variables based on the derivatives. 
model = Model() optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01) print("Initial loss: {:.3f}".format(loss(model, training_inputs, training_outputs))) # Training loop for i in range(300): grads = grad(model, training_inputs, training_outputs) optimizer.apply_gradients(zip(grads, [model.W, model.B]), global_step=tf.train.get_or_create_global_step()) if i % 20 == 0: print("Loss at step {:03d}: {:.3f}".format(i, loss(model, training_inputs, training_outputs))) print("Final loss: {:.3f}".format(loss(model, training_inputs, training_outputs))) print("W = {}, B = {}".format(model.W.numpy(), model.B.numpy())) ``` Output (exact numbers may vary): ``` Initial loss: 69.066 Loss at step 000: 66.368 Loss at step 020: 30.107 Loss at step 040: 13.959 Loss at step 060: 6.769 Loss at step 080: 3.567 Loss at step 100: 2.141 Loss at step 120: 1.506 Loss at step 140: 1.223 Loss at step 160: 1.097 Loss at step 180: 1.041 Loss at step 200: 1.016 Loss at step 220: 1.005 Loss at step 240: 1.000 Loss at step 260: 0.998 Loss at step 280: 0.997 Final loss: 0.996 W = 2.99431324005, B = 2.02129220963 ``` ## Use objects for state during eager execution With graph execution, program state (such as the variables) is stored in global collections and their lifetime is managed by the `tf.Session` object. In contrast, during eager execution the lifetime of state objects is determined by the lifetime of their corresponding Python object. ### Variables are objects During eager execution, variables persist until the last reference to the object is removed, and is then deleted. ```py with tf.device("gpu:0"): v = tfe.Variable(tf.random_normal([1000, 1000])) v = None # v no longer takes up GPU memory ``` ### Object-based saving `tfe.Checkpoint` can save and restore `tfe.Variable`s to and from checkpoints: ```py x = tfe.Variable(10.) checkpoint = tfe.Checkpoint(x=x) # save as "x" x.assign(2.) # Assign a new value to the variables and save. 
save_path = checkpoint.save('./ckpt/') x.assign(11.) # Change the variable after saving. # Restore values from the checkpoint checkpoint.restore(save_path) print(x) # => 2.0 ``` To save and load models, `tfe.Checkpoint` stores the internal state of objects, without requiring hidden variables. To record the state of a `model`, an `optimizer`, and a global step, pass them to a `tfe.Checkpoint`: ```py model = MyModel() optimizer = tf.train.AdamOptimizer(learning_rate=0.001) checkpoint_dir = '/path/to/model_dir' checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt") root = tfe.Checkpoint(optimizer=optimizer, model=model, optimizer_step=tf.train.get_or_create_global_step()) root.save(file_prefix=checkpoint_prefix) # or root.restore(tf.train.latest_checkpoint(checkpoint_dir)) ``` ### Object-oriented metrics `tfe.metrics` are stored as objects. Update a metric by passing the new data to the callable, and retrieve the result using the `tfe.metrics.result` method, for example: ```py m = tfe.metrics.Mean("loss") m(0) m(5) m.result() # => 2.5 m([8, 9]) m.result() # => 5.5 ``` #### Summaries and TensorBoard @{$summaries_and_tensorboard$TensorBoard} is a visualization tool for understanding, debugging and optimizing the model training process. It uses summary events that are written while executing the program. `tf.contrib.summary` is compatible with both eager and graph execution environments. Summary operations, such as `tf.contrib.summary.scalar`, are inserted during model construction. For example, to record summaries once every 100 global steps: ```py writer = tf.contrib.summary.create_file_writer(logdir) global_step=tf.train.get_or_create_global_step()  # return global step var writer.set_as_default() for _ in range(iterations): global_step.assign_add(1) # Must include a record_summaries method with tf.contrib.summary.record_summaries_every_n_global_steps(100): # your model code goes here tf.contrib.summary.scalar('loss', loss) ... 
``` ## Advanced automatic differentiation topics ### Dynamic models `tf.GradientTape` can also be used in dynamic models. This example for a [backtracking line search](https://wikipedia.org/wiki/Backtracking_line_search) algorithm looks like normal NumPy code, except there are gradients and is differentiable, despite the complex control flow: ```py def line_search_step(fn, init_x, rate=1.0): with tf.GradientTape() as tape: # Variables are automatically recorded, but manually watch a tensor tape.watch(init_x) value = fn(init_x) grad, = tape.gradient(value, [init_x]) grad_norm = tf.reduce_sum(grad * grad) init_value = value while value > init_value - rate * grad_norm: x = init_x - rate * grad value = fn(x) rate /= 2.0 return x, value ``` ### Additional functions to compute gradients `tf.GradientTape` is a powerful interface for computing gradients, but there is another [Autograd](https://github.com/HIPS/autograd)-style API available for automatic differentiation. These functions are useful if writing math code with only tensors and gradient functions, and without `tfe.Variables`: * `tfe.gradients_function` —Returns a function that computes the derivatives of its input function parameter with respect to its arguments. The input function parameter must return a scalar value. When the returned function is invoked, it returns a list of `tf.Tensor` objects: one element for each argument of the input function. Since anything of interest must be passed as a function parameter, this becomes unwieldy if there's a dependency on many trainable parameters. * `tfe.value_and_gradients_function` —Similar to `tfe.gradients_function`, but when the returned function is invoked, it returns the value from the input function in addition to the list of derivatives of the input function with respect to its arguments. 
In the following example, `tfe.gradients_function` takes the `square` function as an argument and returns a function that computes the partial derivatives of `square` with respect to its inputs. To calculate the derivative of `square` at `3`, `grad(3.0)` returns `6`. ```py def square(x): return tf.multiply(x, x) grad = tfe.gradients_function(square) square(3.) # => 9.0 grad(3.) # => [6.0] # The second-order derivative of square: gradgrad = tfe.gradients_function(lambda x: grad(x)[0]) gradgrad(3.) # => [2.0] # The third-order derivative is None: gradgradgrad = tfe.gradients_function(lambda x: gradgrad(x)[0]) gradgradgrad(3.) # => [None] # With flow control: def abs(x): return x if x > 0. else -x grad = tfe.gradients_function(abs) grad(3.) # => [1.0] grad(-3.) # => [-1.0] ``` ### Custom gradients Custom gradients are an easy way to override gradients in eager and graph execution. Within the forward function, define the gradient with respect to the inputs, outputs, or intermediate results. For example, here's an easy way to clip the norm of the gradients in the backward pass: ```py @tf.custom_gradient def clip_gradient_by_norm(x, norm): y = tf.identity(x) def grad_fn(dresult): return [tf.clip_by_norm(dresult, norm), None] return y, grad_fn ``` Custom gradients are commonly used to provide a numerically stable gradient for a sequence of operations: ```py def log1pexp(x): return tf.log(1 + tf.exp(x)) grad_log1pexp = tfe.gradients_function(log1pexp) # The gradient computation works fine at x = 0. grad_log1pexp(0.) # => [0.5] # However, x = 100 fails because of numerical instability. grad_log1pexp(100.) # => [nan] ``` Here, the `log1pexp` function can be analytically simplified with a custom gradient. 
The implementation below reuses the value for `tf.exp(x)` that is computed during the forward pass—making it more efficient by eliminating redundant calculations: ```py @tf.custom_gradient def log1pexp(x): e = tf.exp(x) def grad(dy): return dy * (1 - 1 / (1 + e)) return tf.log(1 + e), grad grad_log1pexp = tfe.gradients_function(log1pexp) # As before, the gradient computation works fine at x = 0. grad_log1pexp(0.) # => [0.5] # And the gradient computation also works at x = 100. grad_log1pexp(100.) # => [1.0] ``` ## Performance Computation is automatically offloaded to GPUs during eager execution. If you want control over where a computation runs you can enclose it in a `tf.device('/gpu:0')` block (or the CPU equivalent): ```py import time def measure(x, steps): # TensorFlow initializes a GPU the first time it's used, exclude from timing. tf.matmul(x, x) start = time.time() for i in range(steps): x = tf.matmul(x, x) _ = x.numpy() # Make sure to execute op and not just enqueue it end = time.time() return end - start shape = (1000, 1000) steps = 200 print("Time to multiply a {} matrix by itself {} times:".format(shape, steps)) # Run on CPU: with tf.device("/cpu:0"): print("CPU: {} secs".format(measure(tf.random_normal(shape), steps))) # Run on GPU, if available: if tfe.num_gpus() > 0: with tf.device("/gpu:0"): print("GPU: {} secs".format(measure(tf.random_normal(shape), steps))) else: print("GPU: not found") ``` Output (exact numbers depend on hardware): ``` Time to multiply a (1000, 1000) matrix by itself 200 times: CPU: 4.614904403686523 secs GPU: 0.5581181049346924 secs ``` A `tf.Tensor` object can be copied to a different device to execute its operations: ```py x = tf.random_normal([10, 10]) x_gpu0 = x.gpu() x_cpu = x.cpu() _ = tf.matmul(x_cpu, x_cpu) # Runs on CPU _ = tf.matmul(x_gpu0, x_gpu0) # Runs on GPU:0 if tfe.num_gpus() > 1: x_gpu1 = x.gpu(1) _ = tf.matmul(x_gpu1, x_gpu1) # Runs on GPU:1 ``` ### Benchmarks For compute-heavy models, such as 
[ResNet50](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/eager/python/examples/resnet50) training on a GPU, eager execution performance is comparable to graph execution. But this gap grows larger for models with less computation and there is work to be done for optimizing hot code paths for models with lots of small operations. ## Work with graphs While eager execution makes development and debugging more interactive, TensorFlow graph execution has advantages for distributed training, performance optimizations, and production deployment. However, writing graph code can feel different than writing regular Python code and more difficult to debug. For building and training graph-constructed models, the Python program first builds a graph representing the computation, then invokes `Session.run` to send the graph for execution on the C++-based runtime. This provides: * Automatic differentiation using static autodiff. * Simple deployment to a platform independent server. * Graph-based optimizations (common subexpression elimination, constant-folding, etc.). * Compilation and kernel fusion. * Automatic distribution and replication (placing nodes on the distributed system). Deploying code written for eager execution is more difficult: either generate a graph from the model, or run the Python runtime and code directly on the server. ### Write compatible code The same code written for eager execution will also build a graph during graph execution. Do this by simply running the same code in a new Python session where eager execution is not enabled. Most TensorFlow operations work during eager execution, but there are some things to keep in mind: * Use `tf.data` for input processing instead of queues. It's faster and easier. * Use object-oriented layer APIs—like `tf.keras.layers` and `tf.keras.Model`—since they have explicit storage for variables. * Most model code works the same during eager and graph execution, but there are exceptions. 
(For example, dynamic models using Python control flow to change the computation based on inputs.) * Once eager execution is enabled with `tf.enable_eager_execution`, it cannot be turned off. Start a new Python session to return to graph execution. It's best to write code for both eager execution *and* graph execution. This gives you eager's interactive experimentation and debuggability with the distributed performance benefits of graph execution. Write, debug, and iterate in eager execution, then import the model graph for production deployment. Use `tfe.Checkpoint` to save and restore model variables, this allows movement between eager and graph execution environments. See the examples in: [tensorflow/contrib/eager/python/examples](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/eager/python/examples). ### Use eager execution in a graph environment Selectively enable eager execution in a TensorFlow graph environment using `tfe.py_func`. This is used when `tf.enable_eager_execution()` has *not* been called. ```py def my_py_func(x): x = tf.matmul(x, x) # You can use tf ops print(x) # but it's eager! return x with tf.Session() as sess: x = tf.placeholder(dtype=tf.float32) # Call eager function in graph! pf = tfe.py_func(my_py_func, [x], tf.float32) sess.run(pf, feed_dict={x: [[2.0]]}) # [[4.0]] ```
{ "content_hash": "a460b64883ed89b1c5bbefc623e63a76", "timestamp": "", "source": "github", "line_count": 846, "max_line_length": 138, "avg_line_length": 32.61347517730496, "alnum_prop": 0.7234967924323149, "repo_name": "eaplatanios/tensorflow", "id": "595e6be4af78d7d684ddeca0adea59e5a754134d", "size": "27632", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "tensorflow/docs_src/programmers_guide/eager.md", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "9274" }, { "name": "C", "bytes": "163987" }, { "name": "C++", "bytes": "34944901" }, { "name": "CMake", "bytes": "5123" }, { "name": "CSS", "bytes": "9206" }, { "name": "Go", "bytes": "1047216" }, { "name": "HTML", "bytes": "4680032" }, { "name": "Java", "bytes": "423531" }, { "name": "JavaScript", "bytes": "3127" }, { "name": "Jupyter Notebook", "bytes": "1833814" }, { "name": "LLVM", "bytes": "6536" }, { "name": "Objective-C", "bytes": "7056" }, { "name": "Objective-C++", "bytes": "63210" }, { "name": "Perl", "bytes": "6179" }, { "name": "Perl 6", "bytes": "1357" }, { "name": "PureBasic", "bytes": "24932" }, { "name": "Python", "bytes": "19718973" }, { "name": "Ruby", "bytes": "327" }, { "name": "Scala", "bytes": "3606806" }, { "name": "Shell", "bytes": "352897" }, { "name": "Smarty", "bytes": "6870" } ], "symlink_target": "" }
ACCEPTED #### According to NUB Generator [autonym] #### Published in null #### Original name null ### Remarks null
{ "content_hash": "6f6b7d5dea620e8c752107b41452c1a3", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 23, "avg_line_length": 9.076923076923077, "alnum_prop": 0.6779661016949152, "repo_name": "mdoering/backbone", "id": "aae6f0583d7d9608f22b21833c842940cddb5e36", "size": "169", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "life/Plantae/Bacillariophyta/Bacillariophyceae/Naviculales/Naviculaceae/Navicula/Navicula maxima/Navicula maxima maxima/README.md", "mode": "33188", "license": "apache-2.0", "language": [], "symlink_target": "" }
<!-- Auto Generated Below --> ## Properties | Property | Attribute | Description | Type | Default | | ---------- | ---------- | ----------- | -------- | ----------- | | `position` | `position` | | `string` | `undefined` | ---------------------------------------------- *Built with [StencilJS](https://stenciljs.com/)*
{ "content_hash": "147d003884a8c0a9bc2419753dd756f9", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 66, "avg_line_length": 26.692307692307693, "alnum_prop": 0.38328530259365995, "repo_name": "BlazeCSS/blaze", "id": "5739739a998275f2228fa2274610fd5e34ad3df2", "size": "363", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "src/components/alert/readme.md", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "101868" }, { "name": "JavaScript", "bytes": "1904" } ], "symlink_target": "" }
package edu.toronto.cs.xcurator.discoverer; import edu.toronto.cs.xcurator.common.DataDocument; import edu.toronto.cs.xcurator.mapping.Attribute; import edu.toronto.cs.xcurator.mapping.Entity; import edu.toronto.cs.xcurator.mapping.Mapping; import java.util.Iterator; import java.util.List; /** * * @author ekzhu */ public class KeyAttributeDiscovery implements MappingDiscoveryStep { @Override public void process(List<DataDocument> dataDocuments, Mapping mapping) { Iterator<Entity> it = mapping.getEntityIterator(); while (it.hasNext()) { // For each entity, find attribute whose instances are unique // That is, the cardinality of the attribute instances should equal // to the cardinality of the entity instances // The value attribute should not be used as key. // Its instance count should be zero Entity entity = it.next(); int instanceCount = entity.getXmlInstanceCount(); Iterator<Attribute> attrIt = entity.getAttributeIterator(); while (attrIt.hasNext()) { Attribute attr = attrIt.next(); // This is a hack, the key identification algorithm needs to be // improved. if (attr.getInstances().size() == instanceCount && attr.getId().endsWith(".id")) { attr.asKey(); } } } } }
{ "content_hash": "b7901a6f027e73d24ae8ccda6d45babe", "timestamp": "", "source": "github", "line_count": 41, "max_line_length": 80, "avg_line_length": 35.80487804878049, "alnum_prop": 0.6198910081743869, "repo_name": "Aleyasen/xcurator", "id": "1e0d5d7419147ebade3030f7c588569d3a9f14c2", "size": "2110", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/edu/toronto/cs/xcurator/discoverer/KeyAttributeDiscovery.java", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "26" }, { "name": "Java", "bytes": "614961" }, { "name": "Shell", "bytes": "40" } ], "symlink_target": "" }
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <!-- NewPage --> <html lang="en"> <head> <!-- Generated by javadoc (version 1.7.0_75) on Sun Mar 01 12:04:16 AEDT 2015 --> <title>X509CertificateHolderSelector (Bouncy Castle Library 1.52 API Specification)</title> <meta name="date" content="2015-03-01"> <link rel="stylesheet" type="text/css" href="../../../../stylesheet.css" title="Style"> </head> <body> <script type="text/javascript"><!-- if (location.href.indexOf('is-external=true') == -1) { parent.document.title="X509CertificateHolderSelector (Bouncy Castle Library 1.52 API Specification)"; } //--> </script> <noscript> <div>JavaScript is disabled on your browser.</div> </noscript> <!-- ========= START OF TOP NAVBAR ======= --> <div class="topNav"><a name="navbar_top"> <!-- --> </a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a href="../../../../overview-summary.html">Overview</a></li> <li><a href="package-summary.html">Package</a></li> <li class="navBarCell1Rev">Class</li> <li><a href="package-tree.html">Tree</a></li> <li><a href="../../../../deprecated-list.html">Deprecated</a></li> <li><a href="../../../../index-all.html">Index</a></li> <li><a href="../../../../help-doc.html">Help</a></li> </ul> <div class="aboutLanguage"><em><b>Bouncy Castle Cryptography Library 1.52</b></em></div> </div> <div class="subNav"> <ul class="navList"> <li><a href="../../../../org/bouncycastle/cert/selector/X509AttributeCertificateHolderSelectorBuilder.html" title="class in org.bouncycastle.cert.selector"><span class="strong">Prev Class</span></a></li> <li>Next Class</li> </ul> <ul class="navList"> <li><a href="../../../../index.html?org/bouncycastle/cert/selector/X509CertificateHolderSelector.html" target="_top">Frames</a></li> <li><a href="X509CertificateHolderSelector.html" target="_top">No Frames</a></li> </ul> <ul 
class="navList" id="allclasses_navbar_top"> <li><a href="../../../../allclasses-noframe.html">All Classes</a></li> </ul> <div> <script type="text/javascript"><!-- allClassesLink = document.getElementById("allclasses_navbar_top"); if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } //--> </script> </div> <div> <ul class="subNavList"> <li>Summary:&nbsp;</li> <li>Nested&nbsp;|&nbsp;</li> <li>Field&nbsp;|&nbsp;</li> <li><a href="#constructor_summary">Constr</a>&nbsp;|&nbsp;</li> <li><a href="#method_summary">Method</a></li> </ul> <ul class="subNavList"> <li>Detail:&nbsp;</li> <li>Field&nbsp;|&nbsp;</li> <li><a href="#constructor_detail">Constr</a>&nbsp;|&nbsp;</li> <li><a href="#method_detail">Method</a></li> </ul> </div> <a name="skip-navbar_top"> <!-- --> </a></div> <!-- ========= END OF TOP NAVBAR ========= --> <!-- ======== START OF CLASS DATA ======== --> <div class="header"> <div class="subTitle">org.bouncycastle.cert.selector</div> <h2 title="Class X509CertificateHolderSelector" class="title">Class X509CertificateHolderSelector</h2> </div> <div class="contentContainer"> <ul class="inheritance"> <li>java.lang.Object</li> <li> <ul class="inheritance"> <li>org.bouncycastle.cert.selector.X509CertificateHolderSelector</li> </ul> </li> </ul> <div class="description"> <ul class="blockList"> <li class="blockList"> <dl> <dt>All Implemented Interfaces:</dt> <dd>java.lang.Cloneable, org.bouncycastle.util.Selector</dd> </dl> <dl> <dt>Direct Known Subclasses:</dt> <dd><a href="../../../../org/bouncycastle/cert/selector/jcajce/JcaX509CertificateHolderSelector.html" title="class in org.bouncycastle.cert.selector.jcajce">JcaX509CertificateHolderSelector</a></dd> </dl> <hr> <br> <pre>public class <span class="strong">X509CertificateHolderSelector</span> extends java.lang.Object implements org.bouncycastle.util.Selector</pre> <div class="block">a basic index for a X509CertificateHolder class</div> </li> </ul> </div> <div 
class="summary"> <ul class="blockList"> <li class="blockList"> <!-- ======== CONSTRUCTOR SUMMARY ======== --> <ul class="blockList"> <li class="blockList"><a name="constructor_summary"> <!-- --> </a> <h3>Constructor Summary</h3> <table class="overviewSummary" border="0" cellpadding="3" cellspacing="0" summary="Constructor Summary table, listing constructors, and an explanation"> <caption><span>Constructors</span><span class="tabEnd">&nbsp;</span></caption> <tr> <th class="colOne" scope="col">Constructor and Description</th> </tr> <tr class="altColor"> <td class="colOne"><code><strong><a href="../../../../org/bouncycastle/cert/selector/X509CertificateHolderSelector.html#X509CertificateHolderSelector(byte[])">X509CertificateHolderSelector</a></strong>(byte[]&nbsp;subjectKeyId)</code> <div class="block">Construct a selector with the value of a public key's subjectKeyId.</div> </td> </tr> <tr class="rowColor"> <td class="colOne"><code><strong><a href="../../../../org/bouncycastle/cert/selector/X509CertificateHolderSelector.html#X509CertificateHolderSelector(org.bouncycastle.asn1.x500.X500Name,%20java.math.BigInteger)">X509CertificateHolderSelector</a></strong>(org.bouncycastle.asn1.x500.X500Name&nbsp;issuer, java.math.BigInteger&nbsp;serialNumber)</code> <div class="block">Construct a signer ID based on the issuer and serial number of the signer's associated certificate.</div> </td> </tr> <tr class="altColor"> <td class="colOne"><code><strong><a href="../../../../org/bouncycastle/cert/selector/X509CertificateHolderSelector.html#X509CertificateHolderSelector(org.bouncycastle.asn1.x500.X500Name,%20java.math.BigInteger,%20byte[])">X509CertificateHolderSelector</a></strong>(org.bouncycastle.asn1.x500.X500Name&nbsp;issuer, java.math.BigInteger&nbsp;serialNumber, byte[]&nbsp;subjectKeyId)</code> <div class="block">Construct a signer ID based on the issuer and serial number of the signer's associated certificate.</div> </td> </tr> </table> </li> </ul> <!-- ========== METHOD 
SUMMARY =========== --> <ul class="blockList"> <li class="blockList"><a name="method_summary"> <!-- --> </a> <h3>Method Summary</h3> <table class="overviewSummary" border="0" cellpadding="3" cellspacing="0" summary="Method Summary table, listing methods, and an explanation"> <caption><span>Methods</span><span class="tabEnd">&nbsp;</span></caption> <tr> <th class="colFirst" scope="col">Modifier and Type</th> <th class="colLast" scope="col">Method and Description</th> </tr> <tr class="altColor"> <td class="colFirst"><code>java.lang.Object</code></td> <td class="colLast"><code><strong><a href="../../../../org/bouncycastle/cert/selector/X509CertificateHolderSelector.html#clone()">clone</a></strong>()</code>&nbsp;</td> </tr> <tr class="rowColor"> <td class="colFirst"><code>boolean</code></td> <td class="colLast"><code><strong><a href="../../../../org/bouncycastle/cert/selector/X509CertificateHolderSelector.html#equals(java.lang.Object)">equals</a></strong>(java.lang.Object&nbsp;o)</code>&nbsp;</td> </tr> <tr class="altColor"> <td class="colFirst"><code>org.bouncycastle.asn1.x500.X500Name</code></td> <td class="colLast"><code><strong><a href="../../../../org/bouncycastle/cert/selector/X509CertificateHolderSelector.html#getIssuer()">getIssuer</a></strong>()</code>&nbsp;</td> </tr> <tr class="rowColor"> <td class="colFirst"><code>java.math.BigInteger</code></td> <td class="colLast"><code><strong><a href="../../../../org/bouncycastle/cert/selector/X509CertificateHolderSelector.html#getSerialNumber()">getSerialNumber</a></strong>()</code>&nbsp;</td> </tr> <tr class="altColor"> <td class="colFirst"><code>byte[]</code></td> <td class="colLast"><code><strong><a href="../../../../org/bouncycastle/cert/selector/X509CertificateHolderSelector.html#getSubjectKeyIdentifier()">getSubjectKeyIdentifier</a></strong>()</code>&nbsp;</td> </tr> <tr class="rowColor"> <td class="colFirst"><code>int</code></td> <td class="colLast"><code><strong><a 
href="../../../../org/bouncycastle/cert/selector/X509CertificateHolderSelector.html#hashCode()">hashCode</a></strong>()</code>&nbsp;</td> </tr> <tr class="altColor"> <td class="colFirst"><code>boolean</code></td> <td class="colLast"><code><strong><a href="../../../../org/bouncycastle/cert/selector/X509CertificateHolderSelector.html#match(java.lang.Object)">match</a></strong>(java.lang.Object&nbsp;obj)</code>&nbsp;</td> </tr> </table> <ul class="blockList"> <li class="blockList"><a name="methods_inherited_from_class_java.lang.Object"> <!-- --> </a> <h3>Methods inherited from class&nbsp;java.lang.Object</h3> <code>finalize, getClass, notify, notifyAll, toString, wait, wait, wait</code></li> </ul> </li> </ul> </li> </ul> </div> <div class="details"> <ul class="blockList"> <li class="blockList"> <!-- ========= CONSTRUCTOR DETAIL ======== --> <ul class="blockList"> <li class="blockList"><a name="constructor_detail"> <!-- --> </a> <h3>Constructor Detail</h3> <a name="X509CertificateHolderSelector(byte[])"> <!-- --> </a> <ul class="blockList"> <li class="blockList"> <h4>X509CertificateHolderSelector</h4> <pre>public&nbsp;X509CertificateHolderSelector(byte[]&nbsp;subjectKeyId)</pre> <div class="block">Construct a selector with the value of a public key's subjectKeyId.</div> <dl><dt><span class="strong">Parameters:</span></dt><dd><code>subjectKeyId</code> - a subjectKeyId</dd></dl> </li> </ul> <a name="X509CertificateHolderSelector(org.bouncycastle.asn1.x500.X500Name, java.math.BigInteger)"> <!-- --> </a> <ul class="blockList"> <li class="blockList"> <h4>X509CertificateHolderSelector</h4> <pre>public&nbsp;X509CertificateHolderSelector(org.bouncycastle.asn1.x500.X500Name&nbsp;issuer, java.math.BigInteger&nbsp;serialNumber)</pre> <div class="block">Construct a signer ID based on the issuer and serial number of the signer's associated certificate.</div> <dl><dt><span class="strong">Parameters:</span></dt><dd><code>issuer</code> - the issuer of the signer's associated 
certificate.</dd><dd><code>serialNumber</code> - the serial number of the signer's associated certificate.</dd></dl> </li> </ul> <a name="X509CertificateHolderSelector(org.bouncycastle.asn1.x500.X500Name, java.math.BigInteger, byte[])"> <!-- --> </a> <ul class="blockListLast"> <li class="blockList"> <h4>X509CertificateHolderSelector</h4> <pre>public&nbsp;X509CertificateHolderSelector(org.bouncycastle.asn1.x500.X500Name&nbsp;issuer, java.math.BigInteger&nbsp;serialNumber, byte[]&nbsp;subjectKeyId)</pre> <div class="block">Construct a signer ID based on the issuer and serial number of the signer's associated certificate.</div> <dl><dt><span class="strong">Parameters:</span></dt><dd><code>issuer</code> - the issuer of the signer's associated certificate.</dd><dd><code>serialNumber</code> - the serial number of the signer's associated certificate.</dd><dd><code>subjectKeyId</code> - the subject key identifier to use to match the signers associated certificate.</dd></dl> </li> </ul> </li> </ul> <!-- ============ METHOD DETAIL ========== --> <ul class="blockList"> <li class="blockList"><a name="method_detail"> <!-- --> </a> <h3>Method Detail</h3> <a name="getIssuer()"> <!-- --> </a> <ul class="blockList"> <li class="blockList"> <h4>getIssuer</h4> <pre>public&nbsp;org.bouncycastle.asn1.x500.X500Name&nbsp;getIssuer()</pre> </li> </ul> <a name="getSerialNumber()"> <!-- --> </a> <ul class="blockList"> <li class="blockList"> <h4>getSerialNumber</h4> <pre>public&nbsp;java.math.BigInteger&nbsp;getSerialNumber()</pre> </li> </ul> <a name="getSubjectKeyIdentifier()"> <!-- --> </a> <ul class="blockList"> <li class="blockList"> <h4>getSubjectKeyIdentifier</h4> <pre>public&nbsp;byte[]&nbsp;getSubjectKeyIdentifier()</pre> </li> </ul> <a name="hashCode()"> <!-- --> </a> <ul class="blockList"> <li class="blockList"> <h4>hashCode</h4> <pre>public&nbsp;int&nbsp;hashCode()</pre> <dl> <dt><strong>Overrides:</strong></dt> <dd><code>hashCode</code>&nbsp;in 
class&nbsp;<code>java.lang.Object</code></dd> </dl> </li> </ul> <a name="equals(java.lang.Object)"> <!-- --> </a> <ul class="blockList"> <li class="blockList"> <h4>equals</h4> <pre>public&nbsp;boolean&nbsp;equals(java.lang.Object&nbsp;o)</pre> <dl> <dt><strong>Overrides:</strong></dt> <dd><code>equals</code>&nbsp;in class&nbsp;<code>java.lang.Object</code></dd> </dl> </li> </ul> <a name="match(java.lang.Object)"> <!-- --> </a> <ul class="blockList"> <li class="blockList"> <h4>match</h4> <pre>public&nbsp;boolean&nbsp;match(java.lang.Object&nbsp;obj)</pre> <dl> <dt><strong>Specified by:</strong></dt> <dd><code>match</code>&nbsp;in interface&nbsp;<code>org.bouncycastle.util.Selector</code></dd> </dl> </li> </ul> <a name="clone()"> <!-- --> </a> <ul class="blockListLast"> <li class="blockList"> <h4>clone</h4> <pre>public&nbsp;java.lang.Object&nbsp;clone()</pre> <dl> <dt><strong>Specified by:</strong></dt> <dd><code>clone</code>&nbsp;in interface&nbsp;<code>org.bouncycastle.util.Selector</code></dd> <dt><strong>Overrides:</strong></dt> <dd><code>clone</code>&nbsp;in class&nbsp;<code>java.lang.Object</code></dd> </dl> </li> </ul> </li> </ul> </li> </ul> </div> </div> <!-- ========= END OF CLASS DATA ========= --> <!-- ======= START OF BOTTOM NAVBAR ====== --> <div class="bottomNav"><a name="navbar_bottom"> <!-- --> </a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a href="../../../../overview-summary.html">Overview</a></li> <li><a href="package-summary.html">Package</a></li> <li class="navBarCell1Rev">Class</li> <li><a href="package-tree.html">Tree</a></li> <li><a href="../../../../deprecated-list.html">Deprecated</a></li> <li><a href="../../../../index-all.html">Index</a></li> <li><a href="../../../../help-doc.html">Help</a></li> </ul> <div class="aboutLanguage"><em><b>Bouncy Castle Cryptography Library 1.52</b></em></div> </div> <div class="subNav"> <ul 
class="navList"> <li><a href="../../../../org/bouncycastle/cert/selector/X509AttributeCertificateHolderSelectorBuilder.html" title="class in org.bouncycastle.cert.selector"><span class="strong">Prev Class</span></a></li> <li>Next Class</li> </ul> <ul class="navList"> <li><a href="../../../../index.html?org/bouncycastle/cert/selector/X509CertificateHolderSelector.html" target="_top">Frames</a></li> <li><a href="X509CertificateHolderSelector.html" target="_top">No Frames</a></li> </ul> <ul class="navList" id="allclasses_navbar_bottom"> <li><a href="../../../../allclasses-noframe.html">All Classes</a></li> </ul> <div> <script type="text/javascript"><!-- allClassesLink = document.getElementById("allclasses_navbar_bottom"); if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } //--> </script> </div> <div> <ul class="subNavList"> <li>Summary:&nbsp;</li> <li>Nested&nbsp;|&nbsp;</li> <li>Field&nbsp;|&nbsp;</li> <li><a href="#constructor_summary">Constr</a>&nbsp;|&nbsp;</li> <li><a href="#method_summary">Method</a></li> </ul> <ul class="subNavList"> <li>Detail:&nbsp;</li> <li>Field&nbsp;|&nbsp;</li> <li><a href="#constructor_detail">Constr</a>&nbsp;|&nbsp;</li> <li><a href="#method_detail">Method</a></li> </ul> </div> <a name="skip-navbar_bottom"> <!-- --> </a></div> <!-- ======== END OF BOTTOM NAVBAR ======= --> </body> </html>
{ "content_hash": "c0939a8c350188b038dfeda1c2c6e07a", "timestamp": "", "source": "github", "line_count": 408, "max_line_length": 347, "avg_line_length": 38.134803921568626, "alnum_prop": 0.6715084516999807, "repo_name": "GaloisInc/hacrypto", "id": "4281f4e585dbb78b6e8e18e24b2d878c0a76d0d1", "size": "15559", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/Java/BouncyCastle/bcpkix-jdk15on-152/javadoc/org/bouncycastle/cert/selector/X509CertificateHolderSelector.html", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "AGS Script", "bytes": "62991" }, { "name": "Ada", "bytes": "443" }, { "name": "AppleScript", "bytes": "4518" }, { "name": "Assembly", "bytes": "25398957" }, { "name": "Awk", "bytes": "36188" }, { "name": "Batchfile", "bytes": "530568" }, { "name": "C", "bytes": "344517599" }, { "name": "C#", "bytes": "7553169" }, { "name": "C++", "bytes": "36635617" }, { "name": "CMake", "bytes": "213895" }, { "name": "CSS", "bytes": "139462" }, { "name": "Coq", "bytes": "320964" }, { "name": "Cuda", "bytes": "103316" }, { "name": "DIGITAL Command Language", "bytes": "1545539" }, { "name": "DTrace", "bytes": "33228" }, { "name": "Emacs Lisp", "bytes": "22827" }, { "name": "GDB", "bytes": "93449" }, { "name": "Gnuplot", "bytes": "7195" }, { "name": "Go", "bytes": "393057" }, { "name": "HTML", "bytes": "41466430" }, { "name": "Hack", "bytes": "22842" }, { "name": "Haskell", "bytes": "64053" }, { "name": "IDL", "bytes": "3205" }, { "name": "Java", "bytes": "49060925" }, { "name": "JavaScript", "bytes": "3476841" }, { "name": "Jolie", "bytes": "412" }, { "name": "Lex", "bytes": "26290" }, { "name": "Logos", "bytes": "108920" }, { "name": "Lua", "bytes": "427" }, { "name": "M4", "bytes": "2508986" }, { "name": "Makefile", "bytes": "29393197" }, { "name": "Mathematica", "bytes": "48978" }, { "name": "Mercury", "bytes": "2053" }, { "name": "Module Management System", "bytes": "1313" }, { "name": "NSIS", "bytes": "19051" }, { "name": 
"OCaml", "bytes": "981255" }, { "name": "Objective-C", "bytes": "4099236" }, { "name": "Objective-C++", "bytes": "243505" }, { "name": "PHP", "bytes": "22677635" }, { "name": "Pascal", "bytes": "99565" }, { "name": "Perl", "bytes": "35079773" }, { "name": "Prolog", "bytes": "350124" }, { "name": "Python", "bytes": "1242241" }, { "name": "Rebol", "bytes": "106436" }, { "name": "Roff", "bytes": "16457446" }, { "name": "Ruby", "bytes": "49694" }, { "name": "Scheme", "bytes": "138999" }, { "name": "Shell", "bytes": "10192290" }, { "name": "Smalltalk", "bytes": "22630" }, { "name": "Smarty", "bytes": "51246" }, { "name": "SourcePawn", "bytes": "542790" }, { "name": "SystemVerilog", "bytes": "95379" }, { "name": "Tcl", "bytes": "35696" }, { "name": "TeX", "bytes": "2351627" }, { "name": "Verilog", "bytes": "91541" }, { "name": "Visual Basic", "bytes": "88541" }, { "name": "XS", "bytes": "38300" }, { "name": "Yacc", "bytes": "132970" }, { "name": "eC", "bytes": "33673" }, { "name": "q", "bytes": "145272" }, { "name": "sed", "bytes": "1196" } ], "symlink_target": "" }
/* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.prestosql.execution; import com.google.common.base.Splitter; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMultimap; import com.google.common.collect.Iterators; import com.google.common.collect.Multimap; import io.prestosql.client.NodeVersion; import io.prestosql.connector.CatalogName; import io.prestosql.execution.scheduler.FlatNetworkTopology; import io.prestosql.execution.scheduler.LegacyNetworkTopology; import io.prestosql.execution.scheduler.NetworkLocation; import io.prestosql.execution.scheduler.NetworkTopology; import io.prestosql.execution.scheduler.NodeScheduler; import io.prestosql.execution.scheduler.NodeSchedulerConfig; import io.prestosql.execution.scheduler.NodeSelector; import io.prestosql.metadata.InMemoryNodeManager; import io.prestosql.metadata.InternalNode; import io.prestosql.metadata.Split; import io.prestosql.spi.HostAddress; import io.prestosql.spi.connector.ConnectorSplit; import io.prestosql.sql.planner.plan.PlanNodeId; import io.prestosql.testing.TestingTransactionHandle; import io.prestosql.util.FinalizerService; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; import org.openjdk.jmh.annotations.Fork; import org.openjdk.jmh.annotations.Measurement; import org.openjdk.jmh.annotations.Mode; import org.openjdk.jmh.annotations.OperationsPerInvocation; import org.openjdk.jmh.annotations.OutputTimeUnit; 
import org.openjdk.jmh.annotations.Param; import org.openjdk.jmh.annotations.Scope; import org.openjdk.jmh.annotations.Setup; import org.openjdk.jmh.annotations.State; import org.openjdk.jmh.annotations.TearDown; import org.openjdk.jmh.annotations.Warmup; import org.openjdk.jmh.runner.Runner; import org.openjdk.jmh.runner.options.Options; import org.openjdk.jmh.runner.options.OptionsBuilder; import org.openjdk.jmh.runner.options.VerboseMode; import java.net.URI; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; import static io.airlift.concurrent.Threads.daemonThreadsNamed; import static io.prestosql.execution.scheduler.NodeSchedulerConfig.NetworkTopologyType.BENCHMARK; import static io.prestosql.execution.scheduler.NodeSchedulerConfig.NetworkTopologyType.FLAT; import static io.prestosql.execution.scheduler.NodeSchedulerConfig.NetworkTopologyType.LEGACY; import static java.util.concurrent.Executors.newCachedThreadPool; import static java.util.concurrent.Executors.newScheduledThreadPool; @SuppressWarnings("MethodMayBeStatic") @State(Scope.Thread) @OutputTimeUnit(TimeUnit.MICROSECONDS) @Fork(1) @Warmup(iterations = 10, time = 500, timeUnit = TimeUnit.MILLISECONDS) @Measurement(iterations = 10, time = 500, timeUnit = TimeUnit.MILLISECONDS) @BenchmarkMode(Mode.AverageTime) public class BenchmarkNodeScheduler { private static final int MAX_SPLITS_PER_NODE = 100; private static final int MAX_PENDING_SPLITS_PER_TASK_PER_NODE = 50; private static final int NODES = 200; private static final int DATA_NODES = 10_000; private static final int RACKS = DATA_NODES / 25; private static final int SPLITS = NODES * (MAX_SPLITS_PER_NODE + MAX_PENDING_SPLITS_PER_TASK_PER_NODE / 3); private static final int SPLIT_BATCH_SIZE = 100; private static final 
CatalogName CONNECTOR_ID = new CatalogName("test_connector_id"); @Benchmark @OperationsPerInvocation(SPLITS) public Object benchmark(BenchmarkData data) { List<RemoteTask> remoteTasks = ImmutableList.copyOf(data.getTaskMap().values()); Iterator<MockRemoteTaskFactory.MockRemoteTask> finishingTask = Iterators.cycle(data.getTaskMap().values()); Iterator<Split> splits = data.getSplits().iterator(); Set<Split> batch = new HashSet<>(); while (splits.hasNext() || !batch.isEmpty()) { Multimap<InternalNode, Split> assignments = data.getNodeSelector().computeAssignments(batch, remoteTasks).getAssignments(); for (InternalNode node : assignments.keySet()) { MockRemoteTaskFactory.MockRemoteTask remoteTask = data.getTaskMap().get(node); remoteTask.addSplits(ImmutableMultimap.<PlanNodeId, Split>builder() .putAll(new PlanNodeId("sourceId"), assignments.get(node)) .build()); remoteTask.startSplits(MAX_SPLITS_PER_NODE); } if (assignments.size() == batch.size()) { batch.clear(); } else { batch.removeAll(assignments.values()); } while (batch.size() < SPLIT_BATCH_SIZE && splits.hasNext()) { batch.add(splits.next()); } finishingTask.next().finishSplits((int) Math.ceil(MAX_SPLITS_PER_NODE / 50.0)); } return remoteTasks; } @SuppressWarnings("FieldMayBeFinal") @State(Scope.Thread) public static class BenchmarkData { @Param({LEGACY, BENCHMARK, FLAT}) private String topologyName = LEGACY; private FinalizerService finalizerService = new FinalizerService(); private NodeSelector nodeSelector; private Map<InternalNode, MockRemoteTaskFactory.MockRemoteTask> taskMap = new HashMap<>(); private List<Split> splits = new ArrayList<>(); @Setup public void setup() { TestingTransactionHandle transactionHandle = TestingTransactionHandle.create(); finalizerService.start(); NodeTaskMap nodeTaskMap = new NodeTaskMap(finalizerService); ImmutableList.Builder<InternalNode> nodeBuilder = ImmutableList.builder(); for (int i = 0; i < NODES; i++) { nodeBuilder.add(new InternalNode("node" + i, URI.create("http://" + 
addressForHost(i).getHostText()), NodeVersion.UNKNOWN, false)); } List<InternalNode> nodes = nodeBuilder.build(); MockRemoteTaskFactory remoteTaskFactory = new MockRemoteTaskFactory( newCachedThreadPool(daemonThreadsNamed("remoteTaskExecutor-%s")), newScheduledThreadPool(2, daemonThreadsNamed("remoteTaskScheduledExecutor-%s"))); for (int i = 0; i < nodes.size(); i++) { InternalNode node = nodes.get(i); ImmutableList.Builder<Split> initialSplits = ImmutableList.builder(); for (int j = 0; j < MAX_SPLITS_PER_NODE + MAX_PENDING_SPLITS_PER_TASK_PER_NODE; j++) { initialSplits.add(new Split(CONNECTOR_ID, new TestSplitRemote(i), Lifespan.taskWide())); } TaskId taskId = new TaskId("test", 1, i); MockRemoteTaskFactory.MockRemoteTask remoteTask = remoteTaskFactory.createTableScanTask(taskId, node, initialSplits.build(), nodeTaskMap.createPartitionedSplitCountTracker(node, taskId)); nodeTaskMap.addTask(node, remoteTask); taskMap.put(node, remoteTask); } for (int i = 0; i < SPLITS; i++) { splits.add(new Split(CONNECTOR_ID, new TestSplitRemote(ThreadLocalRandom.current().nextInt(DATA_NODES)), Lifespan.taskWide())); } InMemoryNodeManager nodeManager = new InMemoryNodeManager(); nodeManager.addNode(CONNECTOR_ID, nodes); NodeScheduler nodeScheduler = new NodeScheduler(getNetworkTopology(), nodeManager, getNodeSchedulerConfig(), nodeTaskMap); nodeSelector = nodeScheduler.createNodeSelector(CONNECTOR_ID); } @TearDown public void tearDown() { finalizerService.destroy(); } private NodeSchedulerConfig getNodeSchedulerConfig() { return new NodeSchedulerConfig() .setMaxSplitsPerNode(MAX_SPLITS_PER_NODE) .setIncludeCoordinator(false) .setNetworkTopology(topologyName) .setMaxPendingSplitsPerTask(MAX_PENDING_SPLITS_PER_TASK_PER_NODE); } private NetworkTopology getNetworkTopology() { NetworkTopology topology; switch (topologyName) { case LEGACY: topology = new LegacyNetworkTopology(); break; case FLAT: topology = new FlatNetworkTopology(); break; case BENCHMARK: topology = new 
BenchmarkNetworkTopology(); break; default: throw new IllegalStateException(); } return topology; } public Map<InternalNode, MockRemoteTaskFactory.MockRemoteTask> getTaskMap() { return taskMap; } public NodeSelector getNodeSelector() { return nodeSelector; } public List<Split> getSplits() { return splits; } } public static void main(String[] args) throws Throwable { Options options = new OptionsBuilder() .verbosity(VerboseMode.NORMAL) .include(".*" + BenchmarkNodeScheduler.class.getSimpleName() + ".*") .build(); new Runner(options).run(); } private static class BenchmarkNetworkTopology implements NetworkTopology { @Override public NetworkLocation locate(HostAddress address) { List<String> parts = new ArrayList<>(ImmutableList.copyOf(Splitter.on(".").split(address.getHostText()))); Collections.reverse(parts); return NetworkLocation.create(parts); } @Override public List<String> getLocationSegmentNames() { return ImmutableList.of("rack", "machine"); } } private static class TestSplitRemote implements ConnectorSplit { private final List<HostAddress> hosts; public TestSplitRemote(int dataHost) { hosts = ImmutableList.of(addressForHost(dataHost)); } @Override public boolean isRemotelyAccessible() { return true; } @Override public List<HostAddress> getAddresses() { return hosts; } @Override public Object getInfo() { return this; } } private static HostAddress addressForHost(int host) { int rack = Integer.hashCode(host) % RACKS; return HostAddress.fromParts("host" + host + ".rack" + rack, 1); } }
{ "content_hash": "6935ed3fa5747db00946b6771ca1ec18", "timestamp": "", "source": "github", "line_count": 289, "max_line_length": 203, "avg_line_length": 39.6159169550173, "alnum_prop": 0.6645121844702594, "repo_name": "youngwookim/presto", "id": "23c0e71d8b6e19ccea92c399880ad31e829695b5", "size": "11449", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "presto-main/src/test/java/io/prestosql/execution/BenchmarkNodeScheduler.java", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "ANTLR", "bytes": "26917" }, { "name": "CSS", "bytes": "12957" }, { "name": "HTML", "bytes": "28832" }, { "name": "Java", "bytes": "31475945" }, { "name": "JavaScript", "bytes": "211244" }, { "name": "Makefile", "bytes": "6830" }, { "name": "PLSQL", "bytes": "2797" }, { "name": "PLpgSQL", "bytes": "11504" }, { "name": "Python", "bytes": "7664" }, { "name": "SQLPL", "bytes": "926" }, { "name": "Shell", "bytes": "29871" }, { "name": "Thrift", "bytes": "12631" } ], "symlink_target": "" }
module ActiveScaffold
  module Helpers
    # Helpers that assist with the rendering of a Search Column
    module SearchColumnHelpers
      # This method decides which input to use for the given column.
      # It does not do any rendering. It only decides which method is responsible for rendering.
      def active_scaffold_search_for(column)
        options = active_scaffold_search_options(column)

        # first, check if the dev has created an override for this specific field for search
        if (method = override_search_field(column))
          send(method, @record, options)

        # second, check if the dev has specified a valid search_ui for this column, using specific ui for searches
        elsif column.search_ui and (method = override_search(column.search_ui))
          send(method, column, options)

        # third, check if the dev has specified a valid search_ui for this column, using generic ui for forms
        elsif column.search_ui and (method = override_input(column.search_ui))
          send(method, column, options)

        # fourth, check if the dev has created an override for this specific field
        elsif (method = override_form_field(column))
          send(method, @record, options)

        # fallback: we get to make the decision
        else
          if column.association or column.virtual?
            active_scaffold_search_text(column, options)
          else # regular model attribute column
            # if we (or someone else) have created a custom render option for the column type, use that
            if (method = override_search(column.column.type))
              send(method, column, options)
            # if we (or someone else) have created a custom render option for the column type, use that
            elsif (method = override_input(column.column.type))
              send(method, column, options)
            # final ultimate fallback: use rails' generic input method
            else
              # for textual fields we pass different options
              text_types = [:text, :string, :integer, :float, :decimal]
              options = active_scaffold_input_text_options(options) if text_types.include?(column.column.type)
              text_field(:record, column.name, options.merge(column.options))
            end
          end
        end
      end

      # the standard active scaffold options used for class, name and scope
      def active_scaffold_search_options(column)
        { :name => "search[#{column.name}]", :class => "#{column.name}-input", :id => "search_#{column.name}", :value => field_search_params[column.name] }
      end

      ##
      ## Search input methods
      ##

      # Checkbox list for searching on multiple association records or enumerated options.
      def active_scaffold_search_multi_select(column, options)
        associated = options.delete :value
        associated = [associated].compact unless associated.is_a? Array
        associated.collect!(&:to_i)

        if column.association
          select_options = sorted_association_options_find(column.association).collect {|r| [r.to_label, r.id]}
        else
          select_options = column.options[:options].collect do |text, value|
            active_scaffold_translated_option(column, text, value)
          end
        end
        return as_(:no_options) if select_options.empty?

        active_scaffold_checkbox_list(column, select_options, associated, options)
      end

      # Select box for searching on an association or an enumerated set of options.
      def active_scaffold_search_select(column, html_options)
        associated = html_options.delete :value
        if column.association
          associated = associated.is_a?(Array) ? associated.map(&:to_i) : associated.to_i unless associated.nil?
          method = column.association.macro == :belongs_to ? column.association.foreign_key : column.name
          select_options = sorted_association_options_find(column.association, false)
        else
          method = column.name
          select_options = column.options[:options].collect do |text, value|
            active_scaffold_translated_option(column, text, value)
          end
        end

        options = { :selected => associated }.merge! column.options
        html_options.merge! column.options[:html_options] || {}
        if html_options[:multiple]
          html_options[:name] += '[]'
        else
          options[:include_blank] ||= as_(:_select_)
        end
        if (optgroup = options.delete(:optgroup))
          select(:record, method, grouped_options_for_select(column, select_options, optgroup), options, html_options)
        elsif column.association
          collection_select(:record, method, select_options, :id, :to_label, options, html_options)
        else
          select(:record, method, select_options, options, html_options)
        end
      end

      # Plain text input for free-form searching.
      def active_scaffold_search_text(column, options)
        text_field :record, column.name, active_scaffold_input_text_options(options)
      end

      # we can't use active_scaffold_input_boolean because we need to have a nil value even when column can't be null
      # to decide whether search for this field or not
      def active_scaffold_search_boolean(column, options)
        select_options = []
        select_options << [as_(:_select_), nil]
        select_options << [as_(:true), true]
        select_options << [as_(:false), false]

        select_tag(options[:name], options_for_select(select_options, column.column.type_cast(field_search_params[column.name])))
      end
      # we can't use checkbox ui because it's not possible to decide whether search for this field or not
      alias_method :active_scaffold_search_checkbox, :active_scaffold_search_boolean

      # Select box offering the null/not-null comparators.
      def active_scaffold_search_null(column, options)
        select_options = []
        select_options << [as_(:_select_), nil]
        select_options.concat ActiveScaffold::Finder::NullComparators.collect {|comp| [as_(comp), comp]}
        select_tag(options[:name], options_for_select(select_options, field_search_params[column.name]))
      end

      # Extracts [operator, from, to] from the submitted search params for a range column.
      # Returns nil when the column was not searched on; blank bounds become nil.
      def field_search_params_range_values(column)
        values = field_search_params[column.name]
        return nil if values.nil?
        return values[:opt], (values[:from].blank? ? nil : values[:from]), (values[:to].blank? ? nil : values[:to])
      end

      # True when the range search should use string comparators rather than numeric ones.
      def active_scaffold_search_range_string?(column)
        (column.column && column.column.text?) || column.search_ui == :string
      end

      # Whether null/not-null comparators should be offered for this column.
      # Honors an explicit :null_comparators option, otherwise derives it from column nullability.
      def include_null_comparators?(column)
        return column.options[:null_comparators] if column.options.has_key? :null_comparators
        if column.association
          column.association.macro != :belongs_to || active_scaffold_config.columns[column.association.foreign_key].column.try(:null)
        else
          column.column.try(:null)
        end
      end

      # Builds the [label, operator] pairs shown in the range-search operator dropdown.
      def active_scaffold_search_range_comparator_options(column)
        select_options = ActiveScaffold::Finder::NumericComparators.collect {|comp| [as_(comp.downcase.to_sym), comp]}
        if active_scaffold_search_range_string?(column)
          select_options.unshift *ActiveScaffold::Finder::StringComparators.collect {|title, comp| [as_(title), comp]}
        end
        if include_null_comparators? column
          select_options += ActiveScaffold::Finder::NullComparators.collect {|comp| [as_(comp), comp]}
        end
        select_options
      end

      # Operator dropdown plus from/to text fields for range searching.
      def active_scaffold_search_range(column, options)
        opt_value, from_value, to_value = field_search_params_range_values(column)

        select_options = active_scaffold_search_range_comparator_options(column)
        if active_scaffold_search_range_string?(column)
          text_field_size = 15
          opt_value ||= '%?%'
        else
          text_field_size = 10
          opt_value ||= '='
        end

        from_value = controller.class.condition_value_for_numeric(column, from_value)
        to_value = controller.class.condition_value_for_numeric(column, to_value)
        from_value = format_number_value(from_value, column.options) if from_value.is_a?(Numeric)
        to_value = format_number_value(to_value, column.options) if to_value.is_a?(Numeric)
        html = select_tag("#{options[:name]}[opt]", options_for_select(select_options, opt_value), :id => "#{options[:id]}_opt", :class => "as_search_range_option")

        # The numeric inputs are hidden when a null comparator is selected; the "to"
        # field is only shown for BETWEEN.
        html << content_tag("span", :id => "#{options[:id]}_numeric", :style => ActiveScaffold::Finder::NullComparators.include?(opt_value) ? "display: none" : nil) do
          text_field_tag("#{options[:name]}[from]", from_value, active_scaffold_input_text_options(:id => options[:id], :size => text_field_size)) <<
            content_tag(:span, (' - ' + text_field_tag("#{options[:name]}[to]", to_value, active_scaffold_input_text_options(:id => "#{options[:id]}_to", :size => text_field_size))).html_safe, :id => "#{options[:id]}_between", :class => "as_search_range_between", :style => (opt_value == 'BETWEEN') ? nil : "display: none")
        end
        content_tag :span, html, :class => 'search_range'
      end
      alias_method :active_scaffold_search_integer, :active_scaffold_search_range
      alias_method :active_scaffold_search_decimal, :active_scaffold_search_range
      alias_method :active_scaffold_search_float, :active_scaffold_search_range
      alias_method :active_scaffold_search_string, :active_scaffold_search_range

      # Builds a DateTime from multiparameter date/time params; nil when no year was submitted.
      def field_search_datetime_value(value)
        DateTime.new(value[:year].to_i, value[:month].to_i, value[:day].to_i, value[:hour].to_i, value[:minute].to_i, value[:second].to_i) unless value.nil? || value[:year].blank?
      end

      # From/to date-time pickers for range searching on temporal columns.
      def active_scaffold_search_datetime(column, options)
        opt_value, from_value, to_value = field_search_params_range_values(column)
        options = column.options.merge(options)
        helper = "select_#{'date' unless options[:discard_date]}#{'time' unless options[:discard_time]}"
        send(helper, field_search_datetime_value(from_value), {:include_blank => true, :prefix => "#{options[:name]}[from]"}.merge(options)) <<
          ' - '.html_safe <<
          send(helper, field_search_datetime_value(to_value), {:include_blank => true, :prefix => "#{options[:name]}[to]"}.merge(options))
      end

      def active_scaffold_search_date(column, options)
        active_scaffold_search_datetime(column, options.merge!(:discard_time => true))
      end

      def active_scaffold_search_time(column, options)
        active_scaffold_search_datetime(column, options.merge!(:discard_date => true))
      end
      alias_method :active_scaffold_search_timestamp, :active_scaffold_search_datetime

      ##
      ## Search column override signatures
      ##

      def override_search_field(column)
        override_helper column, 'search_column'
      end

      # the naming convention for overriding search input types with helpers
      def override_search(form_ui)
        method = "active_scaffold_search_#{form_ui}"
        method if respond_to? method
      end

      # Splits the searchable columns into those shown by default and the optional,
      # not-yet-searched ones that start hidden.
      def visibles_and_hiddens(search_config)
        visibles = []
        hiddens = []
        search_config.columns.each do |column|
          next unless column.search_sql
          if search_config.optional_columns.include?(column.name) && !searched_by?(column)
            hiddens << column
          else
            visibles << column
          end
        end
        return visibles, hiddens
      end

      # True when the current search params contain a non-blank value for this column.
      def searched_by?(column)
        value = field_search_params[column.name]
        case value
        when Hash
          !value['from'].blank?
        when String
          !value.blank?
        else
          false
        end
      end
    end
  end
end
{ "content_hash": "3e633b5c2234a5a6dc1685028dd673e7", "timestamp": "", "source": "github", "line_count": 264, "max_line_length": 179, "avg_line_length": 45.78409090909091, "alnum_prop": 0.6352279308347811, "repo_name": "BookingBug/active_scaffold", "id": "4ca5fd0e8d1374d3cddba20ce8693fab689de34c", "size": "12087", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "lib/active_scaffold/helpers/search_column_helpers.rb", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "48436" }, { "name": "HTML", "bytes": "57153" }, { "name": "JavaScript", "bytes": "101946" }, { "name": "Ruby", "bytes": "512426" } ], "symlink_target": "" }
ACCEPTED #### According to International Plant Names Index #### Published in null #### Original name null ### Remarks null
{ "content_hash": "79b7247583e3f3f6dafbc3bbe3ac6744", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 31, "avg_line_length": 9.692307692307692, "alnum_prop": 0.7063492063492064, "repo_name": "mdoering/backbone", "id": "dd1989c0d7b6579379edcdf1f660b98db4f9f574", "size": "186", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "life/Plantae/Psilophyta/Psilotopsida/Psilotales/Psilotaceae/Tmesipteris/Tmesipteris solomonensis/README.md", "mode": "33188", "license": "apache-2.0", "language": [], "symlink_target": "" }
package com.integratingfactor.idp.lib.client.rbac;

import java.util.logging.Logger;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.EnableAspectJAutoProxy;
import org.springframework.web.servlet.config.annotation.EnableWebMvc;

/**
 * Spring MVC configuration that registers the RBAC test API endpoint as a bean,
 * with AspectJ auto-proxying enabled for the application context.
 */
@Configuration
@EnableWebMvc
@EnableAspectJAutoProxy
public class IdpRbacTestApiEndpointConfig {

    private static final Logger LOG = Logger.getLogger(IdpRbacTestApiEndpointConfig.class.getName());

    /**
     * @return a new {@link IdpRbacTestApiEndpoint} managed by the Spring container
     */
    @Bean
    public IdpRbacTestApiEndpoint idpRbacTestApi() {
        LOG.info("Creating instance of IdpRbacTestApiEndpoint");
        return new IdpRbacTestApiEndpoint();
    }
}
{ "content_hash": "ffea793b2a61e14266204dd783720af8", "timestamp": "", "source": "github", "line_count": 21, "max_line_length": 95, "avg_line_length": 34.095238095238095, "alnum_prop": 0.8114525139664804, "repo_name": "Integratingfactor/lib-idp-client", "id": "dcf9e774859f24128f929cecc664e00f943f5585", "size": "716", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/test/java/com/integratingfactor/idp/lib/client/rbac/IdpRbacTestApiEndpointConfig.java", "mode": "33188", "license": "mit", "language": [ { "name": "Java", "bytes": "89156" } ], "symlink_target": "" }
<?php
namespace SebastianBergmann\Comparator;

/**
 * Compares DateTimeInterface instances for equality, tolerating a
 * configurable drift (in seconds) between the two values.
 */
class DateTimeComparator extends ObjectComparator
{
    /**
     * Returns whether the comparator can compare two values.
     *
     * @param mixed $expected The first value to compare
     * @param mixed $actual   The second value to compare
     * @return bool
     */
    public function accepts($expected, $actual)
    {
        $expectedIsDateTime = $expected instanceof \DateTime || $expected instanceof \DateTimeInterface;
        $actualIsDateTime   = $actual instanceof \DateTime || $actual instanceof \DateTimeInterface;

        return $expectedIsDateTime && $actualIsDateTime;
    }

    /**
     * Asserts that two values are equal.
     *
     * @param mixed $expected     First value to compare
     * @param mixed $actual       Second value to compare
     * @param float $delta        Allowed numerical distance between two values to consider them equal
     * @param bool  $canonicalize Arrays are sorted before comparison when set to true
     * @param bool  $ignoreCase   Case is ignored when set to true
     * @param array $processed    List of already processed elements (used to prevent infinite recursion)
     *
     * @throws ComparisonFailure
     */
    public function assertEquals($expected, $actual, $delta = 0.0, $canonicalize = false, $ignoreCase = false, array &$processed = array())
    {
        // Build the tolerated window [expected - delta, expected + delta].
        $drift = new \DateInterval(sprintf('PT%sS', abs($delta)));

        $lowerBound = clone $expected;
        $upperBound = clone $expected;

        // Compare against the return values of sub()/add() so that both
        // mutable DateTime and immutable implementations are handled.
        $withinWindow = $actual >= $lowerBound->sub($drift) && $actual <= $upperBound->add($drift);

        if (!$withinWindow) {
            throw new ComparisonFailure(
                $expected,
                $actual,
                $this->dateTimeToString($expected),
                $this->dateTimeToString($actual),
                false,
                'Failed asserting that two DateTime objects are equal.'
            );
        }
    }

    /**
     * Returns an ISO 8601 formatted string representation of a datetime or
     * 'Invalid DateTimeInterface object' if the provided DateTimeInterface was not properly
     * initialized.
     *
     * @param \DateTimeInterface $datetime
     * @return string
     */
    private function dateTimeToString($datetime)
    {
        $formatted = $datetime->format('Y-m-d\TH:i:s.uO');

        return $formatted ?: 'Invalid DateTimeInterface object';
    }
}
{ "content_hash": "73993cf6ebf535d50f25c8c2504d6ac8", "timestamp": "", "source": "github", "line_count": 70, "max_line_length": 139, "avg_line_length": 35.34285714285714, "alnum_prop": 0.6063055780113177, "repo_name": "gustavokev/preescolar1", "id": "4781c5299687b5ba9aa6a7be96d24a91efb573b9", "size": "2717", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "vendor/sebastian/comparator/src/DateTimeComparator.php", "mode": "33261", "license": "mit", "language": [ { "name": "ApacheConf", "bytes": "388" }, { "name": "CSS", "bytes": "32330" }, { "name": "HTML", "bytes": "8689942" }, { "name": "JavaScript", "bytes": "70482" }, { "name": "PHP", "bytes": "1931592" } ], "symlink_target": "" }
namespace MultiMiner.Blockchain.Data
{
    /// <summary>
    /// One row of ticker data: last/buy/sell prices together with the
    /// symbol they refer to.
    /// </summary>
    class TickerEntry
    {
        // Last price reported by the data source.
        public double Last { get; set; }

        // Buy price reported by the data source.
        public double Buy { get; set; }

        // Sell price reported by the data source.
        public double Sell { get; set; }

        // Symbol this ticker entry refers to.
        public string Symbol { get; set; }
    }
}
{ "content_hash": "6755b3ca3efdd7908e78541bdaa96115", "timestamp": "", "source": "github", "line_count": 10, "max_line_length": 42, "avg_line_length": 24.1, "alnum_prop": 0.5726141078838174, "repo_name": "nwoolls/MultiMiner", "id": "d4502d3ce4b09f246ca55a2b2810ca979fb720ac", "size": "243", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "MultiMiner.Blockchain/Data/CurrencyEntry.cs", "mode": "33188", "license": "mit", "language": [ { "name": "C#", "bytes": "1171682" }, { "name": "Inno Setup", "bytes": "8117" }, { "name": "Roff", "bytes": "5677330" }, { "name": "Shell", "bytes": "4304" } ], "symlink_target": "" }
package cofh.api.energy; import net.minecraft.nbt.NBTTagCompound; /** * Reference implementation of {@link IEnergyStorage}. Use/extend this or implement your own. * * @author King Lemming * */ public class EnergyStorage implements IEnergyStorage { protected int energy; protected int capacity; protected int maxReceive; protected int maxExtract; public EnergyStorage(int capacity) { this(capacity, capacity, capacity); } public EnergyStorage(int capacity, int maxTransfer) { this(capacity, maxTransfer, maxTransfer); } public EnergyStorage(int capacity, int maxReceive, int maxExtract) { this.capacity = capacity; this.maxReceive = maxReceive; this.maxExtract = maxExtract; } public EnergyStorage readFromNBT(NBTTagCompound nbt) { this.energy = nbt.getInteger("Energy"); if (energy > capacity) { energy = capacity; } return this; } public NBTTagCompound writeToNBT(NBTTagCompound nbt) { if (energy < 0) { energy = 0; } nbt.setInteger("Energy", energy); return nbt; } public void setCapacity(int capacity) { this.capacity = capacity; if (energy > capacity) { energy = capacity; } } public void setMaxTransfer(int maxTransfer) { setMaxReceive(maxTransfer); setMaxExtract(maxTransfer); } public void setMaxReceive(int maxReceive) { this.maxReceive = maxReceive; } public void setMaxExtract(int maxExtract) { this.maxExtract = maxExtract; } public int getMaxReceive() { return maxReceive; } public int getMaxExtract() { return maxExtract; } /** * This function is included to allow for server -> client sync. Do not call this externally to the containing Tile Entity, as not all IEnergyHandlers are * guaranteed to have it. * * @param energy */ public void setEnergyStored(int energy) { this.energy = energy; if (this.energy > capacity) { this.energy = capacity; } else if (this.energy < 0) { this.energy = 0; } } /** * This function is included to allow the containing tile to directly and efficiently modify the energy contained in the EnergyStorage. 
Do not rely on this * externally, as not all IEnergyHandlers are guaranteed to have it. * * @param energy */ public void modifyEnergyStored(int energy) { this.energy += energy; if (this.energy > capacity) { this.energy = capacity; } else if (this.energy < 0) { this.energy = 0; } } /* IEnergyStorage */ @Override public int receiveEnergy(int maxReceive, boolean simulate) { int energyReceived = Math.min(capacity - energy, Math.min(this.maxReceive, maxReceive)); if (!simulate) { energy += energyReceived; } return energyReceived; } @Override public int extractEnergy(int maxExtract, boolean simulate) { int energyExtracted = Math.min(energy, Math.min(this.maxExtract, maxExtract)); if (!simulate) { energy -= energyExtracted; } return energyExtracted; } @Override public int getEnergyStored() { return energy; } @Override public int getMaxEnergyStored() { return capacity; } }
{ "content_hash": "ce65ab4629c541c6e9114df91dd6118b", "timestamp": "", "source": "github", "line_count": 158, "max_line_length": 156, "avg_line_length": 19.31645569620253, "alnum_prop": 0.703473132372215, "repo_name": "Mazdallier/Mariculture", "id": "b3383aebe5a5da7d1dffd7a6b54ed4a08c3e1235", "size": "3052", "binary": false, "copies": "40", "ref": "refs/heads/master", "path": "src/main/java/cofh/api/energy/EnergyStorage.java", "mode": "33188", "license": "mit", "language": [], "symlink_target": "" }
<?php

namespace Eesn2\BackendBundle\Entity;

use Doctrine\ORM\EntityRepository;

/**
 * Bajas_DocentesRepository
 *
 * Doctrine repository for the Bajas_Docentes entity. All generic finder
 * behavior is inherited from EntityRepository; add custom query methods
 * for the entity below.
 *
 * This class was generated by the Doctrine ORM.
 */
class Bajas_DocentesRepository extends EntityRepository
{
}
{ "content_hash": "e264951fa08d080ed51bf67bde66e9d8", "timestamp": "", "source": "github", "line_count": 15, "max_line_length": 68, "avg_line_length": 18.6, "alnum_prop": 0.7706093189964157, "repo_name": "pablobilvao/eesn2", "id": "77fdeded92b1ad89f8919b592ea74502678ba96c", "size": "279", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/Eesn2/BackendBundle/Entity/Bajas_DocentesRepository.php", "mode": "33261", "license": "mit", "language": [ { "name": "CSS", "bytes": "13573" }, { "name": "JavaScript", "bytes": "65400" }, { "name": "PHP", "bytes": "178528" }, { "name": "Perl", "bytes": "2647" } ], "symlink_target": "" }
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2012 The Bitcoin developers
// Copyright (c) 2011-2012 Phenixcoin Developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

// IRC-based peer discovery: the node connects to an IRC server using a nick
// that base58-encodes its own IP:port, joins a channel, and harvests peer
// addresses from the nicks of the other clients it sees there.

#include "irc.h"
#include "net.h"
#include "strlcpy.h"
#include "base58.h"

using namespace std;
using namespace boost;

// Number of peer addresses learned via IRC so far (read elsewhere in net code).
int nGotIRCAddresses = 0;

void ThreadIRCSeed2(void* parg);

#pragma pack(push, 1)
// Packed wire layout encoded into the nick: 4-byte IPv4 address + 2-byte port.
struct ircaddr
{
    struct in_addr ip;
    short port;
};
#pragma pack(pop)

// Encode ip:port as "u" + base58check so it is usable as an IRC nick.
// Returns "" if the address has no IPv4 representation.
string EncodeAddress(const CService& addr)
{
    struct ircaddr tmp;
    if (addr.GetInAddr(&tmp.ip))
    {
        tmp.port = htons(addr.GetPort());

        vector<unsigned char> vch(UBEGIN(tmp), UEND(tmp));
        return string("u") + EncodeBase58Check(vch);
    }
    return "";
}

// Inverse of EncodeAddress: parse a "u<base58check>" nick back into a CService.
bool DecodeAddress(string str, CService& addr)
{
    vector<unsigned char> vch;
    if (!DecodeBase58Check(str.substr(1), vch))
        return false;

    struct ircaddr tmp;
    if (vch.size() != sizeof(tmp))
        return false;
    memcpy(&tmp, &vch[0], sizeof(tmp));

    addr = CService(tmp.ip, ntohs(tmp.port));
    return true;
}

// Send the full buffer, looping until every byte is written or send() fails.
static bool Send(SOCKET hSocket, const char* pszSend)
{
    // Log everything except PONG keep-alive replies to keep output readable.
    if (strstr(pszSend, "PONG") != pszSend)
        printf("IRC SENDING: %s\n", pszSend);
    const char* psz = pszSend;
    const char* pszEnd = psz + strlen(psz);
    while (psz < pszEnd)
    {
        int ret = send(hSocket, psz, pszEnd - psz, MSG_NOSIGNAL);
        if (ret < 0)
            return false;
        psz += ret;
    }
    return true;
}

// Receive one IRC line, transparently answering server PINGs with PONGs.
bool RecvLineIRC(SOCKET hSocket, string& strLine)
{
    loop
    {
        bool fRet = RecvLine(hSocket, strLine);
        if (fRet)
        {
            if (fShutdown)
                return false;
            vector<string> vWords;
            ParseString(strLine, ' ', vWords);
            if (vWords.size() >= 1 && vWords[0] == "PING")
            {
                // Turn "PING ..." into "PONG ..." in place and echo it back.
                strLine[1] = 'O';
                strLine += '\r';
                Send(hSocket, strLine.c_str());
                continue;
            }
        }
        return fRet;
    }
}

// Read lines until one contains any of the given substrings; returns the
// 1-based index of the pattern matched, or 0 on connection loss.
int RecvUntil(SOCKET hSocket, const char* psz1, const char* psz2=NULL, const char* psz3=NULL, const char* psz4=NULL)
{
    loop
    {
        string strLine;
        strLine.reserve(10000);
        if (!RecvLineIRC(hSocket, strLine))
            return 0;
        printf("IRC %s\n", strLine.c_str());
        if (psz1 && strLine.find(psz1) != string::npos)
            return 1;
        if (psz2 && strLine.find(psz2) != string::npos)
            return 2;
        if (psz3 && strLine.find(psz3) != string::npos)
            return 3;
        if (psz4 && strLine.find(psz4) != string::npos)
            return 4;
    }
}

// Sleep for nSeconds, waking every second to honor shutdown requests.
// Returns false if a shutdown was requested meanwhile.
bool Wait(int nSeconds)
{
    if (fShutdown)
        return false;
    printf("IRC waiting %d seconds to reconnect\n", nSeconds);
    for (int i = 0; i < nSeconds; i++)
    {
        if (fShutdown)
            return false;
        Sleep(1000);
    }
    return true;
}

// Read lines until one whose second word is the given IRC numeric code;
// the matching line is returned in strRet.
bool RecvCodeLine(SOCKET hSocket, const char* psz1, string& strRet)
{
    strRet.clear();
    loop
    {
        string strLine;
        if (!RecvLineIRC(hSocket, strLine))
            return false;

        vector<string> vWords;
        ParseString(strLine, ' ', vWords);
        if (vWords.size() < 2)
            continue;

        if (vWords[1] == psz1)
        {
            printf("IRC %s\n", strLine.c_str());
            strRet = strLine;
            return true;
        }
    }
}

// Ask the server for our own host via USERHOST and parse the 302 reply
// into ipRet. Returns false if the reply is absent or unparsable.
bool GetIPFromIRC(SOCKET hSocket, string strMyName, CNetAddr& ipRet)
{
    Send(hSocket, strprintf("USERHOST %s\r", strMyName.c_str()).c_str());

    string strLine;
    if (!RecvCodeLine(hSocket, "302", strLine))
        return false;

    vector<string> vWords;
    ParseString(strLine, ' ', vWords);
    if (vWords.size() < 4)
        return false;

    string str = vWords[3];
    if (str.rfind("@") == string::npos)
        return false;
    string strHost = str.substr(str.rfind("@")+1);

    // Hybrid IRC used by lfnet always returns IP when you userhost yourself,
    // but in case another IRC is ever used this should work.
    printf("GetIPFromIRC() got userhost %s\n", strHost.c_str());
    CNetAddr addr(strHost, true);
    if (!addr.IsValid())
        return false;
    ipRet = addr;

    return true;
}

// Thread entry point: wraps ThreadIRCSeed2 with exception reporting.
void ThreadIRCSeed(void* parg)
{
    IMPLEMENT_RANDOMIZE_STACK(ThreadIRCSeed(parg));

    // Make this thread recognisable as the IRC seeding thread
    RenameThread("bitcoin-ircseed");

    try
    {
        ThreadIRCSeed2(parg);
    }
    catch (std::exception& e) {
        PrintExceptionContinue(&e, "ThreadIRCSeed()");
    } catch (...) {
        PrintExceptionContinue(NULL, "ThreadIRCSeed()");
    }
    printf("ThreadIRCSeed exited\n");
}

// Main IRC seeding loop: connect, advertise our address as our nick, join
// the channel, and collect peer addresses until shutdown. Reconnects with
// a growing backoff on failure.
void ThreadIRCSeed2(void* parg)
{
    /* Dont advertise on IRC if we don't allow incoming connections */
    if (mapArgs.count("-connect") || fNoListen)
        return;

    if (!GetBoolArg("-irc", false))
        return;

    printf("ThreadIRCSeed started\n");
    int nErrorWait = 10;
    int nRetryWait = 10;

    while (!fShutdown)
    {
        // Fallback IP, overridden by the DNS name when it resolves.
        CService addrConnect("199.201.107.112", 6667);

        CService addrIRC("irc.phenixcoin.com", 6667, true);
        if (addrIRC.IsValid())
            addrConnect = addrIRC;

        SOCKET hSocket;
        if (!ConnectSocket(addrConnect, hSocket))
        {
            printf("IRC connect failed\n");
            // Grow the error backoff by ~10% plus a fixed minute each failure.
            nErrorWait = nErrorWait * 11 / 10;
            if (Wait(nErrorWait += 60))
                continue;
            else
                return;
        }

        if (!RecvUntil(hSocket, "Found your hostname", "using your IP address instead", "Couldn't look up your hostname", "ignoring hostname"))
        {
            closesocket(hSocket);
            hSocket = INVALID_SOCKET;
            nErrorWait = nErrorWait * 11 / 10;
            if (Wait(nErrorWait += 60))
                continue;
            else
                return;
        }

        CNetAddr addrIPv4("1.2.3.4"); // arbitrary IPv4 address to make GetLocal prefer IPv4 addresses
        CService addrLocal;
        string strMyName;
        if (GetLocal(addrLocal, &addrIPv4))
            strMyName = EncodeAddress(GetLocalAddress(&addrConnect));
        // No routable local address known yet: use a random placeholder nick.
        if (strMyName == "")
            strMyName = strprintf("x%u", GetRand(1000000000));

        Send(hSocket, strprintf("NICK %s\r", strMyName.c_str()).c_str());
        Send(hSocket, strprintf("USER %s 8 * : %s\r", strMyName.c_str(), strMyName.c_str()).c_str());

        // 004 = registration complete, 433 = nickname already in use.
        int nRet = RecvUntil(hSocket, " 004 ", " 433 ");
        if (nRet != 1)
        {
            closesocket(hSocket);
            hSocket = INVALID_SOCKET;
            if (nRet == 2)
            {
                printf("IRC name already in use\n");
                Wait(10);
                continue;
            }
            nErrorWait = nErrorWait * 11 / 10;
            if (Wait(nErrorWait += 60))
                continue;
            else
                return;
        }
        Sleep(500);

        // Get our external IP from the IRC server and re-nick before joining the channel
        CNetAddr addrFromIRC;
        if (GetIPFromIRC(hSocket, strMyName, addrFromIRC))
        {
            printf("GetIPFromIRC() returned %s\n", addrFromIRC.ToString().c_str());
            if (addrFromIRC.IsRoutable())
            {
                // IRC lets you re-nick
                AddLocal(addrFromIRC, LOCAL_IRC);
                strMyName = EncodeAddress(GetLocalAddress(&addrConnect));
                Send(hSocket, strprintf("NICK %s\r", strMyName.c_str()).c_str());
            }
        }

        if (fTestNet) {
            Send(hSocket, "JOIN #phenixcoinTEST3\r");
            Send(hSocket, "WHO #phenixcoinTEST3\r");
        } else {
            // randomly join #phenixcoin00-#phenixcoin99
            int channel_number = GetRandInt(100);
            channel_number = 0; // Phenixcoin: for now, just use one channel
            Send(hSocket, strprintf("JOIN #phenixcoin%02d\r", channel_number).c_str());
            Send(hSocket, strprintf("WHO #phenixcoin%02d\r", channel_number).c_str());
        }

        int64 nStart = GetTime();
        string strLine;
        strLine.reserve(10000);
        while (!fShutdown && RecvLineIRC(hSocket, strLine))
        {
            if (strLine.empty() || strLine.size() > 900 || strLine[0] != ':')
                continue;

            vector<string> vWords;
            ParseString(strLine, ' ', vWords);
            if (vWords.size() < 2)
                continue;

            char pszName[10000];
            pszName[0] = '\0';

            // 352 = WHO reply: nick of an already-present client.
            if (vWords[1] == "352" && vWords.size() >= 8)
            {
                // index 7 is limited to 16 characters
                // could get full length name at index 10, but would be different from join messages
                strlcpy(pszName, vWords[7].c_str(), sizeof(pszName));
                printf("IRC got who\n");
            }

            // JOIN notification: nick of a client that just joined.
            if (vWords[1] == "JOIN" && vWords[0].size() > 1)
            {
                // :username!username@50000007.F000000B.90000002.IP JOIN :#channelname
                strlcpy(pszName, vWords[0].c_str() + 1, sizeof(pszName));
                if (strchr(pszName, '!'))
                    *strchr(pszName, '!') = '\0';
                printf("IRC got join\n");
            }

            // Nicks starting with 'u' carry an encoded peer address.
            if (pszName[0] == 'u')
            {
                CAddress addr;
                if (DecodeAddress(pszName, addr))
                {
                    addr.nTime = GetAdjustedTime();
                    if (addrman.Add(addr, addrConnect, 51 * 60))
                        printf("IRC got new address: %s\n", addr.ToString().c_str());
                    nGotIRCAddresses++;
                }
                else
                {
                    printf("IRC decode failed\n");
                }
            }
        }
        closesocket(hSocket);
        hSocket = INVALID_SOCKET;

        // A session longer than 20 minutes counts as healthy: shrink backoffs.
        if (GetTime() - nStart > 20 * 60)
        {
            nErrorWait /= 3;
            nRetryWait /= 3;
        }

        nRetryWait = nRetryWait * 11 / 10;
        if (!Wait(nRetryWait += 60))
            return;
    }
}

#ifdef TEST
// Standalone test driver (Windows): run the seeding thread directly.
int main(int argc, char *argv[])
{
    WSADATA wsadata;
    if (WSAStartup(MAKEWORD(2,2), &wsadata) != NO_ERROR)
    {
        printf("Error at WSAStartup()\n");
        return false;
    }

    ThreadIRCSeed(NULL);

    WSACleanup();
    return 0;
}
#endif
{ "content_hash": "0ec7e90436c412890377f7c61d731ab6", "timestamp": "", "source": "github", "line_count": 395, "max_line_length": 143, "avg_line_length": 26.645569620253166, "alnum_prop": 0.5305463182897863, "repo_name": "phenixcoin/Phenixcoin", "id": "76af6912d536cd421fba231627bd934815197424", "size": "10525", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/irc.cpp", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "59072" }, { "name": "C++", "bytes": "1395291" }, { "name": "Objective-C", "bytes": "2463" }, { "name": "Prolog", "bytes": "10996" }, { "name": "Python", "bytes": "18144" }, { "name": "Shell", "bytes": "1144" }, { "name": "TypeScript", "bytes": "3830799" } ], "symlink_target": "" }
**Learning Objectives:**

1. Write a Dockerfile to define a custom image
2. Build and push the image to your Container Registry
3. Create an AI Platform Notebook using a custom container image

In this lab, you will provision an AI Platform Notebooks instance using a custom container image. The accompanying lab - `provisioning-kfp` - describes the steps to provision other services in the MLOps environment, including a standalone deployment of Kubeflow Pipelines.

## Enabling the required cloud services

In addition to the [services enabled by default](https://cloud.google.com/service-usage/docs/enabled-service), the following additional services must be enabled to provision an instance of **AI Platform Notebooks**:

1. Compute Engine
1. Container Registry
1. Cloud Build

Use [GCP Console](https://console.cloud.google.com/) or the `gcloud` command line interface in [Cloud Shell](https://cloud.google.com/shell/docs/) to [enable the required services](https://cloud.google.com/service-usage/docs/enable-disable).

You can enable the required services using `gcloud`:

1. Start GCP [Cloud Shell](https://cloud.google.com/shell/docs/)

2. Make sure that **Cloud Shell** is configured to use your project. In Cloud Shell, type the following commands. Replace `[YOUR_PROJECT_ID]` with your GCP Project ID.

```
PROJECT_ID=[YOUR_PROJECT_ID]

gcloud config set project $PROJECT_ID
```

3. Enable services

```
gcloud services enable \
compute.googleapis.com \
container.googleapis.com \
cloudbuild.googleapis.com
```

## Creating an **AI Platform Notebooks** instance

You will use a custom container image with KFP and TFX SDKs pre-installed to create your instance.

### Building a custom docker image:

1. In **Cloud Shell**, create a working folder in your `home` directory
```
cd
mkdir lab-workspace
cd lab-workspace
```

2. Create the requirements file with the Python packages to deploy to your instance
```
cat > requirements.txt << EOF
pandas<1.0.0
click==7.0
tfx==0.21.4
kfp==0.5.1
EOF
```

3.
TODO: Create a Dockerfile defining your custom container image within the `lab-workspace` directory. Your Dockerfile should execute the following steps: - use FROM to define the base image `gcr.io/deeplearning-platform-release/base-cpu:m42`. This will be used to start the build process. - use RUN to execute the following directives - update `apt-get` and use apt-get to install `kubectl` - use `curl` to download `skaffold` using ```bash curl -Lo skaffold https://storage.googleapis.com/skaffold/releases/latest/skaffold-linux-amd64 ```` - change permissions on `skaffold` with ```bash chmod +x skaffold ``` - move `skaffold` to the `/usr/local/bin` directory with ```bash mv skaffold /usr/local/bin ``` - use COPY to copy the `requirements.txt` file you wrote above - use RUN to install the requirements using ```bash python -m pip install -U -r requirements.txt --ignore-installed PyYAML==5.3.1 ``` 4. TODO: Build the image and push it to your project's **Container Registry**. Use `gcloud builds submit` to submit a build using Google Cloud Build. Use the `--tag` flag for Cloud Build to build using the Dockerfile you created above. The tag should have the format `gcr.io/<YOUR_PROJECT_ID>/mlops-dev:latest` ### Provisioning an AI Platform notebook instance 5. TODO: Provision an instance of **AI Platform Notebooks** using the `gcloud compute instances create` command with the following specifications: - the `zone` should be your compute zone - the `image-family` should be "common-container" - the `machine-type` should be an n1 standard-4 machine. See all machine types [here](https://cloud.google.com/compute/docs/machine-types). 
- the `maintenance-policy` should be set to TERMINATE
- the `boot-disk-device-name` should have the format `<YOUR_INSTANCE_NAME>-disk`
- the `boot-disk-size` should be 100GB
- the `boot-disk-type` should be set to a SSD persistent disk
- the `scopes` should be `cloud-platform` and `userinfo-email`
- the `metadata` should have the following key-value pairs:
  - proxy-mode=service_account
  - container="gcr.io/<YOUR_PROJECT_ID>/mlops-dev:latest"

### Accessing JupyterLab IDE

After the instance is created, you can connect to [JupyterLab](https://jupyter.org/) IDE by clicking the *OPEN JUPYTERLAB* link.
{ "content_hash": "14abc9df036cec0f5b223859618e1973", "timestamp": "", "source": "github", "line_count": 107, "max_line_length": 239, "avg_line_length": 41.09345794392523, "alnum_prop": 0.7334546281555606, "repo_name": "GoogleCloudPlatform/mlops-on-gcp", "id": "de0e41afd322f966104b1203ec16b79161b50d36", "size": "4443", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "examples/mlops-env-on-gcp/creating-notebook-instance/exercises/README.md", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "15195" }, { "name": "HCL", "bytes": "8348" }, { "name": "JavaScript", "bytes": "1143" }, { "name": "Jupyter Notebook", "bytes": "6737030" }, { "name": "Mustache", "bytes": "1946" }, { "name": "Python", "bytes": "1235643" }, { "name": "Shell", "bytes": "30775" } ], "symlink_target": "" }
import type {
  RootTag,
  TurboModule,
} from 'react-native/Libraries/TurboModule/RCTExport';
import * as TurboModuleRegistry from 'react-native/Libraries/TurboModule/TurboModuleRegistry';

// Nullable exact-object alias; exercises codegen handling of optional
// aliased struct arguments and return values.
type Animal = ?{|
  name: ?string,
|};

// Codegen e2e fixture: each method mirrors the non-nullable sample module
// but with every argument and return type made nullable ("?").
export interface Spec extends TurboModule {
  // Exported methods.
  +getConstants: () => {|
    const1: ?boolean,
    const2: ?number,
    const3: ?string,
  |};
  +voidFunc: () => void;
  +getBool: (arg: ?boolean) => ?boolean;
  +getNumber: (arg: ?number) => ?number;
  +getString: (arg: ?string) => ?string;
  +getArray: (arg: ?Array<any>) => ?Array<any>;
  +getObject: (arg: ?Object) => ?Object;
  +getObjectShape: (arg: ?{|prop: ?number|}) => ?{|prop: ?number|};
  +getAlias: (arg: ?Animal) => ?Animal;
  +getRootTag: (arg: ?RootTag) => ?RootTag;
  +getValue: (x: ?number, y: ?string, z: ?Object) => ?Object;
  +getValueWithCallback: (callback: (value: ?string) => void) => void;
  +getValueWithPromise: (error: ?boolean) => ?Promise<string>;
}

// getEnforcing (rather than get) is the variant that fails fast when the
// native module is not registered.
export default (TurboModuleRegistry.getEnforcing<Spec>(
  'SampleTurboModuleNullable',
): Spec);
{ "content_hash": "764c880f38745df1c2b13ba779b4087e", "timestamp": "", "source": "github", "line_count": 36, "max_line_length": 94, "avg_line_length": 30.02777777777778, "alnum_prop": 0.6466234967622572, "repo_name": "arthuralee/react-native", "id": "1c174e7f9a8e9f4df781cff9f23d28798b791889", "size": "1292", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "packages/react-native-codegen/e2e/__test_fixtures__/modules/NativeSampleTurboModuleNullable.js", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "AppleScript", "bytes": "1167" }, { "name": "C", "bytes": "35996" }, { "name": "CSS", "bytes": "16240" }, { "name": "HTML", "bytes": "4755" }, { "name": "JavaScript", "bytes": "1220707" }, { "name": "Objective-C", "bytes": "939193" }, { "name": "Ruby", "bytes": "4321" }, { "name": "Shell", "bytes": "5721" } ], "symlink_target": "" }
package kr.nor.spring.rest.services.impl.telegram;

import kr.nor.spring.rest.services.impl.telegram.struct.Update;

import java.util.List;

/**
 * Deserialization target for the response envelope of the Telegram Bot API
 * {@code getUpdates} call: a success flag plus the list of pending updates.
 */
public class GetUpdatesResult {

    // Success flag reported by the Telegram API.
    private boolean ok;

    // Updates delivered with this response; may be null before deserialization.
    private List<Update> result;

    /** @return true when the API call succeeded */
    public boolean isOk() {
        return ok;
    }

    /** @return the updates carried by this response */
    public List<Update> getResult() {
        return result;
    }

    public void setOk(boolean ok) {
        this.ok = ok;
    }

    public void setResult(List<Update> updates) {
        this.result = updates;
    }
}
{ "content_hash": "f628d9b560582ae001e9a6a5e31a3846", "timestamp": "", "source": "github", "line_count": 26, "max_line_length": 63, "avg_line_length": 19.423076923076923, "alnum_prop": 0.6415841584158416, "repo_name": "leios76/telegram-notifier", "id": "48b016320e3197150aae2b948277c0f8866df69c", "size": "505", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/main/java/kr/nor/spring/rest/services/impl/telegram/GetUpdatesResult.java", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "146001" }, { "name": "HTML", "bytes": "4069" }, { "name": "Java", "bytes": "29750" } ], "symlink_target": "" }
"""Generic device for the HomematicIP Cloud component.""" import logging from typing import Optional from homematicip.aio.device import AsyncDevice from homematicip.aio.home import AsyncHome from homeassistant.components import homematicip_cloud from homeassistant.helpers.entity import Entity _LOGGER = logging.getLogger(__name__) ATTR_LOW_BATTERY = 'low_battery' ATTR_MODEL_TYPE = 'model_type' # RSSI HAP -> Device ATTR_RSSI_DEVICE = 'rssi_device' # RSSI Device -> HAP ATTR_RSSI_PEER = 'rssi_peer' ATTR_SABOTAGE = 'sabotage' ATTR_GROUP_MEMBER_UNREACHABLE = 'group_member_unreachable' class HomematicipGenericDevice(Entity): """Representation of an HomematicIP generic device.""" def __init__(self, home: AsyncHome, device, post: Optional[str] = None) -> None: """Initialize the generic device.""" self._home = home self._device = device self.post = post _LOGGER.info("Setting up %s (%s)", self.name, self._device.modelType) @property def device_info(self): """Return device specific attributes.""" # Only physical devices should be HA devices. 
if isinstance(self._device, AsyncDevice): return { 'identifiers': { # Serial numbers of Homematic IP device (homematicip_cloud.DOMAIN, self._device.id) }, 'name': self._device.label, 'manufacturer': self._device.oem, 'model': self._device.modelType, 'sw_version': self._device.firmwareVersion, 'via_hub': (homematicip_cloud.DOMAIN, self._device.homeId), } return None async def async_added_to_hass(self): """Register callbacks.""" self._device.on_update(self._device_changed) def _device_changed(self, *args, **kwargs): """Handle device state changes.""" _LOGGER.debug("Event %s (%s)", self.name, self._device.modelType) self.async_schedule_update_ha_state() @property def name(self) -> str: """Return the name of the generic device.""" name = self._device.label if self._home.name is not None and self._home.name != '': name = "{} {}".format(self._home.name, name) if self.post is not None and self.post != '': name = "{} {}".format(name, self.post) return name @property def should_poll(self) -> bool: """No polling needed.""" return False @property def available(self) -> bool: """Device available.""" return not self._device.unreach @property def unique_id(self) -> str: """Return a unique ID.""" return "{}_{}".format(self.__class__.__name__, self._device.id) @property def icon(self) -> Optional[str]: """Return the icon.""" if hasattr(self._device, 'lowBat') and self._device.lowBat: return 'mdi:battery-outline' if hasattr(self._device, 'sabotage') and self._device.sabotage: return 'mdi:alert' return None @property def device_state_attributes(self): """Return the state attributes of the generic device.""" attr = {ATTR_MODEL_TYPE: self._device.modelType} if hasattr(self._device, 'lowBat') and self._device.lowBat: attr[ATTR_LOW_BATTERY] = self._device.lowBat if hasattr(self._device, 'sabotage') and self._device.sabotage: attr[ATTR_SABOTAGE] = self._device.sabotage if hasattr(self._device, 'rssiDeviceValue') and \ self._device.rssiDeviceValue: attr[ATTR_RSSI_DEVICE] = 
self._device.rssiDeviceValue if hasattr(self._device, 'rssiPeerValue') and \ self._device.rssiPeerValue: attr[ATTR_RSSI_PEER] = self._device.rssiPeerValue return attr
{ "content_hash": "a85fa6706dd0496437ea27b84ca0a271", "timestamp": "", "source": "github", "line_count": 109, "max_line_length": 77, "avg_line_length": 35.72477064220183, "alnum_prop": 0.6019517205957884, "repo_name": "jnewland/home-assistant", "id": "6bbbb8b4fab4a887ce98c9aa1111975715893d97", "size": "3894", "binary": false, "copies": "1", "ref": "refs/heads/ci", "path": "homeassistant/components/homematicip_cloud/device.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "1175" }, { "name": "Dockerfile", "bytes": "1081" }, { "name": "Python", "bytes": "15240512" }, { "name": "Ruby", "bytes": "745" }, { "name": "Shell", "bytes": "17862" } ], "symlink_target": "" }
<?php

namespace ServiceBus;

/**
 * Marker interface for command messages.
 *
 * Declares no methods; implementing it tags a class as a command that can
 * be dispatched on the service bus.
 */
interface ICommand
{
}
{ "content_hash": "4072bfef42c5b2aeed8a29488f958162", "timestamp": "", "source": "github", "line_count": 7, "max_line_length": 21, "avg_line_length": 7.428571428571429, "alnum_prop": 0.75, "repo_name": "psamatt/ServiceBusLite", "id": "550775db87342ecfbc9cfe784cdb0e75e5527373", "size": "52", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "lib/ServiceBus/ICommand.php", "mode": "33188", "license": "mit", "language": [ { "name": "PHP", "bytes": "12611" } ], "symlink_target": "" }
from unreal_engine.classes import PyFactory, StaticMesh, Object, Class
import unreal_engine as ue
from collada import Collada
from unreal_engine.structs import StaticMeshSourceModel, MeshBuildSettings
from unreal_engine import FRawMesh
import numpy
from unreal_engine import FVector, FRotator
from unreal_engine import SWindow, SVerticalBox, SHorizontalBox, SButton, SRotatorInputBox
from unreal_engine.enums import EHorizontalAlignment
from unreal_engine.structs import Rotator, StaticMaterial


class ColladaImportOptions(Object):
    # Options edited by the user in the import wizard (shown via
    # ue.create_detail_view below).
    DefaultRotation = Rotator
    DefaultMaterial = Material


class ColladaFactory(PyFactory):
    """UFactory that imports Collada (.dae) files as UStaticMesh assets."""

    # Shared options instance bound to the wizard's detail view.
    ImportOptions = ColladaImportOptions()

    def __init__(self):
        # inform the editor that this class is able to import assets
        self.bEditorImport = True
        # register the .dae extension as supported
        self.Formats = ['dae;Collada']
        # set the UClass this UFactory will generate
        self.SupportedClass = StaticMesh

    def open_collada_wizard(self):
        """Show a modal Slate window with import options.

        Blocks until the user closes it; sets self.do_import to True only
        when the Import button is pressed.
        """

        def cancel_import():
            self.wizard.request_destroy()

        def confirm_import():
            self.do_import = True
            self.wizard.request_destroy()

        self.wizard = SWindow(title='Collada Import Options', modal=True, sizing_rule=1)(
            SVerticalBox()
            (
                ue.create_detail_view(self.ImportOptions),
                auto_height=True,
                padding = 10
            )
            (
                SHorizontalBox()
                (
                    SButton(text='Cancel', on_clicked=cancel_import, h_align = EHorizontalAlignment.HAlign_Center)
                )
                (
                    SButton(text='Import', on_clicked=confirm_import, h_align = EHorizontalAlignment.HAlign_Center)
                ),
                auto_height=True,
                padding = 4,
            ),
        )

        self.wizard.add_modal()

    # this function starts with an uppercase letter, so it will be visible to the UE system
    # not required obviously, but it will be a good example
    def FixMeshData(self):
        """Convert self.vertices/normals/uvs from Collada to UE4 conventions."""
        # move from collada system (y on top) to ue4 one (z on top, forward decreases over viewer)
        for i in range(0, len(self.vertices), 3):
            xv, yv, zv = self.vertices[i], self.vertices[i+1], self.vertices[i+2]
            # invert forward, then apply the user-chosen default rotation
            vec = FVector(zv * -1, xv, yv) * self.ImportOptions.DefaultRotation
            self.vertices[i] = vec.x
            self.vertices[i+1] = vec.y
            self.vertices[i+2] = vec.z
            xn, yn, zn = self.normals[i], self.normals[i+1], self.normals[i+2]
            nor = FVector(zn * -1, xn, yn) * self.ImportOptions.DefaultRotation  # invert forward
            self.normals[i] = nor.x
            self.normals[i+1] = nor.y
            self.normals[i+2] = nor.z

        # fix uvs from 0 on bottom to 0 on top (flip the V coordinate only)
        for i, uv in enumerate(self.uvs):
            if i % 2 != 0:
                self.uvs[i] = 1 - uv

    def PyFactoryCreateFile(self, uclass: Class, parent: Object, name: str, filename: str) -> Object:
        """Factory entry point: parse the .dae file and build a UStaticMesh.

        Returns the new StaticMesh, or None if the user cancels the wizard.
        NOTE(review): only the first primitive of the first geometry is
        imported — multi-mesh .dae files lose the rest.
        """
        # load the collada file
        dae = Collada(filename)
        ue.log_warning(dae)
        self.do_import = False
        self.open_collada_wizard()
        if not self.do_import:
            return None
        # create a new UStaticMesh with the specified name and parent
        static_mesh = StaticMesh(name, parent)
        # prepare a new model with the specified build settings
        source_model = StaticMeshSourceModel(BuildSettings=MeshBuildSettings(bRecomputeNormals=False, bRecomputeTangents=True, bUseMikkTSpace=True, bBuildAdjacencyBuffer=True, bRemoveDegenerates=True))
        # extract vertices, uvs and normals from the .dae file (numpy.ravel will flatten the arrays to simple array of floats)
        triset = dae.geometries[0].primitives[0]
        self.vertices = numpy.ravel(triset.vertex[triset.vertex_index])
        # take the first uv channel (there could be multiple channels, like the one for lightmapping)
        self.uvs = numpy.ravel(triset.texcoordset[0][triset.texcoord_indexset[0]])
        self.normals = numpy.ravel(triset.normal[triset.normal_index])
        # fix mesh data
        self.FixMeshData()
        # create a new mesh, FRawMesh is an optimized wrapper exposed by the python plugin. read: no reflection involved
        mesh = FRawMesh()
        # assign vertices
        mesh.set_vertex_positions(self.vertices)
        # uvs are required
        mesh.set_wedge_tex_coords(self.uvs)
        # normals are optionals
        mesh.set_wedge_tangent_z(self.normals)
        # assign indices (not optimized, just return the list of triangles * 3...)
        mesh.set_wedge_indices(numpy.arange(0, len(triset) * 3))
        # assign the FRawMesh to the LOD0 (the model we created before)
        mesh.save_to_static_mesh_source_model(source_model)
        # assign LOD0 to the StaticMesh and build it
        static_mesh.SourceModels = [source_model]
        static_mesh.static_mesh_build()
        static_mesh.static_mesh_create_body_setup()
        static_mesh.StaticMaterials = [StaticMaterial(MaterialInterface=self.ImportOptions.DefaultMaterial, MaterialSlotName='Main')]
        return static_mesh
{ "content_hash": "7bbccb7a214be8516fdc3951c92f9d46", "timestamp": "", "source": "github", "line_count": 141, "max_line_length": 201, "avg_line_length": 40.04964539007092, "alnum_prop": 0.6047458827696122, "repo_name": "getnamo/UnrealEnginePython", "id": "33b8b0edcfd7cdf9c1d126ba8013ee308e209b0a", "size": "5647", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "tutorials/WritingAColladaFactoryWithPython_Assets/collada_factory.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "1177094" }, { "name": "C#", "bytes": "23839" }, { "name": "C++", "bytes": "2133454" }, { "name": "Python", "bytes": "109035" }, { "name": "Shell", "bytes": "232" } ], "symlink_target": "" }
// Default XCTest scaffold for the GYZChooseCityDemo target; the template
// test bodies have not been filled in yet.
@interface GYZChooseCityDemoTests : XCTestCase

@end

@implementation GYZChooseCityDemoTests

- (void)setUp {
    [super setUp];
    // Put setup code here. This method is called before the invocation of each test method in the class.
}

- (void)tearDown {
    // Put teardown code here. This method is called after the invocation of each test method in the class.
    [super tearDown];
}

- (void)testExample {
    // This is an example of a functional test case.
    // Use XCTAssert and related functions to verify your tests produce the correct results.
}

- (void)testPerformanceExample {
    // This is an example of a performance test case.
    [self measureBlock:^{
        // Put the code you want to measure the time of here.
    }];
}

@end
{ "content_hash": "a13760e1e65ac13bf2c73ab626260c4f", "timestamp": "", "source": "github", "line_count": 29, "max_line_length": 107, "avg_line_length": 25.93103448275862, "alnum_prop": 0.7021276595744681, "repo_name": "huang303513/The-Demo-Of-UITableView", "id": "7aae863bec81f54fe862a77e1ad5b54dbaf69829", "size": "931", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "地理位置定位、城市搜索/GYZChooseCityDemo/GYZChooseCityDemoTests/GYZChooseCityDemoTests.m", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "448" }, { "name": "Objective-C", "bytes": "2101994" }, { "name": "Ruby", "bytes": "906" }, { "name": "Shell", "bytes": "23758" } ], "symlink_target": "" }
package org.apache.geronimo.management.geronimo;

/**
 * Represents a JMS broker.
 *
 * Marker management interface: adds no operations beyond those inherited
 * from {@link NetworkContainer}.
 *
 * @version $Rev$ $Date$
 */
public interface JMSBroker extends NetworkContainer {
}
{ "content_hash": "fc41cbc3511da09065c1cfd8bd7962e2", "timestamp": "", "source": "github", "line_count": 10, "max_line_length": 53, "avg_line_length": 17, "alnum_prop": 0.7235294117647059, "repo_name": "apache/geronimo", "id": "8870293e427e1c8a1faf35e8b399aa2a7fdd9ddf", "size": "983", "binary": false, "copies": "2", "ref": "refs/heads/trunk", "path": "framework/modules/geronimo-management/src/main/java/org/apache/geronimo/management/geronimo/JMSBroker.java", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "29627" }, { "name": "CSS", "bytes": "47972" }, { "name": "HTML", "bytes": "838469" }, { "name": "Java", "bytes": "8975734" }, { "name": "JavaScript", "bytes": "906" }, { "name": "Shell", "bytes": "32814" }, { "name": "XSLT", "bytes": "4468" } ], "symlink_target": "" }
using System.Management.Automation;
using Microsoft.Azure.Commands.Sql.Common;
using Microsoft.Azure.Commands.Common.Authentication.Abstractions;
using Microsoft.Azure.Commands.Sql.AdvancedThreatProtection.Services;
using Microsoft.Azure.Commands.Sql.AdvancedThreatProtection.Model;
using Microsoft.Azure.Commands.Sql.Server.Model;
using System;

namespace Microsoft.Azure.Commands.Sql.AdvancedThreatProtection.Cmdlet
{
    /// <summary>
    /// The base class for all Azure Sql server Advanced Threat Protection Cmdlets.
    /// Derived cmdlets identify the target server either by name parameters or
    /// by piping an <see cref="AzureSqlServerModel"/> (InputObject).
    /// </summary>
    public abstract class SqlServerAdvancedThreatProtectionCmdletBase : AzureSqlCmdletBase<ServerAdvancedThreatProtectionPolicyModel, SqlAdvancedThreatProtectionAdapter>
    {
        // Parameter set used when the server arrives through the pipeline.
        protected const string UseParentResourceParameterSet = "UseParentResourceParameterSet";

        /// <summary>
        /// Server resource (piped server object); only valid in the
        /// UseParentResourceParameterSet parameter set.
        /// </summary>
        [Parameter(ParameterSetName = UseParentResourceParameterSet,
            Mandatory = false,
            ValueFromPipeline = true,
            HelpMessage = "The server object to use with Advanced Threat Protection policy operation ")]
        [ValidateNotNullOrEmpty]
        public AzureSqlServerModel InputObject { get; set; }

        /// <summary>
        /// Gets or sets the name of the database server to use.
        /// </summary>
        [Parameter(Mandatory = true,
            ValueFromPipelineByPropertyName = true,
            HelpMessage = "SQL Database server name.")]
        [ValidateNotNullOrEmpty]
        public string ServerName { get; set; }

        /// <summary>
        /// Provides the model element that this cmdlet operates on
        /// </summary>
        /// <returns>A model object</returns>
        protected override ServerAdvancedThreatProtectionPolicyModel GetEntity()
        {
            string resourceGroupName = ResourceGroupName;
            string serverName = ServerName;

            // When the server was piped in, read the target coordinates from
            // the piped object instead of the name parameters.
            if (string.Equals(this.ParameterSetName, UseParentResourceParameterSet, StringComparison.OrdinalIgnoreCase))
            {
                resourceGroupName = InputObject.ResourceGroupName;
                serverName = InputObject.ServerName;
            }

            return new ServerAdvancedThreatProtectionPolicyModel()
            {
                ResourceGroupName = resourceGroupName,
                ServerName = serverName
            };
        }

        /// <summary>
        /// Creation and initialization of the ModelAdapter object
        /// </summary>
        /// <param name="subscription">The AzureSubscription in which the current execution is performed</param>
        /// <returns>An initialized and ready to use ModelAdapter object</returns>
        protected override SqlAdvancedThreatProtectionAdapter InitModelAdapter(IAzureSubscription subscription)
        {
            return new SqlAdvancedThreatProtectionAdapter(DefaultProfile.DefaultContext);
        }
    }
}
{ "content_hash": "9ee0bb14af8616d5ffe85682d83dc960", "timestamp": "", "source": "github", "line_count": 69, "max_line_length": 169, "avg_line_length": 42.36231884057971, "alnum_prop": 0.6804652754019843, "repo_name": "AzureAutomationTeam/azure-powershell", "id": "0847a7b273958800e9ccdd6726210944a2e3b5cb", "size": "3679", "binary": false, "copies": "5", "ref": "refs/heads/master", "path": "src/ResourceManager/Sql/Commands.Sql/AdvancedThreatProtection/Cmdlet/SqlServerAdvancedThreatProtectionCmdletBase.cs", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C#", "bytes": "14962309" }, { "name": "HTML", "bytes": "209" }, { "name": "JavaScript", "bytes": "4979" }, { "name": "PHP", "bytes": "41" }, { "name": "PowerShell", "bytes": "691666" }, { "name": "Python", "bytes": "20483" }, { "name": "Shell", "bytes": "15168" } ], "symlink_target": "" }
using LiNGS.Client.GameLogic;
using LiNGS.Client.Simulation.Simulators;
using LiNGS.Common.GameCycle;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;

namespace LiNGS.Client.Simulation
{
    /// <summary>
    /// Tracks the simulated fields of registered networked objects and runs
    /// the client's field-simulation logic over them on every update tick.
    /// </summary>
    internal class Simulator : IUpdatable
    {
        private LiNGSClient client;
        private List<SimulatedObjectField> simulatedFields;
        private SimulatorLogic simulatorLogic;

        internal Simulator(LiNGSClient client)
        {
            this.client = client;
            this.simulatedFields = new List<SimulatedObjectField>();
            this.simulatorLogic = client.ClientProperties.FieldSimulationLogic;
        }

        /// <summary>
        /// Routes an incoming field value: simulated fields go through their
        /// tracker, everything else is delivered directly to the object.
        /// </summary>
        internal void NewFieldValue(NetworkedObject networkedObject, string fieldName, string value)
        {
            SimulatedObjectField trackedField = simulatedFields.Find(
                f => f.NetworkedObject == networkedObject && f.FieldName == fieldName);

            if (trackedField == null)
            {
                // Not a simulated field; hand the value straight to the object.
                networkedObject.ReceiveValue(fieldName, value);
                return;
            }

            trackedField.ReceivedValue(value);
        }

        /// <summary>
        /// Starts tracking every field of the object that is flagged as simulated.
        /// The tracker's FieldName is the field's index rendered as a string.
        /// </summary>
        internal void RegisterNetworkedObject(NetworkedObject networkedObject)
        {
            int fieldCount = networkedObject.Fields.Length;
            for (int fieldIndex = 0; fieldIndex < fieldCount; fieldIndex++)
            {
                if (!networkedObject.IsFieldSimulated[fieldIndex])
                {
                    continue;
                }

                simulatedFields.Add(new SimulatedObjectField()
                {
                    Field = networkedObject.Fields[fieldIndex],
                    FieldName = fieldIndex.ToString(),
                    NetworkedObject = networkedObject
                });
            }
        }

        /// <summary>
        /// Stops tracking all simulated fields belonging to the given object.
        /// </summary>
        internal void UnregisterNetworkedObject(NetworkedObject networkedObject)
        {
            simulatedFields.RemoveAll(f => f.NetworkedObject == networkedObject);
        }

        #region IUpdatable Members

        /// <summary>
        /// Runs one simulation step for every tracked field.
        /// </summary>
        public void Update(TimeSpan timeSinceLastUpdate)
        {
            foreach (SimulatedObjectField trackedField in simulatedFields)
            {
                simulatorLogic.RunSimulation(trackedField);
            }
        }

        #endregion
    }
}
{ "content_hash": "489daea44888c46a8f300d8a3176c017", "timestamp": "", "source": "github", "line_count": 66, "max_line_length": 167, "avg_line_length": 31.96969696969697, "alnum_prop": 0.6189573459715639, "repo_name": "valterc/lings", "id": "0974af4d5922c0c5c5eea145c3bbf7603cd65908", "size": "2112", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "LiNGSClient/Simulation/Simulator.cs", "mode": "33188", "license": "mit", "language": [ { "name": "C#", "bytes": "312746" } ], "symlink_target": "" }
// Copyright 2009 the Sputnik authors. All rights reserved. // This code is governed by the BSD license found in the LICENSE file. /** * @name: S15.4.4.1_A2; * @section: 15.4.4.1, 15.2.4.7, 12.6.4; * @assertion: The constructor property of Array has the attribute DontEnum; * @description: Checking use propertyIsEnumerable, for-in; */ //CHECK#1 if (Array.propertyIsEnumerable('constructor') !== false) { $ERROR('#1: Array.propertyIsEnumerable(\'constructor\') === false. Actual: ' + (Array.propertyIsEnumerable('constructor'))); } //CHECK#2 var result = true; for (var p in Array){ if (p === "constructor") { result = false; } } if (result !== true) { $ERROR('#2: result = true; for (p in Array) { if (p === "constructor") result = false; } result === true;'); }
{ "content_hash": "921c2fa2ea947b59de904ea44c0f7afe", "timestamp": "", "source": "github", "line_count": 28, "max_line_length": 126, "avg_line_length": 29.214285714285715, "alnum_prop": 0.6259168704156479, "repo_name": "remobjects/script", "id": "bdc2fed0c0dfa59db603413e9ea4775907982419", "size": "818", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Test/sputniktests/tests/Conformance/15_Native_ECMA_Script_Objects/15.4_Array_Objects/15.4.4_Properties_of_the_Array_Prototype_Object/15.4.4.1_Array_prototype_constructor/S15.4.4.1_A2.js", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Batchfile", "bytes": "1755" }, { "name": "CSS", "bytes": "15950" }, { "name": "HTML", "bytes": "431518" }, { "name": "JavaScript", "bytes": "9852539" }, { "name": "Pascal", "bytes": "802577" }, { "name": "Python", "bytes": "29664" } ], "symlink_target": "" }
package com.cloudcontrolled.api.client.security;

import java.security.cert.CertificateException;
import java.security.cert.X509Certificate;

import javax.net.ssl.TrustManager;

import com.sun.net.ssl.internal.ssl.X509ExtendedTrustManager;

/**
 * <p>
 * DumbX509TrustManager class.
 * </p>
 *
 * <p>
 * SECURITY WARNING: every check method below is a no-op and
 * getAcceptedIssuers returns null, so this trust manager accepts ANY
 * certificate — TLS server/client authentication is effectively disabled.
 * Use only for testing against self-signed endpoints, never in production.
 * Also note the extension of a {@code com.sun.*} internal class, which is
 * not part of the public JDK API.
 * </p>
 *
 * @author Denis Neuling (denisneuling@gmail.com)
 *
 */
public class DumbX509TrustManager extends X509ExtendedTrustManager implements TrustManager {

    /*
     * (non-Javadoc)
     *
     * @see
     * javax.net.ssl.X509TrustManager#checkClientTrusted(java.security.cert.
     * X509Certificate[], java.lang.String)
     */
    /** {@inheritDoc} Intentionally empty: trusts every client certificate. */
    public void checkClientTrusted(X509Certificate[] arg0, String arg1) throws CertificateException {
    }

    /*
     * (non-Javadoc)
     *
     * @see
     * javax.net.ssl.X509TrustManager#checkServerTrusted(java.security.cert.
     * X509Certificate[], java.lang.String)
     */
    /** {@inheritDoc} Intentionally empty: trusts every server certificate. */
    public void checkServerTrusted(X509Certificate[] arg0, String arg1) throws CertificateException {
    }

    /*
     * (non-Javadoc)
     *
     * @see javax.net.ssl.X509TrustManager#getAcceptedIssuers()
     */
    /**
     * <p>
     * getAcceptedIssuers.
     * </p>
     *
     * @return always null — no issuer restrictions are imposed.
     */
    public X509Certificate[] getAcceptedIssuers() {
        return null;
    }

    /*
     * (non-Javadoc)
     *
     * @see
     * com.sun.net.ssl.internal.ssl.X509ExtendedTrustManager#checkClientTrusted
     * (java.security.cert.X509Certificate[], java.lang.String,
     * java.lang.String, java.lang.String)
     */
    /** {@inheritDoc} Intentionally empty: trusts every client certificate. */
    @Override
    public void checkClientTrusted(X509Certificate[] arg0, String arg1, String arg2, String arg3) throws CertificateException {
    }

    /*
     * (non-Javadoc)
     *
     * @see
     * com.sun.net.ssl.internal.ssl.X509ExtendedTrustManager#checkServerTrusted
     * (java.security.cert.X509Certificate[], java.lang.String,
     * java.lang.String, java.lang.String)
     */
    /** {@inheritDoc} Intentionally empty: trusts every server certificate. */
    @Override
    public void checkServerTrusted(X509Certificate[] arg0, String arg1, String arg2, String arg3) throws CertificateException {
    }
}
{ "content_hash": "272630d88893cf7e15bd418f26697065", "timestamp": "", "source": "github", "line_count": 84, "max_line_length": 124, "avg_line_length": 24.55952380952381, "alnum_prop": 0.7135239941832283, "repo_name": "denisneuling/cctrl.jar", "id": "6df8f695ad1f4396ba84a95fb752d3aa4300ce7e", "size": "2661", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "cctrl-api-client/src/main/java/com/cloudcontrolled/api/client/security/DumbX509TrustManager.java", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Java", "bytes": "350214" } ], "symlink_target": "" }
<?php
namespace Magento\Setup;

use Magento\Setup\Mvc\View\Http\InjectTemplateListener;
use Zend\EventManager\EventInterface;
use Zend\ModuleManager\Feature\BootstrapListenerInterface;
use Zend\ModuleManager\Feature\ConfigProviderInterface;
use Zend\Mvc\ModuleRouteListener;
use Zend\Mvc\MvcEvent;

/**
 * ZF module entry point for the Magento Setup application: wires MVC
 * listeners at bootstrap and aggregates the module configuration files.
 */
class Module implements BootstrapListenerInterface, ConfigProviderInterface
{
    /**
     * {@inheritdoc}
     */
    public function onBootstrap(EventInterface $e)
    {
        /** @var \Zend\Mvc\MvcEvent $e */
        /** @var \Zend\Mvc\Application $application */
        $application = $e->getApplication();
        /** @var \Zend\EventManager\EventManager $events */
        $events = $application->getEventManager();
        /** @var \Zend\EventManager\SharedEventManager $sharedEvents */
        $sharedEvents = $events->getSharedManager();
        $moduleRouteListener = new ModuleRouteListener();
        $moduleRouteListener->attach($events);
        // Override Zend\Mvc\View\Http\InjectTemplateListener
        // to process templates by Vendor/Module
        $injectTemplateListener = new InjectTemplateListener();
        // Priority -89 so this runs just before Zend's stock listener
        // (which attaches at -90) would resolve the template.
        $sharedEvents->attach(
            'Zend\Stdlib\DispatchableInterface',
            MvcEvent::EVENT_DISPATCH,
            [$injectTemplateListener, 'injectTemplate'],
            -89
        );
        $response = $e->getResponse();
        if ($response instanceof \Zend\Http\Response) {
            $headers = $response->getHeaders();
            if ($headers) {
                // Setup pages must never be cached by browsers or proxies,
                // and must not be embeddable in foreign frames (clickjacking).
                $headers->addHeaderLine('Cache-Control', 'no-cache, no-store, must-revalidate');
                $headers->addHeaderLine('Pragma', 'no-cache');
                $headers->addHeaderLine('Expires', '1970-01-01');
                $headers->addHeaderLine('X-Frame-Options: SAMEORIGIN');
            }
        }
    }

    /**
     * {@inheritdoc}
     *
     * Merges the base module/router/DI configuration with one state
     * configuration per setup wizard flow (install, update, upgrade, ...).
     */
    public function getConfig()
    {
        $result = array_merge_recursive(
            include __DIR__ . '/../../../config/module.config.php',
            include __DIR__ . '/../../../config/router.config.php',
            include __DIR__ . '/../../../config/di.config.php',
            include __DIR__ . '/../../../config/states.install.config.php',
            include __DIR__ . '/../../../config/states.update.config.php',
            include __DIR__ . '/../../../config/states.home.config.php',
            include __DIR__ . '/../../../config/states.extensionManager.config.php',
            include __DIR__ . '/../../../config/states.upgrade.config.php',
            include __DIR__ . '/../../../config/states.uninstall.config.php',
            include __DIR__ . '/../../../config/states.enable.config.php',
            include __DIR__ . '/../../../config/states.disable.config.php',
            include __DIR__ . '/../../../config/languages.config.php',
            include __DIR__ . '/../../../config/marketplace.config.php'
        );
        return $result;
    }
}
{ "content_hash": "af53553583b3672cefa9f58e2feb8488", "timestamp": "", "source": "github", "line_count": 76, "max_line_length": 96, "avg_line_length": 38.93421052631579, "alnum_prop": 0.5772220344711051, "repo_name": "tarikgwa/test", "id": "be45abbb9dcd18c858712f2d781b02d98b7cf262", "size": "3057", "binary": false, "copies": "7", "ref": "refs/heads/master", "path": "html/setup.ori/src/Magento/Setup/Module.php", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "ApacheConf", "bytes": "26588" }, { "name": "CSS", "bytes": "4874492" }, { "name": "HTML", "bytes": "8635167" }, { "name": "JavaScript", "bytes": "6810903" }, { "name": "PHP", "bytes": "55645559" }, { "name": "Perl", "bytes": "7938" }, { "name": "Shell", "bytes": "4505" }, { "name": "XSLT", "bytes": "19889" } ], "symlink_target": "" }
ACCEPTED #### According to The Catalogue of Life, 3rd January 2011 #### Published in null #### Original name null ### Remarks null
{ "content_hash": "50b9db9e834513a26b1b24e562df898a", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 39, "avg_line_length": 10.307692307692308, "alnum_prop": 0.6940298507462687, "repo_name": "mdoering/backbone", "id": "7745a542c746aa8447a238bfa9aa41306bd773b5", "size": "191", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "life/Plantae/Magnoliophyta/Magnoliopsida/Malpighiales/Euphorbiaceae/Euphorbia/Euphorbia hamaderoensis/README.md", "mode": "33188", "license": "apache-2.0", "language": [], "symlink_target": "" }
#pragma once

#ifndef GEODE_INTEGRATION_TEST_THINCLIENTDURABLEINIT_H_
#define GEODE_INTEGRATION_TEST_THINCLIENTDURABLEINIT_H_

/*
 * ThinClientDurableInit.hpp
 *
 *  Created on: Nov 3, 2008
 *      Author: abhaware
 *
 * Shared setup helpers for the durable-client integration tests: they
 * configure a client with a durable id, create subscription-enabled pools
 * against the test locators, and attach ACK regions to those pools.
 */

// NOTE(review): isLocalServer, durableIds and locatorsG lack `static`,
// unlike isLocator/numberOfLocators below. If this header were included
// from more than one translation unit that would be an ODR violation;
// confirm it is only included once per test binary.
bool isLocalServer = false;

// Durable ids handed to clients 0 and 1 via the "durable-client-id" property.
const char* durableIds[] = {"DurableId1", "DurableId2"};
static bool isLocator = false;
static int numberOfLocators = 1;

// Locator host:port list, resolved once at static-initialization time.
const char* locatorsG =
    CacheHelper::getLocatorHostPort(isLocator, isLocalServer, numberOfLocators);

// Configures a durable client (only for ClientIdx 0 or 1; other indices are
// silently ignored) with one subscription-enabled pool "__TESTPOOL1_" and
// one ACK region attached to it.
void initClientAndRegion(
    int redundancy, int ClientIdx,
    std::chrono::seconds subscriptionAckInterval = std::chrono::seconds(1),
    std::chrono::seconds redundancyMonitorInterval =
        std::chrono::seconds::zero(),
    std::chrono::seconds durableClientTimeout = std::chrono::seconds(60)) {
  auto pp = Properties::create();
  if (ClientIdx < 2) {
    pp->insert("durable-client-id", durableIds[ClientIdx]);
    pp->insert("durable-timeout", durableClientTimeout);
    // Zero means "leave the system default"; only forward explicit values.
    if (redundancyMonitorInterval > std::chrono::seconds::zero()) {
      pp->insert("redundancy-monitor-interval", redundancyMonitorInterval);
    }
    initClient(true, pp);
    getHelper()->createPoolWithLocators("__TESTPOOL1_", locatorsG, true,
                                        redundancy, subscriptionAckInterval);
    createRegionAndAttachPool(regionNames[0], USE_ACK, "__TESTPOOL1_", true);
  }
}

// Configures a durable client with a single pool "__TESTPOOL1_" and two ACK
// regions attached to it. An optional conflation mode is forwarded through
// the "conflate-events" property.
void initClientAndTwoRegions(int ClientIdx, int redundancy,
                             std::chrono::seconds durableClientTimeout,
                             const char* conflation = nullptr,
                             const char* rNames[] = regionNames) {
  auto pp = Properties::create();
  pp->insert("durable-client-id", durableIds[ClientIdx]);
  pp->insert("durable-timeout", durableClientTimeout);
  if (conflation) {
    pp->insert("conflate-events", conflation);
  }
  initClient(true, pp);
  getHelper()->createPoolWithLocators("__TESTPOOL1_", locatorsG, true,
                                      redundancy, std::chrono::seconds(1));
  createRegionAndAttachPool(rNames[0], USE_ACK, "__TESTPOOL1_", true);
  createRegionAndAttachPool(rNames[1], USE_ACK, "__TESTPOOL1_", true);
}

// Same as above but each region gets its own pool ("__TESTPOOL2_" for
// rNames[1], "__TESTPOOL1_" for rNames[0]), all keys are registered durably,
// and readyForEvents() is sent last.
//
// The ordering here is deliberate: see the commented-out block below, which
// documents that calling readyForEvents() before the second pool/region is
// created reproduces a duplicate-durableId exception. Do not reorder.
void initClientAndTwoRegionsAndTwoPools(
    int ClientIdx, int redundancy, std::chrono::seconds durableClientTimeout,
    const char* conflation = nullptr, const char* rNames[] = regionNames) {
  auto pp = Properties::create();
  pp->insert("durable-client-id", durableIds[ClientIdx]);
  pp->insert("durable-timeout", durableClientTimeout);
  if (conflation) {
    pp->insert("conflate-events", conflation);
  }
  initClient(true, pp);
  getHelper()->createPoolWithLocators("__TESTPOOL2_", locatorsG, true,
                                      redundancy, std::chrono::seconds(1));
  createRegionAndAttachPool(rNames[1], USE_ACK, "__TESTPOOL2_", true);

  // Calling readyForEvents() here instead of below causes duplicate durableId
  // exception reproduced.
  /*LOG( "Calling readyForEvents:");
  try {
    getHelper()->cachePtr->readyForEvents();
  }catch(...) {
    LOG("Exception occured while sending readyForEvents");
  }*/

  auto regPtr1 = getHelper()->getRegion(rNames[1]);
  // true -> durable interest: the server queues events while disconnected.
  regPtr1->registerAllKeys(true);
  getHelper()->createPoolWithLocators("__TESTPOOL1_", locatorsG, true,
                                      redundancy, std::chrono::seconds(1));
  createRegionAndAttachPool(rNames[0], USE_ACK, "__TESTPOOL1_", true);
  auto regPtr0 = getHelper()->getRegion(rNames[0]);
  regPtr0->registerAllKeys(true);

  LOG("Calling readyForEvents:");
  try {
    getHelper()->cachePtr->readyForEvents();
  } catch (...) {
    LOG("Exception occured while sending readyForEvents");
  }
}

#endif  // GEODE_INTEGRATION_TEST_THINCLIENTDURABLEINIT_H_
{ "content_hash": "984de6ae5498dd0762ea8af2ecfa60eb", "timestamp": "", "source": "github", "line_count": 99, "max_line_length": 80, "avg_line_length": 37.56565656565657, "alnum_prop": 0.6703414896477548, "repo_name": "mhansonp/geode-native", "id": "f57c856cb0e97c73eeb482c1dc24238801f30229", "size": "4521", "binary": false, "copies": "2", "ref": "refs/heads/develop", "path": "cppcache/integration-test/ThinClientDurableInit.hpp", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "1899" }, { "name": "C#", "bytes": "3515617" }, { "name": "C++", "bytes": "10771399" }, { "name": "CMake", "bytes": "107196" }, { "name": "GAP", "bytes": "73860" }, { "name": "Java", "bytes": "408387" }, { "name": "Perl", "bytes": "2704" }, { "name": "PowerShell", "bytes": "20450" }, { "name": "Shell", "bytes": "35505" } ], "symlink_target": "" }
<?xml version="1.0" encoding="UTF-8"?>
<!--
- Copyright 2015 Red Hat Inc. and/or its affiliates and other contributors.
-
- Licensed under the Apache License, Version 2.0 (the "License")
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-->
<!-- Maven build descriptor for the SwitchYard administration bundle. -->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <artifactId>switchyard-admin</artifactId>
    <!-- Packaged as an OSGi bundle; export list is set in properties below. -->
    <packaging>bundle</packaging>
    <name>SwitchYard: Admin</name>
    <description>SwitchYard Administration</description>
    <url>http://switchyard.org</url>
    <parent>
        <groupId>org.switchyard</groupId>
        <artifactId>switchyard-core-parent</artifactId>
        <version>2.1.0-SNAPSHOT</version>
        <relativePath>../pom.xml</relativePath>
    </parent>
    <properties>
        <!-- Only the admin API packages are exported from the bundle. -->
        <switchyard.osgi.export.pkg>
            org.switchyard.admin.*
        </switchyard.osgi.export.pkg>
    </properties>
    <dependencies>
        <!-- SwitchYard core modules this admin layer builds on; versions are
             managed by the switchyard-core-parent POM. -->
        <dependency>
            <groupId>org.switchyard</groupId>
            <artifactId>switchyard-api</artifactId>
        </dependency>
        <dependency>
            <groupId>org.switchyard</groupId>
            <artifactId>switchyard-common</artifactId>
        </dependency>
        <dependency>
            <groupId>org.switchyard</groupId>
            <artifactId>switchyard-config</artifactId>
        </dependency>
        <dependency>
            <groupId>org.switchyard</groupId>
            <artifactId>switchyard-deploy</artifactId>
        </dependency>
        <dependency>
            <groupId>org.switchyard</groupId>
            <artifactId>switchyard-extensions-java</artifactId>
        </dependency>
        <dependency>
            <groupId>org.switchyard</groupId>
            <artifactId>switchyard-extensions-wsdl</artifactId>
        </dependency>
        <dependency>
            <groupId>org.switchyard</groupId>
            <artifactId>switchyard-runtime</artifactId>
        </dependency>
        <!-- Test-only dependency. -->
        <dependency>
            <groupId>org.mockito</groupId>
            <artifactId>mockito-core</artifactId>
            <scope>test</scope>
        </dependency>
    </dependencies>
</project>
{ "content_hash": "59a1c4caee972139e34eac5a1d32e05f", "timestamp": "", "source": "github", "line_count": 68, "max_line_length": 201, "avg_line_length": 39.89705882352941, "alnum_prop": 0.6483597493549577, "repo_name": "cunningt/switchyard", "id": "630d9ebbc3d8051a9df7f485c154cf198e287c91", "size": "2713", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "core/admin/pom.xml", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "1387" }, { "name": "CSS", "bytes": "1428" }, { "name": "Clojure", "bytes": "239" }, { "name": "HTML", "bytes": "12878" }, { "name": "Java", "bytes": "9666412" }, { "name": "Ruby", "bytes": "1772" }, { "name": "XSLT", "bytes": "83579" } ], "symlink_target": "" }
<img src="https://fomkin.org/korolev/korolev-face-margin.svg" align="right" width="260" /> [![Build Status](https://travis-ci.org/fomkin/korolev.svg?branch=master)](https://travis-ci.org/fomkin/korolev) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Ffomkin%2Fkorolev.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Ffomkin%2Fkorolev?ref=badge_shield) [![Gitter](https://badges.gitter.im/fomkin/korolev.svg)](https://gitter.im/fomkin/korolev?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![Join the chat at https://telegram.me/korolev_io](https://img.shields.io/badge/chat-on_telegram_(russian)-0088cc.svg)](https://telegram.me/korolev_io) Not long ago we have entered the era of single-page applications. Some people say that we no longer need a server. They say that JavaScript applications can connect to DBMS directly. Fat clients. **We disagree with this.** This project is an attempt to solve the problems of modern fat web. Korolev runs a single-page application on the server side, keeping in the browser only a bridge to receive commands and send events. The page loads instantly and works fast, because it does a minimal amount of computation. It's important that Korolev provides a unified environment for full stack development. Client and server are now combined into a single app without any REST protocol or something else in the middle. ## Why? 
* Lightning-fast page loading speed (~6kB of uncompressed JS)
* Client-side RAM consumption comparable to static HTML
* Indexable pages out of the box
* Routing out of the box
* Build extremely large apps without increasing the size of the page
* No need to write a CRUD REST service
* Connect to infrastructure (DBMS, message queue) directly from the application

## Examples

* [Features](https://github.com/fomkin/korolev/tree/master/examples)
* [Multiplayer match-three game built on Korolev](https://match3.fomkin.org/)
* Goldbricker - Encrypted ToDo List (coming in summer 2020)
{ "content_hash": "1534deb2a7d831203e19a4461e77a029", "timestamp": "", "source": "github", "line_count": 46, "max_line_length": 421, "avg_line_length": 60.95652173913044, "alnum_prop": 0.7606990014265336, "repo_name": "fomkin/korolev", "id": "341a843379ea1ee99367284f3b25780a482ba4de", "size": "2831", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "README.md", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "307" }, { "name": "JavaScript", "bytes": "25899" }, { "name": "Scala", "bytes": "625736" } ], "symlink_target": "" }
package org.kiji.mapreduce.lib.graph;

import java.util.TreeMap;

import org.kiji.mapreduce.lib.avro.Edge;
import org.kiji.mapreduce.lib.avro.Node;

/**
 * Fluent builder that assembles an {@link Edge}, optionally wiring in a
 * target {@link Node} through a nested {@link NodeBuilder}.
 */
public class EdgeBuilder {
  /** Weight assigned to an edge when the caller does not supply one. */
  public static final double DEFAULT_EDGE_WEIGHT = 1.0;

  /** The edge under construction; handed back verbatim by {@link #build()}. */
  private Edge mResult;

  /**
   * Wraps an already-existing edge so it can be modified via the builder.
   *
   * @param edge An edge.
   */
  public EdgeBuilder(Edge edge) {
    if (edge == null) {
      throw new IllegalArgumentException("edge is null");
    }
    mResult = edge;
  }

  /**
   * Starts a brand-new edge populated with default values: no label,
   * default weight, and no target node.
   */
  public EdgeBuilder() {
    mResult = new Edge();
    mResult.setTarget(null);
    mResult.setWeight(DEFAULT_EDGE_WEIGHT);
    mResult.setLabel((String) null);
  }

  /**
   * Starts a new edge with the given weight.
   *
   * @param weight An edge weight.
   * @deprecated use the no-arg constructor plus {@link #setWeight(double)}.
   */
  @Deprecated
  public EdgeBuilder(double weight) {
    this(null, weight, null);
  }

  /**
   * Starts a new edge with the given label.
   *
   * @param label An edge label.
   * @deprecated use the no-arg constructor plus {@link #setLabel(String)}.
   */
  @Deprecated
  public EdgeBuilder(String label) {
    this(label, DEFAULT_EDGE_WEIGHT, null);
  }

  /**
   * Starts a new edge with the given label and weight.
   *
   * @param label An edge label.
   * @param weight An edge weight.
   * @deprecated use the no-arg constructor plus the fluent setters.
   */
  @Deprecated
  public EdgeBuilder(String label, double weight) {
    this(label, weight, null);
  }

  /**
   * Starts a new edge with the given weight and target node.
   *
   * @param weight An edge weight.
   * @param target The target node of the edge.
   * @deprecated use the no-arg constructor plus the fluent setters.
   */
  @Deprecated
  public EdgeBuilder(double weight, Node target) {
    this(null, weight, target);
  }

  /**
   * Starts a new edge with the given label and target node.
   *
   * @param label An edge label.
   * @param target The target node of the edge.
   * @deprecated use the no-arg constructor plus the fluent setters.
   */
  @Deprecated
  public EdgeBuilder(String label, Node target) {
    this(label, DEFAULT_EDGE_WEIGHT, target);
  }

  /**
   * Starts a new edge with the given label, weight and target node.
   *
   * @param label An edge label.
   * @param weight An edge weight.
   * @param target The target node of the edge.
   * @deprecated use the no-arg constructor plus the fluent setters.
   */
  @Deprecated
  public EdgeBuilder(String label, double weight, Node target) {
    mResult = new Edge();
    mResult.setTarget(target);
    mResult.setLabel(label);
    mResult.setWeight(weight);
  }

  /**
   * Sets the label of the edge.
   *
   * @param label An edge label.
   * @return This builder, for chaining.
   */
  public EdgeBuilder setLabel(String label) {
    mResult.setLabel(label);
    return this;
  }

  /**
   * Sets the weight of the edge.
   *
   * @param weight An edge weight.
   * @return This builder, for chaining.
   */
  public EdgeBuilder setWeight(double weight) {
    mResult.setWeight(weight);
    return this;
  }

  /**
   * Sets the target node of the edge.
   *
   * @param target The target node.
   * @return This builder, for chaining.
   */
  public EdgeBuilder setTarget(Node target) {
    mResult.setTarget(target);
    return this;
  }

  /**
   * Sets the target node of the edge from a label.
   *
   * @param label The label for the target node.
   * @return This builder, for chaining.
   * @deprecated prefer {@link #target(String)}.
   */
  @Deprecated
  public EdgeBuilder setTarget(String label) {
    return setTarget(label, NodeBuilder.DEFAULT_NODE_WEIGHT);
  }

  /**
   * Sets the target node of the edge from a weight.
   *
   * @param weight The weight of the target node.
   * @return This builder, for chaining.
   * @deprecated prefer {@link #target(double)}.
   */
  @Deprecated
  public EdgeBuilder setTarget(double weight) {
    return setTarget(null, weight);
  }

  /**
   * Sets the target node of the edge from a label and weight.
   *
   * @param label The label of the target node.
   * @param weight The weight of the target node.
   * @return This builder, for chaining.
   * @deprecated prefer {@link #target(String, double)}.
   */
  @Deprecated
  public EdgeBuilder setTarget(String label, double weight) {
    // The NodeBuilder returned by target() is intentionally discarded here;
    // this overload keeps the EdgeBuilder chain going instead.
    target(label, weight);
    return this;
  }

  /**
   * Creates a fresh target node on the edge and returns a builder for it.
   *
   * @return A {@link NodeBuilder} for the target of the edge.
   */
  public NodeBuilder target() {
    return target(null);
  }

  /**
   * Creates a labeled target node on the edge and returns a builder for it.
   *
   * @param label The label of the target node.
   * @return A {@link NodeBuilder} for the target of the edge.
   */
  public NodeBuilder target(String label) {
    return target(label, NodeBuilder.DEFAULT_NODE_WEIGHT);
  }

  /**
   * Creates a weighted target node on the edge and returns a builder for it.
   *
   * @param weight The weight of the target node.
   * @return A {@link NodeBuilder} for the target of the edge.
   */
  public NodeBuilder target(double weight) {
    return target(null, weight);
  }

  /**
   * Creates a labeled, weighted target node on the edge and returns a
   * builder for it. Any previously set target is replaced.
   *
   * @param label The label of the target node.
   * @param weight The weight of the target node.
   * @return A {@link NodeBuilder} for the target of the edge.
   */
  public NodeBuilder target(String label, double weight) {
    Node node = new Node();
    mResult.setTarget(node);
    NodeBuilder nodeBuilder = new NodeBuilder(node);
    nodeBuilder.setLabel(label);
    nodeBuilder.setWeight(weight);
    return nodeBuilder;
  }

  /**
   * Adds an annotation to the edge.
   *
   * @param key The annotation key.
   * @param value The annotation value.
   * @return This builder, for chaining.
   */
  public EdgeBuilder addAnnotation(String key, String value) {
    if (mResult.getAnnotations() == null) {
      // Lazily allocate the map so edges without annotations stay light.
      mResult.setAnnotations(new TreeMap<String, String>());
    }
    mResult.getAnnotations().put(key, value);
    return this;
  }

  /**
   * Returns the edge.
   *
   * @return The built edge.
   */
  public Edge build() {
    return mResult;
  }
}
{ "content_hash": "d3e1eb7c366d30da62ed516b88d5741c", "timestamp": "", "source": "github", "line_count": 241, "max_line_length": 79, "avg_line_length": 23.821576763485478, "alnum_prop": 0.646577251349939, "repo_name": "iafek/kiji-mapreduce", "id": "484aca39de49ea6a6de14f5aee808aa20c617fc3", "size": "6457", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "kiji-mapreduce-lib/kiji-mapreduce-lib/src/main/java/org/kiji/mapreduce/lib/graph/EdgeBuilder.java", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Java", "bytes": "1525831" }, { "name": "Python", "bytes": "18963" }, { "name": "Scala", "bytes": "47132" } ], "symlink_target": "" }
ACCEPTED #### According to The Catalogue of Life, 3rd January 2011 #### Published in null #### Original name Commelina dielsii Herter ### Remarks null
{ "content_hash": "7739a18b61a42400c616dd497118defa", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 39, "avg_line_length": 11.846153846153847, "alnum_prop": 0.7207792207792207, "repo_name": "mdoering/backbone", "id": "583926e761e7a85e660fcc2beb85b29a6b09e3a2", "size": "202", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "life/Plantae/Magnoliophyta/Liliopsida/Commelinales/Commelinaceae/Commelina/Commelina erecta/Commelina erecta dielsii/README.md", "mode": "33188", "license": "apache-2.0", "language": [], "symlink_target": "" }
package store

import (
	"database/sql"
	"fmt"

	_ "github.com/lib/pq"

	"github.com/heetch/sqalx"
	"github.com/jmoiron/sqlx"
	"github.com/pkg/errors"

	"github.com/thoas/observr/configuration"
	"github.com/thoas/observr/store/models"
)

// DataStore wraps a sqalx node (a sqlx connection supporting nested
// transactions) and exposes the storage operations used by the service.
type DataStore struct {
	connection sqalx.Node
}

// NewDataStore connects to the postgres server described by cfg, applies the
// configured connection-pool limits, and returns a ready-to-use DataStore.
func NewDataStore(cfg configuration.Data) (*DataStore, error) {
	dbx, err := sqlx.Connect("postgres", cfg.DSN)
	if err != nil {
		return nil, errors.Wrap(err, "cannot connect to postgres server")
	}

	dbx.SetMaxIdleConns(cfg.MaxIdleConnections)
	dbx.SetMaxOpenConns(cfg.MaxOpenConnections)

	node, err := sqalx.New(dbx)
	if err != nil {
		return nil, errors.Wrap(err, "cannot instantiate postgres client driver")
	}

	return &DataStore{
		connection: node,
	}, nil
}

// Load is a convenience wrapper around NewDataStore.
func Load(cfg configuration.Data) (*DataStore, error) {
	return NewDataStore(cfg)
}

// Models lists every persisted model; Flush truncates them in this order.
var Models = []models.Model{
	&models.VisitTag{},
	&models.GroupTag{},
	&models.Tag{},
	&models.Visit{},
	&models.Project{},
	&models.User{},
}

// Connection returns SQLStore current connection.
func (s *DataStore) Connection() sqalx.Node {
	return s.connection
}

// Close releases the underlying database connection.
func (s *DataStore) Close() error {
	return s.Connection().Close()
}

// Flush truncates every table listed in Models (cascading to dependents).
func (s *DataStore) Flush() error {
	for _, model := range Models {
		tableName := model.TableName()

		row, err := s.Connection().Query(fmt.Sprintf("TRUNCATE %s CASCADE", tableName))
		if err != nil {
			return errors.Wrap(err, fmt.Sprintf("cannot truncate %s", tableName))
		}

		// Close inside the loop body: the previous defer-based close kept
		// every result set open until Flush returned and dropped close errors.
		if err := row.Close(); err != nil {
			return errors.Wrap(err, fmt.Sprintf("cannot close truncate result for %s", tableName))
		}
	}

	return nil
}

// Ping pings the storage to know if it's alive.
func (s *DataStore) Ping() error {
	row, err := s.Connection().Query("SELECT true")
	if row != nil {
		defer func() {
			// Cannot captures or logs this error.
			thr := row.Close()
			_ = thr
		}()
	}
	if err != nil {
		return errors.Wrap(err, "cannot ping database")
	}

	return nil
}

// IsErrNoRows returns if given error is a "no rows" error.
func IsErrNoRows(err error) bool {
	return errors.Cause(err) == sql.ErrNoRows
}
{ "content_hash": "ad32c3b62b50398a061a9dbfbf7921ce", "timestamp": "", "source": "github", "line_count": 98, "max_line_length": 81, "avg_line_length": 20.142857142857142, "alnum_prop": 0.6828774062816616, "repo_name": "thoas/observr", "id": "71d5fbf2a9d736ae9c4ed2ec11ec4db5f10d300b", "size": "1974", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "store/store.go", "mode": "33188", "license": "mit", "language": [ { "name": "Go", "bytes": "37898" }, { "name": "Makefile", "bytes": "578" }, { "name": "Mako", "bytes": "494" }, { "name": "Python", "bytes": "6667" } ], "symlink_target": "" }
package org.apache.camel.component.as2.api.entity; import org.apache.camel.component.as2.api.util.AS2HeaderUtils.Parameter; import org.junit.Test; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; public class DispositionNotificationOptionsParserTest { private static final String TEST_NAME_VALUES = " signed-receipt-protocol = optional , pkcs7-signature ; signed-receipt-micalg = required , sha1 "; private static final String SIGNED_RECEIPT_PROTOCOL_ATTRIBUTE = "signed-receipt-protocol"; private static final String SIGNED_RECEIPT_PROTOCOL_IMPORTANCE = "optional"; private static final String[] SIGNED_RECEIPT_PROTOCOL_VALUES = {"pkcs7-signature"}; private static final String SIGNED_RECEIPT_MICALG_ATTRIBUTE = "signed-receipt-micalg"; private static final String SIGNED_RECEIPT_MICALG_IMPORTANCE = "required"; private static final String[] SIGNED_RECEIPT_MICALG_VALUES = {"sha1"}; @Test public void parseDispositionNotificationOptionsTest() { DispositionNotificationOptions dispositionNotificationOptions = DispositionNotificationOptionsParser.parseDispositionNotificationOptions(TEST_NAME_VALUES, null); Parameter signedReceiptProtocol = dispositionNotificationOptions.getSignedReceiptProtocol(); assertNotNull("signed receipt protocol not parsed", signedReceiptProtocol); assertEquals("Unexpected value for signed receipt protocol attribute", SIGNED_RECEIPT_PROTOCOL_ATTRIBUTE, signedReceiptProtocol.getAttribute()); assertEquals("Unexpected value for signed receipt protocol importance", SIGNED_RECEIPT_PROTOCOL_IMPORTANCE, signedReceiptProtocol.getImportance().getImportance()); assertArrayEquals("Unexpected value for parameter importance", SIGNED_RECEIPT_PROTOCOL_VALUES, signedReceiptProtocol.getValues()); Parameter signedReceiptMicalg = dispositionNotificationOptions.getSignedReceiptMicalg(); assertNotNull("signed receipt micalg not parsed", signedReceiptProtocol); assertEquals("Unexpected 
value for signed receipt micalg attribute", SIGNED_RECEIPT_MICALG_ATTRIBUTE, signedReceiptMicalg.getAttribute()); assertEquals("Unexpected value for signed receipt micalg importance", SIGNED_RECEIPT_MICALG_IMPORTANCE, signedReceiptMicalg.getImportance().getImportance()); assertArrayEquals("Unexpected value for micalg importance", SIGNED_RECEIPT_MICALG_VALUES, signedReceiptMicalg.getValues()); } }
{ "content_hash": "1124063eff47734740236a232fe33550", "timestamp": "", "source": "github", "line_count": 38, "max_line_length": 171, "avg_line_length": 67.15789473684211, "alnum_prop": 0.7884012539184952, "repo_name": "anoordover/camel", "id": "d9198ec0c716a4db61a5cdf51c72d9baa6a9d37e", "size": "3355", "binary": false, "copies": "9", "ref": "refs/heads/master", "path": "components/camel-as2/camel-as2-api/src/test/java/org/apache/camel/component/as2/api/entity/DispositionNotificationOptionsParserTest.java", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Apex", "bytes": "6519" }, { "name": "Batchfile", "bytes": "1518" }, { "name": "CSS", "bytes": "30373" }, { "name": "Elm", "bytes": "10852" }, { "name": "FreeMarker", "bytes": "11410" }, { "name": "Groovy", "bytes": "44835" }, { "name": "HTML", "bytes": "903016" }, { "name": "Java", "bytes": "74383981" }, { "name": "JavaScript", "bytes": "90399" }, { "name": "Makefile", "bytes": "513" }, { "name": "Python", "bytes": "36" }, { "name": "Ruby", "bytes": "4802" }, { "name": "Scala", "bytes": "323982" }, { "name": "Shell", "bytes": "17120" }, { "name": "Tcl", "bytes": "4974" }, { "name": "Thrift", "bytes": "6979" }, { "name": "XQuery", "bytes": "546" }, { "name": "XSLT", "bytes": "288715" } ], "symlink_target": "" }
<?php namespace Usyninis\Wucms;

use Illuminate\Database\Eloquent\Model as Eloquent;

/**
 * Eloquent model for the prop_unit association table linking units to props.
 */
class UnitProp extends Eloquent
{
    // The prop_unit table has no created_at/updated_at columns.
    public $timestamps = false;

    protected $table = 'prop_unit';

    /**
     * The unit this row belongs to.
     *
     * NOTE(review): method name is capitalized, unlike prop() below —
     * presumably resolves the Unit model through the default unit_id
     * foreign key; confirm callers rely on the ->Units accessor name.
     *
     * @return \Illuminate\Database\Eloquent\Relations\BelongsTo
     */
    public function Units()
    {
        return $this->belongsTo('Unit');
    }

    /**
     * The prop this row belongs to.
     *
     * @return \Illuminate\Database\Eloquent\Relations\BelongsTo
     */
    public function prop()
    {
        return $this->belongsTo('Prop');
    }
}
{ "content_hash": "dffa244933fa856fe35d7cab0661f2e1", "timestamp": "", "source": "github", "line_count": 25, "max_line_length": 51, "avg_line_length": 13, "alnum_prop": 0.6861538461538461, "repo_name": "usyninis/wucms-laravel", "id": "e69cd4d3adfe1f5d5e0e73869d203fed4c6f0b69", "size": "325", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/models/UnitProp.php", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "53755" }, { "name": "HTML", "bytes": "213" }, { "name": "JavaScript", "bytes": "146751" }, { "name": "PHP", "bytes": "207144" } ], "symlink_target": "" }